text
stringlengths 7
99.5k
| meta
dict |
|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.atmosphere.websocket;
import org.apache.camel.test.AvailablePortFinder;
import org.apache.camel.test.junit5.CamelTestSupport;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHolder;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
/**
 * Base class for websocket Camel route tests that run against an embedded
 * Jetty server whose {@code CamelWebSocketServlet} is configured through a
 * servlet init parameter ({@code events=true}).
 */
public class WebsocketCamelRouterWithInitParamTestSupport extends CamelTestSupport {
    public static final String CONTEXT = "/mycontext";
    public static final String CONTEXT_URL = "http://localhost/mycontext";
    protected static final int PORT = AvailablePortFinder.getNextAvailable();

    // Subclasses may flip this off to manage the Camel context themselves.
    protected boolean startCamelContext = true;
    protected Server server;
    protected ServletHolder servletHolder;

    @Override
    @BeforeEach
    public void setUp() throws Exception {
        server = new Server(PORT);
        ServletContextHandler contextHandler = new ServletContextHandler(ServletContextHandler.SESSIONS);
        contextHandler.setContextPath("/");
        server.setHandler(contextHandler);
        // Start the Camel context (if requested) after the handler is wired
        // but before the servlet is registered and Jetty is started.
        if (startCamelContext) {
            super.setUp();
        }
        servletHolder = new ServletHolder(new CamelWebSocketServlet());
        servletHolder.setName("CamelWsServlet");
        servletHolder.setInitParameter("events", "true");
        contextHandler.addServlet(servletHolder, "/*");
        server.start();
    }

    @Override
    @AfterEach
    public void tearDown() throws Exception {
        // Tear down the Camel context first, then the Jetty server.
        if (startCamelContext) {
            super.tearDown();
        }
        server.stop();
        server.destroy();
    }
}
|
{
"pile_set_name": "Github"
}
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.jsmpp.bean;
/**
 * Response PDU for the SMPP {@code cancel_sm} operation. Carries no body
 * fields of its own beyond the inherited {@code Command} header.
 *
 * @author uudashr
 */
public class CancelSmResp extends Command {
    private static final long serialVersionUID = -8535074405458491134L;

    /** Creates an empty {@code cancel_sm_resp} command. */
    public CancelSmResp() {
        // The implicit superclass constructor is sufficient.
    }
}
|
{
"pile_set_name": "Github"
}
|
<?php
/*
* Copyright 2014 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
/**
* The "snapshots" collection of methods.
* Typical usage is:
* <code>
* $computeService = new Google_Service_Compute(...);
* $snapshots = $computeService->snapshots;
* </code>
*/
class Google_Service_Compute_Resource_Snapshots extends Google_Service_Resource
{
  /**
   * Deletes the specified Snapshot resource. Keep in mind that deleting a single
   * snapshot might not necessarily delete all the data on that snapshot. If any
   * data on the snapshot that is marked for deletion is needed for subsequent
   * snapshots, the data will be moved to the next corresponding snapshot.
   *
   * For more information, see Deleting snapshots. (snapshots.delete)
   *
   * @param string $project Project ID for this request.
   * @param string $snapshot Name of the Snapshot resource to delete.
   * @param array $optParams Optional parameters.
   *
   * @opt_param string requestId An optional request ID to identify requests.
   * Specify a unique request ID so that if you must retry your request, the
   * server will know to ignore the request if it has already been completed.
   *
   * For example, consider a situation where you make an initial request and the
   * request times out. If you make the request again with the same request ID,
   * the server can check if original operation with the same request ID was
   * received, and if so, will ignore the second request. This prevents clients
   * from accidentally creating duplicate commitments.
   *
   * The request ID must be a valid UUID with the exception that zero UUID is not
   * supported (00000000-0000-0000-0000-000000000000).
   * @return Google_Service_Compute_Operation
   */
  public function delete($project, $snapshot, $optParams = array())
  {
    // Required path parameters first; optional query parameters override/extend.
    $params = array('project' => $project, 'snapshot' => $snapshot);
    $params = array_merge($params, $optParams);
    return $this->call('delete', array($params), "Google_Service_Compute_Operation");
  }
  /**
   * Returns the specified Snapshot resource. Get a list of available snapshots by
   * making a list() request. (snapshots.get)
   *
   * @param string $project Project ID for this request.
   * @param string $snapshot Name of the Snapshot resource to return.
   * @param array $optParams Optional parameters.
   * @return Google_Service_Compute_Snapshot
   */
  public function get($project, $snapshot, $optParams = array())
  {
    $params = array('project' => $project, 'snapshot' => $snapshot);
    $params = array_merge($params, $optParams);
    return $this->call('get', array($params), "Google_Service_Compute_Snapshot");
  }
  /**
   * Retrieves the list of Snapshot resources contained within the specified
   * project. (snapshots.listSnapshots)
   *
   * @param string $project Project ID for this request.
   * @param array $optParams Optional parameters.
   *
   * @opt_param string filter A filter expression that filters resources listed in
   * the response. The expression must specify the field name, a comparison
   * operator, and the value that you want to use for filtering. The value must be
   * a string, a number, or a boolean. The comparison operator must be either =,
   * !=, >, or <.
   *
   * For example, if you are filtering Compute Engine instances, you can exclude
   * instances named example-instance by specifying name != example-instance.
   *
   * You can also filter nested fields. For example, you could specify
   * scheduling.automaticRestart = false to include instances only if they are not
   * scheduled for automatic restarts. You can use filtering on nested fields to
   * filter based on resource labels.
   *
   * To filter on multiple expressions, provide each separate expression within
   * parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform =
   * "Intel Skylake"). By default, each expression is an AND expression. However,
   * you can include AND and OR expressions explicitly. For example, (cpuPlatform
   * = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND
   * (scheduling.automaticRestart = true).
   * @opt_param string maxResults The maximum number of results per page that
   * should be returned. If the number of available results is larger than
   * maxResults, Compute Engine returns a nextPageToken that can be used to get
   * the next page of results in subsequent list requests. Acceptable values are 0
   * to 500, inclusive. (Default: 500)
   * @opt_param string orderBy Sorts list results by a certain order. By default,
   * results are returned in alphanumerical order based on the resource name.
   *
   * You can also sort results in descending order based on the creation timestamp
   * using orderBy="creationTimestamp desc". This sorts results based on the
   * creationTimestamp field in reverse chronological order (newest result first).
   * Use this to sort resources like operations so that the newest operation is
   * returned first.
   *
   * Currently, only sorting by name or creationTimestamp desc is supported.
   * @opt_param string pageToken Specifies a page token to use. Set pageToken to
   * the nextPageToken returned by a previous list request to get the next page of
   * results.
   * @return Google_Service_Compute_SnapshotList
   */
  public function listSnapshots($project, $optParams = array())
  {
    $params = array('project' => $project);
    $params = array_merge($params, $optParams);
    // The wire method is named 'list'; the PHP method is renamed because
    // 'list' is a reserved word in PHP.
    return $this->call('list', array($params), "Google_Service_Compute_SnapshotList");
  }
  /**
   * Sets the labels on a snapshot. To learn more about labels, read the Labeling
   * Resources documentation. (snapshots.setLabels)
   *
   * @param string $project Project ID for this request.
   * @param string $resource Name of the resource for this request.
   * @param Google_Service_Compute_GlobalSetLabelsRequest $postBody
   * @param array $optParams Optional parameters.
   * @return Google_Service_Compute_Operation
   */
  public function setLabels($project, $resource, Google_Service_Compute_GlobalSetLabelsRequest $postBody, $optParams = array())
  {
    $params = array('project' => $project, 'resource' => $resource, 'postBody' => $postBody);
    $params = array_merge($params, $optParams);
    return $this->call('setLabels', array($params), "Google_Service_Compute_Operation");
  }
}
|
{
"pile_set_name": "Github"
}
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function, unicode_literals
from corpuscrawler.util import crawl_bibleis
def crawl(crawler):
    """Crawl the Huastec ('hus') corpus from the Bible.is 'HUSLLB' translation."""
    output = crawler.get_output(language='hus')
    crawl_bibleis(crawler, output, bible='HUSLLB')
|
{
"pile_set_name": "Github"
}
|
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// The utility to read checkpoints for google brain tensor ops and v3
// checkpoints for dist_belief.
//
#ifndef TENSORFLOW_UTIL_TENSOR_SLICE_READER_CACHE_H_
#define TENSORFLOW_UTIL_TENSOR_SLICE_READER_CACHE_H_
#include <set>
#include <unordered_map>
#include <utility>
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/tensor_slice_reader.h"
namespace tensorflow {
namespace checkpoint {
class TensorSliceReaderCache;
// Wrapper to a lazily allocated TensorSliceReaderCache.
class TensorSliceReaderCacheWrapper {
 public:
  TensorSliceReaderCacheWrapper();
  ~TensorSliceReaderCacheWrapper();

  // Same as TensorSliceReaderCache::GetReader().
  // NOTE(review): declared const, but both members below are mutable, so this
  // presumably allocates the wrapped cache on first use under mu_ -- confirm
  // against the .cc implementation.
  const TensorSliceReader* GetReader(
      const string& filepattern,
      TensorSliceReader::OpenTableFunction open_function,
      int preferred_shard) const;

 private:
  mutable mutex mu_;                                 // guards cache_
  mutable TensorSliceReaderCache* cache_ = nullptr;  // created lazily
};
// A cache of TensorSliceReaders, keyed by file pattern and open function.
class TensorSliceReaderCache {
 public:
  TensorSliceReaderCache();
  ~TensorSliceReaderCache();

  // Returns the TensorSliceReader corresponding to 'filepattern' and the
  // open_function. May return nullptr if we can not create a new
  // TensorSliceReader for the filepattern/open_function combination.
  const TensorSliceReader* GetReader(
      const string& filepattern,
      TensorSliceReader::OpenTableFunction open_function, int preferred_shard);

 private:
  // Need to use a regular function type in the key map as std::function does
  // not support ==.
  typedef Status (*OpenFuncType)(const string&, TensorSliceReader::Table**);

  // Protects attributes below.
  mutex mu_;

  // Maps of opened readers (file pattern -> open function + reader).
  std::unordered_map<string, std::pair<OpenFuncType, TensorSliceReader*>>
      readers_;

  // Set of keys that a previous GetReader() call is still trying to populate.
  std::set<string> still_opening_;

  // Condition variable to notify when a reader has been created.
  condition_variable cv_;
};
} // namespace checkpoint
} // namespace tensorflow
#endif // TENSORFLOW_UTIL_TENSOR_SLICE_READER_CACHE_H_
|
{
"pile_set_name": "Github"
}
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"log"
"time"
"strings"
gce "cloud.google.com/go/compute/metadata"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
monitoring "google.golang.org/api/monitoring/v3"
)
// SD Dummy Exporter is a testing utility that exports a metric of constant value to Stackdriver
// in a loop. Metric name and value can be specified with flags 'metric-name' and 'metric-value'.
// SD Dummy Exporter assumes that it runs as a pod in GCE or GKE cluster, and the pod id, pod name
// and namespace are passed to it with 'pod-id', 'pod-name' and 'namespace' flags.
// Pod ID and pod name can be passed to a pod via Downward API.
func main() {
	// Gather pod information
	podId := flag.String("pod-id", "", "pod id")
	namespace := flag.String("namespace", "", "namespace")
	podName := flag.String("pod-name", "", "pod name")
	metricName := flag.String("metric-name", "foo", "custom metric name")
	metricValue := flag.Int64("metric-value", 0, "custom metric value")
	metricLabelsArg := flag.String("metric-labels", "bar=1", "custom metric labels")
	// Whether to use old Stackdriver resource model - use monitored resource "gke_container"
	// For old resource model, podId flag has to be set.
	useOldResourceModel := flag.Bool("use-old-resource-model", true, "use old stackdriver resource model")
	// Whether to use new Stackdriver resource model - use monitored resource "k8s_pod"
	// For new resource model, podName and namespace flags have to be set.
	useNewResourceModel := flag.Bool("use-new-resource-model", false, "use new stackdriver resource model")
	flag.Parse()
	if *podId == "" && *useOldResourceModel {
		log.Fatalf("No pod id specified.")
	}
	if *podName == "" && *useNewResourceModel {
		log.Fatalf("No pod name specified.")
	}
	if *namespace == "" && *useNewResourceModel {
		log.Fatalf("No pod namespace specified.")
	}
	stackdriverService, err := getStackDriverService()
	if err != nil {
		log.Fatalf("Error getting Stackdriver service: %v", err)
	}
	// Parse "k1=v1,k2=v2" metric labels. Validate each entry explicitly:
	// the previous code indexed labelParts[1] unconditionally and panicked
	// with index-out-of-range on a malformed entry without '='.
	metricLabels := make(map[string]string)
	for _, label := range strings.Split(*metricLabelsArg, ",") {
		labelParts := strings.SplitN(label, "=", 2)
		if len(labelParts) != 2 {
			log.Fatalf("Malformed metric label %q, expected key=value.", label)
		}
		metricLabels[labelParts[0]] = labelParts[1]
	}
	oldModelLabels := getResourceLabelsForOldModel(*podId)
	newModelLabels := getResourceLabelsForNewModel(*namespace, *podName)
	for {
		if *useOldResourceModel {
			err := exportMetric(stackdriverService, *metricName, *metricValue, metricLabels, "gke_container", oldModelLabels)
			if err != nil {
				log.Printf("Failed to write time series data for old resource model: %v\n", err)
			} else {
				// Bug fix: this branch previously logged "new resource model"
				// and printed the *int64 flag pointer instead of its value.
				log.Printf("Finished writing time series for old resource model with value: %v\n", *metricValue)
			}
		}
		if *useNewResourceModel {
			err := exportMetric(stackdriverService, *metricName, *metricValue, metricLabels, "k8s_pod", newModelLabels)
			if err != nil {
				log.Printf("Failed to write time series data for new resource model: %v\n", err)
			} else {
				log.Printf("Finished writing time series for new resource model with value: %v\n", *metricValue)
			}
		}
		time.Sleep(5000 * time.Millisecond)
	}
}
// getStackDriverService builds a Stackdriver Monitoring v3 client that
// authenticates with the GCE instance's default compute token source.
func getStackDriverService() (*monitoring.Service, error) {
	tokenSource := google.ComputeTokenSource("")
	oauthClient := oauth2.NewClient(context.Background(), tokenSource)
	return monitoring.New(oauthClient)
}
// getResourceLabelsForOldModel returns resource labels needed to correctly label metric data
// exported to StackDriver using the legacy "gke_container" resource model. Labels contain
// details on the cluster (project id, name) and pod for which the metric is exported (zone, id).
// Metadata-server lookup errors are deliberately ignored; missing values stay empty.
func getResourceLabelsForOldModel(podId string) map[string]string {
	project, _ := gce.ProjectID()
	gceZone, _ := gce.Zone()
	cluster, _ := gce.InstanceAttributeValue("cluster-name")
	labels := map[string]string{
		"project_id":   project,
		"zone":         gceZone,
		"cluster_name": strings.TrimSpace(cluster),
		// Container name doesn't matter here, because the metric is exported
		// for the pod, not the container.
		"container_name": "",
		"pod_id":         podId,
		// namespace_id and instance_id don't matter.
		"namespace_id": "default",
		"instance_id":  "",
	}
	return labels
}
// getResourceLabelsForNewModel returns resource labels needed to correctly label metric data
// exported to StackDriver using the "k8s_pod" resource model. Labels contain details on the
// cluster (project id, location, name) and pod for which the metric is exported
// (namespace, name). Metadata-server lookup errors are deliberately ignored.
func getResourceLabelsForNewModel(namespace, name string) map[string]string {
	project, _ := gce.ProjectID()
	clusterLocation, _ := gce.InstanceAttributeValue("cluster-location")
	cluster, _ := gce.InstanceAttributeValue("cluster-name")
	labels := map[string]string{
		"project_id":     project,
		"location":       strings.TrimSpace(clusterLocation),
		"cluster_name":   strings.TrimSpace(cluster),
		"namespace_name": namespace,
		"pod_name":       name,
	}
	return labels
}
// exportMetric writes a single int64 data point for the custom metric
// "custom.googleapis.com/<metricName>" to Stackdriver, attached to the given
// monitored resource type and labels. The point is timestamped with the
// current time. Returns the error from the TimeSeries.Create call, if any.
func exportMetric(stackdriverService *monitoring.Service, metricName string,
	metricValue int64, metricLabels map[string]string, monitoredResource string, resourceLabels map[string]string) error {
	point := &monitoring.Point{
		Interval: &monitoring.TimeInterval{
			EndTime: time.Now().Format(time.RFC3339),
		},
		Value: &monitoring.TypedValue{
			Int64Value: &metricValue,
		},
	}
	series := &monitoring.TimeSeries{
		Metric: &monitoring.Metric{
			Type:   "custom.googleapis.com/" + metricName,
			Labels: metricLabels,
		},
		Resource: &monitoring.MonitoredResource{
			Type:   monitoredResource,
			Labels: resourceLabels,
		},
		Points: []*monitoring.Point{point},
	}
	request := &monitoring.CreateTimeSeriesRequest{
		TimeSeries: []*monitoring.TimeSeries{series},
	}
	// The project to write into is derived from the resource labels.
	projectName := fmt.Sprintf("projects/%s", resourceLabels["project_id"])
	_, err := stackdriverService.Projects.TimeSeries.Create(projectName, request).Do()
	return err
}
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright 1998-2016 Linux.org.ru
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ru.org.linux.site.tags;
import com.google.common.collect.Maps;
import de.neuland.jade4j.JadeConfiguration;
import de.neuland.jade4j.template.JadeTemplate;
import org.springframework.web.context.WebApplicationContext;
import org.springframework.web.context.support.WebApplicationContextUtils;
import ru.org.linux.comment.ApiCommentTopicInfo;
import ru.org.linux.comment.PreparedComment;
import ru.org.linux.topic.Topic;
import javax.servlet.jsp.tagext.TagSupport;
import java.util.Map;
/**
 * JSP tag that renders a single prepared comment through the shared
 * "TemplateComment" Jade template, resolved from the Spring web application
 * context.
 */
public class CommentTag extends TagSupport {
    private PreparedComment comment;
    private boolean enableSchema;
    private Topic topic;
    private boolean showMenu;
    private boolean commentsAllowed;

    public void setComment(PreparedComment comment) {
        this.comment = comment;
    }

    public void setEnableSchema(boolean enableSchema) {
        this.enableSchema = enableSchema;
    }

    public void setTopic(Topic topic) {
        this.topic = topic;
    }

    public void setShowMenu(boolean showMenu) {
        this.showMenu = showMenu;
    }

    public void setCommentsAllowed(boolean commentsAllowed) {
        this.commentsAllowed = commentsAllowed;
    }

    /**
     * Renders the comment template into the page output.
     *
     * @return {@link #SKIP_BODY} — the tag body is never evaluated
     */
    @Override
    public int doStartTag() {
        WebApplicationContext context =
                WebApplicationContextUtils.getWebApplicationContext(pageContext.getServletContext());
        JadeConfiguration jadeConfiguration = context.getBean(JadeConfiguration.class);
        JadeTemplate jadeTemplate = context.getBean("TemplateComment", JadeTemplate.class);

        Map<String, Object> data = Maps.newHashMap();
        data.put("comment", comment);
        data.put("enableSchema", enableSchema);
        // Only the fields the template needs are exposed, not the whole Topic.
        data.put("topic", new ApiCommentTopicInfo(
                topic.getId(),
                topic.getLink(),
                commentsAllowed
        ));
        data.put("showMenu", showMenu);
        // TODO: move to globals
        data.put("dateFormat", new SignTag.DateFormatHandler());

        jadeConfiguration.renderTemplate(jadeTemplate, data, pageContext.getOut());
        return SKIP_BODY;
    }
}
|
{
"pile_set_name": "Github"
}
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
"mime"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
"k8s.io/apiserver/pkg/storage/storagebackend"
)
// StorageCodecConfig are the arguments passed to newStorageCodecFn
type StorageCodecConfig struct {
StorageMediaType string
StorageSerializer runtime.StorageSerializer
StorageVersion schema.GroupVersion
MemoryVersion schema.GroupVersion
Config storagebackend.Config
EncoderDecoratorFn func(runtime.Encoder) runtime.Encoder
DecoderDecoratorFn func([]runtime.Decoder) []runtime.Decoder
}
// NewStorageCodec assembles a storage codec for the provided storage media type, the provided serializer, and the requested
// storage and memory versions.
func NewStorageCodec(opts StorageCodecConfig) (runtime.Codec, runtime.GroupVersioner, error) {
mediaType, _, err := mime.ParseMediaType(opts.StorageMediaType)
if err != nil {
return nil, nil, fmt.Errorf("%q is not a valid mime-type", opts.StorageMediaType)
}
serializer, ok := runtime.SerializerInfoForMediaType(opts.StorageSerializer.SupportedMediaTypes(), mediaType)
if !ok {
return nil, nil, fmt.Errorf("unable to find serializer for %q", mediaType)
}
s := serializer.Serializer
// Give callers the opportunity to wrap encoders and decoders. For decoders, each returned decoder will
// be passed to the recognizer so that multiple decoders are available.
var encoder runtime.Encoder = s
if opts.EncoderDecoratorFn != nil {
encoder = opts.EncoderDecoratorFn(encoder)
}
decoders := []runtime.Decoder{
// selected decoder as the primary
s,
// universal deserializer as a fallback
opts.StorageSerializer.UniversalDeserializer(),
// base64-wrapped universal deserializer as a last resort.
// this allows reading base64-encoded protobuf, which should only exist if etcd2+protobuf was used at some point.
// data written that way could exist in etcd2, or could have been migrated to etcd3.
// TODO: flag this type of data if we encounter it, require migration (read to decode, write to persist using a supported encoder), and remove in 1.8
runtime.NewBase64Serializer(nil, opts.StorageSerializer.UniversalDeserializer()),
}
if opts.DecoderDecoratorFn != nil {
decoders = opts.DecoderDecoratorFn(decoders)
}
encodeVersioner := runtime.NewMultiGroupVersioner(
opts.StorageVersion,
schema.GroupKind{Group: opts.StorageVersion.Group},
schema.GroupKind{Group: opts.MemoryVersion.Group},
)
// Ensure the storage receives the correct version.
encoder = opts.StorageSerializer.EncoderForVersion(
encoder,
encodeVersioner,
)
decoder := opts.StorageSerializer.DecoderToVersion(
recognizer.NewDecoder(decoders...),
runtime.NewCoercingMultiGroupVersioner(
opts.MemoryVersion,
schema.GroupKind{Group: opts.MemoryVersion.Group},
schema.GroupKind{Group: opts.StorageVersion.Group},
),
)
return runtime.NewCodec(encoder, decoder), encodeVersioner, nil
}
|
{
"pile_set_name": "Github"
}
|
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <cmath>
#include <complex>
#include <tuple>
#include <vector>
#include "dali/kernels/common/utils.h"
#include "dali/kernels/imgproc/convolution/baseline_convolution.h"
#include "dali/kernels/imgproc/convolution/separable_convolution_cpu.h"
#include "dali/kernels/scratch.h"
#include "dali/test/tensor_test_utils.h"
#include "dali/test/test_tensors.h"
namespace dali {
namespace kernels {
// Fills a 1-D window with a symmetric "triangle" ramp: 1, 2, ... rising to
// radius + 1 at the centre, then mirrored back down to 1. Note: for an
// even-sized window the final centre write overlaps the mirrored half (index
// radius is written twice), so callers below only use odd window sizes.
template <typename T>
void InitTriangleWindow(const TensorView<StorageCPU, T, 1> &window) {
  // Half-width of the window (integer division).
  int radius = window.num_elements() / 2;
  for (int i = 0; i < radius; i++) {
    // Write the ascending ramp onto both ends simultaneously.
    *window(i) = i + 1;
    *window(window.num_elements() - i - 1) = i + 1;
  }
  // Peak value at the centre element.
  *window(radius) = radius + 1;
}
// 1-axis convolution over a (width, channels) tensor with has_channels=true:
// the kernel output must match the naive baseline convolution along axis 0.
TEST(SeparableConvolutionTest, Axes1WithChannels) {
  std::array<int, 1> window_dims = {5};
  TestTensorList<float, 1> kernel_window;
  TestTensorList<float, 2> input;
  TestTensorList<int, 2> output, baseline_output;
  // One sample of 16 pixels x 3 channels.
  TensorListShape<2> data_shape = uniform_list_shape<2>(1, {16, 3});
  kernel_window.reshape(uniform_list_shape<1>(1, {window_dims[0]}));
  input.reshape(data_shape);
  output.reshape(data_shape);
  baseline_output.reshape(data_shape);
  auto kernel_window_v = kernel_window.cpu()[0];
  auto in_v = input.cpu()[0];
  auto out_v = output.cpu()[0];
  auto baseline_out_v = baseline_output.cpu()[0];
  // Default-seeded rng keeps the test deterministic across runs.
  std::mt19937 rng;
  UniformRandomFill(in_v, rng, 0, 255);
  InitTriangleWindow(kernel_window_v);
  // Template args: <out, in, window, axes, has_channels>.
  SeparableConvolutionCpu<int, float, float, 1, true> kernel;
  KernelContext ctx;
  auto req = kernel.Setup(ctx, data_shape[0], window_dims);
  // Allocate the scratch memory requested by Setup() before Run().
  ScratchpadAllocator scratch_alloc;
  scratch_alloc.Reserve(req.scratch_sizes);
  auto scratchpad = scratch_alloc.GetScratchpad();
  ctx.scratchpad = &scratchpad;
  kernel.Run(ctx, out_v, in_v,
             uniform_array<1, TensorView<StorageCPU, const float, 1>>(kernel_window_v));
  // Centered window: anchor at window_dims[0] / 2.
  testing::BaselineConvolve(baseline_out_v, in_v, kernel_window_v, 0, window_dims[0] / 2);
  Check(out_v, baseline_out_v);
}
// Same as Axes1WithChannels but with has_channels=false: the kernel consumes
// a purely 1-D view of the data, while the baseline still runs on the
// (width, 1) tensor and is compared through a flattened 1-D view.
TEST(SeparableConvolutionTest, Axes1NoChannels) {
  std::array<int, 1> window_dims = {5};
  TestTensorList<float, 1> kernel_window;
  TestTensorList<float, 2> input;
  TestTensorList<int, 1> output;
  TestTensorList<int, 2> baseline_output;
  // Single channel so the 2-D data can be reinterpreted as 1-D.
  TensorListShape<2> data_shape = uniform_list_shape<2>(1, {16, 1});
  kernel_window.reshape(uniform_list_shape<1>(1, {window_dims[0]}));
  input.reshape(data_shape);
  output.reshape(data_shape.first<1>());
  baseline_output.reshape(data_shape);
  auto kernel_window_v = kernel_window.cpu()[0];
  auto baseline_in_v = input.cpu()[0];
  // 1-D alias of the same input buffer (drops the trailing channel dim).
  TensorView<StorageCPU, float, 1> in_v = {baseline_in_v.data, baseline_in_v.shape.first<1>()};
  auto out_v = output.cpu()[0];
  auto baseline_out_v = baseline_output.cpu()[0];
  std::mt19937 rng;
  UniformRandomFill(in_v, rng, 0, 255);
  InitTriangleWindow(kernel_window_v);
  SeparableConvolutionCpu<int, float, float, 1, false> kernel;
  KernelContext ctx;
  auto req = kernel.Setup(ctx, data_shape[0].first<1>(), window_dims);
  ScratchpadAllocator scratch_alloc;
  scratch_alloc.Reserve(req.scratch_sizes);
  auto scratchpad = scratch_alloc.GetScratchpad();
  ctx.scratchpad = &scratchpad;
  kernel.Run(ctx, out_v, in_v,
             uniform_array<1, TensorView<StorageCPU, const float, 1>>(kernel_window_v));
  // Baseline operates on the original 2-D view of the same data.
  testing::BaselineConvolve(baseline_out_v, baseline_in_v, kernel_window_v, 0, window_dims[0] / 2);
  // Compare through a 1-D view of the baseline output buffer.
  TensorView<StorageCPU, int, 1> compare_v = {baseline_out_v.data, baseline_out_v.shape.first<1>()};
  Check(out_v, compare_v);
}
// 2-axis separable convolution over (height, width, channels): the result
// must equal applying the baseline convolution per-axis — innermost axis
// (axis 1, window 1) first into an intermediate buffer, then axis 0.
TEST(SeparableConvolutionTest, Axes2WithChannels) {
  std::array<int, 2> window_dims = {5, 7};
  TestTensorList<float, 1> kernel_window_0, kernel_window_1;
  TestTensorList<int, 3> input;
  // Intermediate is float: holds the partially-convolved data between axes.
  TestTensorList<float, 3> intermediate;
  TestTensorList<int, 3> output, baseline_output;
  TensorListShape<3> data_shape = uniform_list_shape<3>(1, {20, 16, 3});
  kernel_window_0.reshape(uniform_list_shape<1>(1, {window_dims[0]}));
  kernel_window_1.reshape(uniform_list_shape<1>(1, {window_dims[1]}));
  input.reshape(data_shape);
  intermediate.reshape(data_shape);
  output.reshape(data_shape);
  baseline_output.reshape(data_shape);
  auto kernel_window_0_v = kernel_window_0.cpu()[0];
  auto kernel_window_1_v = kernel_window_1.cpu()[0];
  auto in_v = input.cpu()[0];
  auto interm_v = intermediate.cpu()[0];
  auto out_v = output.cpu()[0];
  auto baseline_out_v = baseline_output.cpu()[0];
  std::mt19937 rng;
  UniformRandomFill(in_v, rng, 0, 255);
  InitTriangleWindow(kernel_window_0_v);
  InitTriangleWindow(kernel_window_1_v);
  SeparableConvolutionCpu<int, int, float, 2, true> kernel;
  // Compile-time check that the kernel promotes int data to a float
  // intermediate type.
  static_assert(
      std::is_same<typename SeparableConvolutionCpu<int, int, float, 2, true>::Intermediate,
                   float>::value,
      "Unexpected intermediate type");
  KernelContext ctx;
  auto req = kernel.Setup(ctx, data_shape[0], window_dims);
  ScratchpadAllocator scratch_alloc;
  scratch_alloc.Reserve(req.scratch_sizes);
  auto scratchpad = scratch_alloc.GetScratchpad();
  ctx.scratchpad = &scratchpad;
  kernel.Run(ctx, out_v, in_v, {kernel_window_0_v, kernel_window_1_v});
  // Baseline: convolve axis 1 with window 1, then axis 0 with window 0.
  testing::BaselineConvolve(interm_v, in_v, kernel_window_1_v, 1, window_dims[1] / 2);
  testing::BaselineConvolve(baseline_out_v, interm_v, kernel_window_0_v, 0, window_dims[0] / 2);
  Check(out_v, baseline_out_v);
}
// 2-axis variant without channels: kernel input/output are 2-D aliases of
// single-channel 3-D buffers; the baseline runs on the 3-D views and the
// final comparison flattens its output back to 2-D.
TEST(SeparableConvolutionTest, Axes2NoChannels) {
  std::array<int, 2> window_dims = {5, 7};
  TestTensorList<float, 1> kernel_window_0, kernel_window_1;
  TestTensorList<int, 3> input;
  TestTensorList<float, 3> intermediate;
  TestTensorList<int, 2> output;
  TestTensorList<int, 3> baseline_output;
  // Single channel so the 3-D data can be reinterpreted as 2-D.
  TensorListShape<3> data_shape = uniform_list_shape<3>(1, {20, 16, 1});
  kernel_window_0.reshape(uniform_list_shape<1>(1, {window_dims[0]}));
  kernel_window_1.reshape(uniform_list_shape<1>(1, {window_dims[1]}));
  input.reshape(data_shape);
  intermediate.reshape(data_shape);
  output.reshape(data_shape.first<2>());
  baseline_output.reshape(data_shape);
  auto kernel_window_0_v = kernel_window_0.cpu()[0];
  auto kernel_window_1_v = kernel_window_1.cpu()[0];
  auto baseline_in_v = input.cpu()[0];
  // 2-D alias of the same input buffer (drops the trailing channel dim).
  TensorView<StorageCPU, int, 2> in_v = {baseline_in_v.data, baseline_in_v.shape.first<2>()};
  auto interm_v = intermediate.cpu()[0];
  auto out_v = output.cpu()[0];
  auto baseline_out_v = baseline_output.cpu()[0];
  std::mt19937 rng;
  UniformRandomFill(in_v, rng, 0, 255);
  InitTriangleWindow(kernel_window_0_v);
  InitTriangleWindow(kernel_window_1_v);
  SeparableConvolutionCpu<int, int, float, 2, false> kernel;
  // Compile-time check of the promoted intermediate type.
  static_assert(
      std::is_same<typename SeparableConvolutionCpu<int, int, float, 2, false>::Intermediate,
                   float>::value,
      "Unexpected intermediate type");
  KernelContext ctx;
  auto req = kernel.Setup(ctx, data_shape[0].first<2>(), window_dims);
  ScratchpadAllocator scratch_alloc;
  scratch_alloc.Reserve(req.scratch_sizes);
  auto scratchpad = scratch_alloc.GetScratchpad();
  ctx.scratchpad = &scratchpad;
  kernel.Run(ctx, out_v, in_v, {kernel_window_0_v, kernel_window_1_v});
  // Baseline: axis 1 first (window 1), then axis 0 (window 0).
  testing::BaselineConvolve(interm_v, baseline_in_v, kernel_window_1_v, 1, window_dims[1] / 2);
  testing::BaselineConvolve(baseline_out_v, interm_v, kernel_window_0_v, 0, window_dims[0] / 2);
  // Compare through a 2-D view of the baseline output buffer.
  TensorView<StorageCPU, int, 2> compare_v = {baseline_out_v.data, baseline_out_v.shape.first<2>()};
  Check(out_v, compare_v);
}
TEST(SeparableConvolutionTest, Axes3WithChannels) {
  // 3-axis separable convolution on a 4D sample where the last extent (3)
  // is an interleaved channel dimension (has_channels = true). The channel
  // axis itself is never convolved: the baseline only filters axes 2, 1, 0.
  std::array<int, 3> window_dims = {5, 7, 3};
  TestTensorList<uint16_t, 1> kernel_window_0, kernel_window_1, kernel_window_2;
  TestTensorList<int16_t, 4> input;
  // Two scratch buffers for the baseline's axis-by-axis passes.
  TestTensorList<int, 4> intermediate_0, intermediate_1;
  TestTensorList<int16_t, 4> output, baseline_output;
  TensorListShape<4> data_shape = uniform_list_shape<4>(1, {14, 20, 16, 3});
  kernel_window_0.reshape(uniform_list_shape<1>(1, {window_dims[0]}));
  kernel_window_1.reshape(uniform_list_shape<1>(1, {window_dims[1]}));
  kernel_window_2.reshape(uniform_list_shape<1>(1, {window_dims[2]}));
  input.reshape(data_shape);
  intermediate_0.reshape(data_shape);
  intermediate_1.reshape(data_shape);
  output.reshape(data_shape);
  baseline_output.reshape(data_shape);
  auto kernel_window_0_v = kernel_window_0.cpu()[0];
  auto kernel_window_1_v = kernel_window_1.cpu()[0];
  auto kernel_window_2_v = kernel_window_2.cpu()[0];
  auto in_v = input.cpu()[0];
  auto interm_0_v = intermediate_0.cpu()[0];
  auto interm_1_v = intermediate_1.cpu()[0];
  auto out_v = output.cpu()[0];
  auto baseline_out_v = baseline_output.cpu()[0];
  std::mt19937 rng;
  UniformRandomFill(in_v, rng, 0, 255);
  InitTriangleWindow(kernel_window_0_v);
  InitTriangleWindow(kernel_window_1_v);
  InitTriangleWindow(kernel_window_2_v);
  // int16_t in/out with uint16_t windows, has_channels = true.
  SeparableConvolutionCpu<int16_t, int16_t, uint16_t, 3, true> kernel;
  // Integer windows over 16-bit data must widen the accumulator to int.
  static_assert(
      std::is_same<
          typename SeparableConvolutionCpu<int16_t, int16_t, uint16_t, 3, true>::Intermediate,
          int>::value,
      "Unexpected intermediate type");
  KernelContext ctx;
  auto req = kernel.Setup(ctx, data_shape[0], window_dims);
  ScratchpadAllocator scratch_alloc;
  scratch_alloc.Reserve(req.scratch_sizes);
  auto scratchpad = scratch_alloc.GetScratchpad();
  ctx.scratchpad = &scratchpad;
  kernel.Run(ctx, out_v, in_v, {kernel_window_0_v, kernel_window_1_v, kernel_window_2_v});
  // Reference: convolve innermost spatial axis first (2), then 1, then 0,
  // each with symmetric padding of half the window size.
  testing::BaselineConvolve(interm_0_v, in_v, kernel_window_2_v, 2, window_dims[2] / 2);
  testing::BaselineConvolve(interm_1_v, interm_0_v, kernel_window_1_v, 1, window_dims[1] / 2);
  testing::BaselineConvolve(baseline_out_v, interm_1_v, kernel_window_0_v, 0, window_dims[0] / 2);
  Check(out_v, baseline_out_v);
}
TEST(SeparableConvolutionTest, Axes3NoChannels) {
  // 3-axis separable convolution on a 3D sample with no channel dimension
  // and a widening output type (int input, float output). As in the 2-axis
  // no-channel case, storage is 4D with a trailing extent of 1 so the 4D
  // BaselineConvolve helper can be reused; the tested kernel runs on 3D views.
  std::array<int, 3> window_dims = {5, 7, 3};
  TestTensorList<float, 1> kernel_window_0, kernel_window_1, kernel_window_2;
  TestTensorList<int, 4> input;
  TestTensorList<float, 4> intermediate_0, intermediate_1;
  TestTensorList<float, 3> output;
  TestTensorList<float, 4> baseline_output;
  TensorListShape<4> data_shape = uniform_list_shape<4>(1, {14, 20, 16, 1});
  kernel_window_0.reshape(uniform_list_shape<1>(1, {window_dims[0]}));
  kernel_window_1.reshape(uniform_list_shape<1>(1, {window_dims[1]}));
  kernel_window_2.reshape(uniform_list_shape<1>(1, {window_dims[2]}));
  input.reshape(data_shape);
  intermediate_0.reshape(data_shape);
  intermediate_1.reshape(data_shape);
  // The tested output drops the dummy trailing dim; the baseline keeps it.
  output.reshape(data_shape.first<3>());
  baseline_output.reshape(data_shape);
  auto kernel_window_0_v = kernel_window_0.cpu()[0];
  auto kernel_window_1_v = kernel_window_1.cpu()[0];
  auto kernel_window_2_v = kernel_window_2.cpu()[0];
  auto baseline_in_v = input.cpu()[0];
  // 3D alias of the same buffer (first three extents of the 4D shape).
  TensorView<StorageCPU, int, 3> in_v = {baseline_in_v.data, baseline_in_v.shape.first<3>()};
  auto interm_0_v = intermediate_0.cpu()[0];
  auto interm_1_v = intermediate_1.cpu()[0];
  auto out_v = output.cpu()[0];
  auto baseline_out_v = baseline_output.cpu()[0];
  std::mt19937 rng;
  UniformRandomFill(in_v, rng, 0, 255);
  InitTriangleWindow(kernel_window_0_v);
  InitTriangleWindow(kernel_window_1_v);
  InitTriangleWindow(kernel_window_2_v);
  // float out, int in, float windows, has_channels = false.
  SeparableConvolutionCpu<float, int, float, 3, false> kernel;
  // Float windows imply a float accumulator.
  static_assert(
      std::is_same<typename SeparableConvolutionCpu<float, int, float, 3, false>::Intermediate,
                   float>::value,
      "Unexpected intermediate type");
  KernelContext ctx;
  auto req = kernel.Setup(ctx, data_shape[0].first<3>(), window_dims);
  ScratchpadAllocator scratch_alloc;
  scratch_alloc.Reserve(req.scratch_sizes);
  auto scratchpad = scratch_alloc.GetScratchpad();
  ctx.scratchpad = &scratchpad;
  kernel.Run(ctx, out_v, in_v, {kernel_window_0_v, kernel_window_1_v, kernel_window_2_v});
  // Reference: convolve innermost axis first (2), then 1, then 0, each with
  // symmetric padding of half the window size.
  testing::BaselineConvolve(interm_0_v, baseline_in_v, kernel_window_2_v, 2, window_dims[2] / 2);
  testing::BaselineConvolve(interm_1_v, interm_0_v, kernel_window_1_v, 1, window_dims[1] / 2);
  testing::BaselineConvolve(baseline_out_v, interm_1_v, kernel_window_0_v, 0, window_dims[0] / 2);
  // Compare through a 3D alias of the 4D baseline buffer.
  TensorView<StorageCPU, float, 3> compare_v = {baseline_out_v.data,
                                                baseline_out_v.shape.first<3>()};
  Check(out_v, compare_v);
}
} // namespace kernels
} // namespace dali
|
{
"pile_set_name": "Github"
}
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-12-19 12:35
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration (1.11): enforce a composite uniqueness
    constraint of (host, macaddress, ip) on the networkcard_assets model in
    the CMDB app.
    """

    # Must be applied after the previous CMDB migration in the chain.
    dependencies = [
        ('CMDB', '0036_auto_20181213_1404'),
    ]

    operations = [
        # Replaces any previous unique_together setting on networkcard_assets
        # with the single composite key (host, macaddress, ip).
        migrations.AlterUniqueTogether(
            name='networkcard_assets',
            unique_together=set([('host', 'macaddress', 'ip')]),
        ),
    ]
|
{
"pile_set_name": "Github"
}
|
// go run mksyscall.go -l32 -openbsd -arm -tags openbsd,arm syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm.go
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build openbsd,arm
package unix
import (
"syscall"
"unsafe"
)
var _ syscall.Errno
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setgroups(ngid int, gid *_Gid_t) (err error) {
_, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
wpid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
_, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
_, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func socket(domain int, typ int, proto int) (fd int, err error) {
r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
_, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
_, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
_, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
_, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Shutdown(s int, how int) (err error) {
_, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
_, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) {
r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func utimes(path string, timeval *[2]Timeval) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func futimes(fd int, timeval *[2]Timeval) (err error) {
_, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Madvise(b []byte, behav int) (err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mlock(b []byte) (err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mlockall(flags int) (err error) {
_, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mprotect(b []byte, prot int) (err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Msync(b []byte, flags int) (err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Munlock(b []byte) (err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Munlockall() (err error) {
_, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe2(p *[2]_C_int, flags int) (err error) {
_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getdents(fd int, buf []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getcwd(buf []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ioctl(fd int, req uint, arg uintptr) (err error) {
_, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
var _p0 unsafe.Pointer
if len(mib) > 0 {
_p0 = unsafe.Pointer(&mib[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Access(path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Adjtime(delta *Timeval, olddelta *Timeval) (err error) {
_, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chflags(path string, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chmod(path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chown(path string, uid int, gid int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chroot(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Dup(fd int) (nfd int, err error) {
r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0)
nfd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Dup2(from int, to int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Dup3(from int, to int, flags int) (err error) {
_, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Exit(code int) {
Syscall(SYS_EXIT, uintptr(code), 0, 0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchdir(fd int) (err error) {
_, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchflags(fd int, flags int) (err error) {
_, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchmod(fd int, mode uint32) (err error) {
_, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchown(fd int, uid int, gid int) (err error) {
_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Flock(fd int, how int) (err error) {
_, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fpathconf(fd int, name int) (val int, err error) {
r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0)
val = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstatfs(fd int, stat *Statfs_t) (err error) {
_, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fsync(fd int) (err error) {
_, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Ftruncate(fd int, length int64) (err error) {
_, _, e1 := Syscall6(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getegid() (egid int) {
r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0)
egid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Geteuid() (uid int) {
r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0)
uid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getgid() (gid int) {
r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0)
gid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpgid(pid int) (pgid int, err error) {
r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0)
pgid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpgrp() (pgrp int) {
r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0)
pgrp = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpid() (pid int) {
r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0)
pid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getppid() (ppid int) {
r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0)
ppid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpriority(which int, who int) (prio int, err error) {
r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0)
prio = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getrlimit(which int, lim *Rlimit) (err error) {
_, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getrtable() (rtable int, err error) {
r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0)
rtable = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getrusage(who int, rusage *Rusage) (err error) {
_, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getsid(pid int) (sid int, err error) {
r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0)
sid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Gettimeofday(tv *Timeval) (err error) {
_, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getuid() (uid int) {
r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
uid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Issetugid() (tainted bool) {
r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0)
tainted = bool(r0 != 0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Kill(pid int, signum syscall.Signal) (err error) {
_, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Kqueue() (fd int, err error) {
r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0)
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Lchown(path string, uid int, gid int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Link(path string, link string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(link)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(link)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Listen(s int, backlog int) (err error) {
_, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Lstat(path string, stat *Stat_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkdir(path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkdirat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkfifo(path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkfifoat(dirfd int, path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mknod(path string, mode uint32, dev int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
_, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Open(path string, mode int, perm uint32) (fd int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0)
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Pathconf wraps pathconf(2): query the configuration limit identified by
// name for the file at path.
func Pathconf(path string, name int) (val int, err error) {
	var _p0 *byte
	_p0, err = BytePtrFromString(path)
	if err != nil {
		return
	}
	r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0)
	val = int(r0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Pread wraps pread(2): read up to len(p) bytes from fd at the given offset
// without moving the file cursor. NOTE(review): the 64-bit offset is passed
// as two 32-bit halves after a padding argument — looks like a 32-bit ABI
// alignment convention; confirm against the target architecture.
func Pread(fd int, p []byte, offset int64) (n int, err error) {
	var _p0 unsafe.Pointer
	if len(p) > 0 {
		_p0 = unsafe.Pointer(&p[0])
	} else {
		// the kernel still needs a valid address for zero-length buffers
		_p0 = unsafe.Pointer(&_zero)
	}
	r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32))
	n = int(r0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Pwrite wraps pwrite(2): write len(p) bytes to fd at the given offset
// without moving the file cursor. Offset passing mirrors Pread (split into
// 32-bit halves after a padding argument).
func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
	var _p0 unsafe.Pointer
	if len(p) > 0 {
		_p0 = unsafe.Pointer(&p[0])
	} else {
		// valid address even for an empty slice
		_p0 = unsafe.Pointer(&_zero)
	}
	r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32))
	n = int(r0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// read wraps read(2); unexported low-level primitive used by the package's
// higher-level Read helpers.
func read(fd int, p []byte) (n int, err error) {
	var _p0 unsafe.Pointer
	if len(p) > 0 {
		_p0 = unsafe.Pointer(&p[0])
	} else {
		// valid address even for an empty slice
		_p0 = unsafe.Pointer(&_zero)
	}
	r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p)))
	n = int(r0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Readlink wraps readlink(2): read the target of the symbolic link at path
// into buf, returning the number of bytes placed in buf (not NUL-terminated).
func Readlink(path string, buf []byte) (n int, err error) {
	var _p0 *byte
	_p0, err = BytePtrFromString(path)
	if err != nil {
		return
	}
	var _p1 unsafe.Pointer
	if len(buf) > 0 {
		_p1 = unsafe.Pointer(&buf[0])
	} else {
		_p1 = unsafe.Pointer(&_zero)
	}
	r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)))
	n = int(r0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Readlinkat wraps readlinkat(2): like Readlink, but a relative path is
// resolved against the directory referred to by dirfd.
func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
	var _p0 *byte
	_p0, err = BytePtrFromString(path)
	if err != nil {
		return
	}
	var _p1 unsafe.Pointer
	if len(buf) > 0 {
		_p1 = unsafe.Pointer(&buf[0])
	} else {
		_p1 = unsafe.Pointer(&_zero)
	}
	r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0)
	n = int(r0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Rename wraps rename(2): atomically rename the file at from to to.
func Rename(from string, to string) (err error) {
	var _p0 *byte
	_p0, err = BytePtrFromString(from)
	if err != nil {
		return
	}
	var _p1 *byte
	_p1, err = BytePtrFromString(to)
	if err != nil {
		return
	}
	_, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Renameat wraps renameat(2): like Rename, but relative paths are resolved
// against the directories referred to by fromfd and tofd respectively.
func Renameat(fromfd int, from string, tofd int, to string) (err error) {
	var _p0 *byte
	_p0, err = BytePtrFromString(from)
	if err != nil {
		return
	}
	var _p1 *byte
	_p1, err = BytePtrFromString(to)
	if err != nil {
		return
	}
	_, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Revoke wraps revoke(2): invalidate all open file descriptors referring to
// the file at path (BSD-family call, typically used for terminals).
func Revoke(path string) (err error) {
	var _p0 *byte
	_p0, err = BytePtrFromString(path)
	if err != nil {
		return
	}
	_, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Rmdir wraps rmdir(2): remove the empty directory at path.
func Rmdir(path string) (err error) {
	var _p0 *byte
	_p0, err = BytePtrFromString(path)
	if err != nil {
		return
	}
	_, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Seek wraps lseek(2): reposition fd's file offset per whence and return the
// new offset. The 64-bit offset goes in as two 32-bit halves after a padding
// argument and the result is reassembled from r0 (low) and r1 (high) —
// NOTE(review): 32-bit ABI convention; confirm target architecture.
func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
	r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0)
	newoffset = int64(int64(r1)<<32 | int64(r0))
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Select wraps select(2): wait until one of the descriptors in the r/w/e
// sets is ready or timeout elapses; returns the number of ready descriptors.
// Any of the set and timeout pointers may be nil per the syscall contract.
func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
	r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
	n = int(r0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Setegid wraps setegid(2): set the effective group id of the process.
func Setegid(egid int) (err error) {
	_, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Seteuid wraps seteuid(2): set the effective user id of the process.
func Seteuid(euid int) (err error) {
	_, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Setgid wraps setgid(2): set the group id of the process.
func Setgid(gid int) (err error) {
	_, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Setlogin wraps setlogin(2): set the login name of the current session.
func Setlogin(name string) (err error) {
	var _p0 *byte
	_p0, err = BytePtrFromString(name)
	if err != nil {
		return
	}
	_, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Setpgid wraps setpgid(2): set the process group id of process pid to pgid.
func Setpgid(pid int, pgid int) (err error) {
	_, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Setpriority wraps setpriority(2): set the scheduling priority (nice value)
// of the process, process group, or user selected by which/who.
func Setpriority(which int, who int, prio int) (err error) {
	_, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Setregid wraps setregid(2): set the real and effective group ids.
func Setregid(rgid int, egid int) (err error) {
	_, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Setreuid wraps setreuid(2): set the real and effective user ids.
func Setreuid(ruid int, euid int) (err error) {
	_, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Setresgid wraps setresgid(2): set the real, effective, and saved group ids.
func Setresgid(rgid int, egid int, sgid int) (err error) {
	_, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Setresuid wraps setresuid(2): set the real, effective, and saved user ids.
func Setresuid(ruid int, euid int, suid int) (err error) {
	_, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Setrlimit wraps setrlimit(2): set the resource limit selected by which.
func Setrlimit(which int, lim *Rlimit) (err error) {
	_, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Setrtable wraps setrtable(2): set the routing table of the process
// (OpenBSD-specific call).
func Setrtable(rtable int) (err error) {
	_, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Setsid wraps setsid(2): create a new session with the calling process as
// leader, returning the new session/process-group id.
func Setsid() (pid int, err error) {
	r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0)
	pid = int(r0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Settimeofday wraps settimeofday(2): set the system clock to *tp.
func Settimeofday(tp *Timeval) (err error) {
	_, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Setuid wraps setuid(2): set the user id of the process.
func Setuid(uid int) (err error) {
	_, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Stat wraps stat(2): fill *stat with metadata for the file at path,
// following symbolic links.
func Stat(path string, stat *Stat_t) (err error) {
	var _p0 *byte
	_p0, err = BytePtrFromString(path)
	if err != nil {
		return
	}
	_, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Statfs wraps statfs(2): fill *stat with information about the filesystem
// containing the file at path.
func Statfs(path string, stat *Statfs_t) (err error) {
	var _p0 *byte
	_p0, err = BytePtrFromString(path)
	if err != nil {
		return
	}
	_, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Symlink wraps symlink(2): create a symbolic link at link pointing to path.
func Symlink(path string, link string) (err error) {
	var _p0 *byte
	_p0, err = BytePtrFromString(path)
	if err != nil {
		return
	}
	var _p1 *byte
	_p1, err = BytePtrFromString(link)
	if err != nil {
		return
	}
	_, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Symlinkat wraps symlinkat(2): create a symbolic link at newpath (resolved
// relative to newdirfd) pointing to oldpath.
func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
	var _p0 *byte
	_p0, err = BytePtrFromString(oldpath)
	if err != nil {
		return
	}
	var _p1 *byte
	_p1, err = BytePtrFromString(newpath)
	if err != nil {
		return
	}
	_, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)))
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Sync wraps sync(2): schedule all filesystem buffers to be flushed to disk.
func Sync() (err error) {
	_, _, e1 := Syscall(SYS_SYNC, 0, 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Truncate wraps truncate(2): resize the file at path to length bytes.
// The 64-bit length is passed as two 32-bit halves after a padding argument
// (same ABI convention as Pread/Pwrite in this file).
func Truncate(path string, length int64) (err error) {
	var _p0 *byte
	_p0, err = BytePtrFromString(path)
	if err != nil {
		return
	}
	_, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Umask wraps umask(2): set the file creation mask to newmask and return the
// previous mask. umask cannot fail, so no error is returned.
func Umask(newmask int) (oldmask int) {
	r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0)
	oldmask = int(r0)
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Unlink wraps unlink(2): remove the directory entry at path.
func Unlink(path string) (err error) {
	var _p0 *byte
	_p0, err = BytePtrFromString(path)
	if err != nil {
		return
	}
	_, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Unlinkat wraps unlinkat(2): like Unlink, but a relative path is resolved
// against dirfd, and flags can select directory removal.
func Unlinkat(dirfd int, path string, flags int) (err error) {
	var _p0 *byte
	_p0, err = BytePtrFromString(path)
	if err != nil {
		return
	}
	_, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Unmount wraps unmount(2): detach the filesystem mounted at path.
func Unmount(path string, flags int) (err error) {
	var _p0 *byte
	_p0, err = BytePtrFromString(path)
	if err != nil {
		return
	}
	_, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// write wraps write(2); unexported low-level primitive used by the package's
// higher-level Write helpers.
func write(fd int, p []byte) (n int, err error) {
	var _p0 unsafe.Pointer
	if len(p) > 0 {
		_p0 = unsafe.Pointer(&p[0])
	} else {
		// valid address even for an empty slice
		_p0 = unsafe.Pointer(&_zero)
	}
	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)))
	n = int(r0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// mmap wraps mmap(2): map length bytes of fd at file position pos into
// memory. The 64-bit pos is split into 32-bit halves after a padding
// argument (same ABI convention as Pread in this file).
func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) {
	r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0)
	ret = uintptr(r0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// munmap wraps munmap(2): remove the mapping of length bytes at addr.
func munmap(addr uintptr, length uintptr) (err error) {
	_, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// readlen performs read(2) with an explicit buffer pointer and length rather
// than a slice; internal helper.
func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
	r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
	n = int(r0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// writelen performs write(2) with an explicit buffer pointer and length
// rather than a slice; internal helper.
func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
	n = int(r0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// utimensat wraps utimensat(2): set the access and modification timestamps
// of the file at path (resolved relative to dirfd) to times[0]/times[1].
func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) {
	var _p0 *byte
	_p0, err = BytePtrFromString(path)
	if err != nil {
		return
	}
	_, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright (c) 2007-2017 Xplenty, Inc. All Rights Reserved.
*
* Project and contact information: http://www.cascading.org/
*
* This file is part of the Cascading project.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cascading.tuple.io;
import java.util.List;
import cascading.tuple.Tuple;
/** Class IndexTuple allows for managing an int index value with a Tuple instance. Used internally for co-grouping values. */
/**
 * Class IndexTuple allows for managing an int index value with a {@link Tuple} instance.
 * Used internally for co-grouping values, where {@code index} identifies which co-group
 * ordinal the wrapped tuple belongs to.
 */
public class IndexTuple extends Tuple implements Comparable<Object>
  {
  /** Co-group ordinal associated with the wrapped tuple. */
  int index;
  /** The wrapped tuple; may be null until {@link #setTuple(Tuple)} is called. */
  Tuple tuple;

  /** Constructor IndexTuple creates a new, empty IndexTuple instance. */
  public IndexTuple()
    {
    super( (List<Object>) null );
    }

  /**
   * Constructor IndexTuple creates a new IndexTuple instance.
   *
   * @param index of type int
   * @param tuple of type Tuple
   */
  public IndexTuple( int index, Tuple tuple )
    {
    super( (List<Comparable>) null );
    this.index = index;
    this.tuple = tuple;
    }

  /** Sets the co-group ordinal. */
  public void setIndex( int index )
    {
    this.index = index;
    }

  /** Returns the co-group ordinal. */
  public int getIndex()
    {
    return index;
    }

  /** Sets the wrapped tuple. */
  public void setTuple( Tuple tuple )
    {
    this.tuple = tuple;
    }

  /** Returns the wrapped tuple. */
  public Tuple getTuple()
    {
    return tuple;
    }

  /** Renders this instance as {@code {index:tuple}}. */
  @Override
  public String print()
    {
    return printTo( new StringBuffer() ).toString();
    }

  /**
   * Appends the {@code {index:tuple}} rendering of this instance to the given buffer.
   *
   * @param buffer the buffer to append to; returned for chaining
   */
  public StringBuffer printTo( StringBuffer buffer )
    {
    buffer.append( "{" );
    buffer.append( index ).append( ":" );
    tuple.printTo( buffer );
    buffer.append( "}" );
    return buffer;
    }

  /**
   * Compares against another object. Any non-IndexTuple argument sorts after this
   * instance (returns -1), preserving historical behavior.
   */
  public int compareTo( Object object )
    {
    if( object instanceof IndexTuple )
      return compareTo( (IndexTuple) object );

    return -1;
    }

  /** Orders by index first, then by the wrapped tuple. */
  public int compareTo( IndexTuple indexTuple )
    {
    // Integer.compare avoids the int-overflow hazard of subtracting indexes
    int c = Integer.compare( this.index, indexTuple.index );

    if( c != 0 )
      return c;

    // NOTE(review): NPEs if either wrapped tuple is null, as before
    return this.tuple.compareTo( indexTuple.tuple );
    }

  @Override
  public boolean equals( Object object )
    {
    if( this == object )
      return true;

    if( object == null || getClass() != object.getClass() )
      return false;

    IndexTuple that = (IndexTuple) object;

    if( index != that.index )
      return false;

    if( tuple != null ? !tuple.equals( that.tuple ) : that.tuple != null )
      return false;

    return true;
    }

  @Override
  public int hashCode()
    {
    int result = index;
    result = 31 * result + ( tuple != null ? tuple.hashCode() : 0 );
    return result;
    }

  @Override
  public String toString()
    {
    return "[" + index + "]" + tuple;
    }
  }
|
{
"pile_set_name": "Github"
}
|
// Copyright 2017-2020 Lei Ni (nilei81@gmail.com) and other Dragonboat authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rsm
import (
"crypto/md5"
"encoding/binary"
"sort"
"strings"
"github.com/lni/goutils/logutil"
pb "github.com/lni/dragonboat/v3/raftpb"
)
// addressEqual reports whether two node addresses are equivalent, ignoring
// surrounding whitespace and ASCII/Unicode case differences.
func addressEqual(addr1 string, addr2 string) bool {
	a := strings.TrimSpace(addr1)
	b := strings.TrimSpace(addr2)
	return strings.EqualFold(a, b)
}
// deepCopyMembership returns a fully independent copy of m, so mutations of
// either value can never leak into the other through shared maps.
func deepCopyMembership(m pb.Membership) pb.Membership {
	out := pb.Membership{
		ConfigChangeId: m.ConfigChangeId,
		Addresses:      make(map[uint64]string, len(m.Addresses)),
		Removed:        make(map[uint64]bool, len(m.Removed)),
		Observers:      make(map[uint64]string, len(m.Observers)),
		Witnesses:      make(map[uint64]string, len(m.Witnesses)),
	}
	for id, address := range m.Addresses {
		out.Addresses[id] = address
	}
	for id := range m.Removed {
		out.Removed[id] = true
	}
	for id, address := range m.Observers {
		out.Observers[id] = address
	}
	for id, address := range m.Witnesses {
		out.Witnesses[id] = address
	}
	return out
}
// membership tracks the raft cluster membership state applied by a state
// machine instance.
type membership struct {
	clusterID uint64         // cluster this state machine belongs to
	nodeID    uint64         // local node id, used for log identification
	ordered   bool           // when true, config changes must carry the current ConfigChangeId
	members   *pb.Membership // current membership maps; replaced wholesale by set()
}
// newMembership returns an empty membership tracker for the given cluster
// and node, with all membership maps initialized.
func newMembership(clusterID uint64, nodeID uint64, ordered bool) *membership {
	return &membership{
		clusterID: clusterID,
		nodeID:    nodeID,
		ordered:   ordered,
		members: &pb.Membership{
			Addresses: make(map[uint64]string),
			Observers: make(map[uint64]string),
			Removed:   make(map[uint64]bool),
			Witnesses: make(map[uint64]string),
		},
	}
}
// id returns a human-readable identifier of this state machine for logging.
func (m *membership) id() string {
	return logutil.DescribeSM(m.clusterID, m.nodeID)
}
// set replaces the tracked membership with a deep copy of n, so the caller's
// value stays independent of future mutations here.
func (m *membership) set(n pb.Membership) {
	copied := deepCopyMembership(n)
	m.members = &copied
}
// get returns a deep copy of the current membership; callers may mutate the
// result freely.
func (m *membership) get() pb.Membership {
	return deepCopyMembership(*m.members)
}
// getHash returns a deterministic 64-bit digest of the membership: the
// sorted node ids plus the config change id, folded through md5. md5 is used
// here as a stable checksum for consistency checking, not for security.
// The exact byte layout must not change or replicas will disagree.
func (m *membership) getHash() uint64 {
	vals := make([]uint64, 0)
	for v := range m.members.Addresses {
		vals = append(vals, v)
	}
	// map iteration order is random; sort for a deterministic digest
	sort.Slice(vals, func(i, j int) bool { return vals[i] < vals[j] })
	vals = append(vals, m.members.ConfigChangeId)
	data := make([]byte, 8)
	hash := md5.New()
	for _, v := range vals {
		binary.LittleEndian.PutUint64(data, v)
		if _, err := hash.Write(data); err != nil {
			panic(err)
		}
	}
	md5sum := hash.Sum(nil)
	return binary.LittleEndian.Uint64(md5sum[:8])
}
// isEmpty reports whether the cluster has no full members (observers,
// witnesses and removed nodes are not counted).
func (m *membership) isEmpty() bool {
	return len(m.members.Addresses) == 0
}
// isConfChangeUpToDate reports whether cc may be applied with respect to
// ordering: unordered state machines and initialization entries always pass;
// otherwise cc must carry the membership's current config change id.
func (m *membership) isConfChangeUpToDate(cc pb.ConfigChange) bool {
	return !m.ordered || cc.Initialize ||
		m.members.ConfigChangeId == cc.ConfigChangeId
}
// isAddingRemovedNode reports whether cc attempts to re-add a node id that
// was previously removed from the cluster (disallowed in any role).
func (m *membership) isAddingRemovedNode(cc pb.ConfigChange) bool {
	switch cc.Type {
	case pb.AddNode, pb.AddObserver, pb.AddWitness:
		_, removed := m.members.Removed[cc.NodeID]
		return removed
	default:
		return false
	}
}
// isPromotingObserver reports whether cc promotes an existing observer to a
// full member: an AddNode for a known observer id with a matching address.
func (m *membership) isPromotingObserver(cc pb.ConfigChange) bool {
	if cc.Type == pb.AddNode {
		oa, ok := m.members.Observers[cc.NodeID]
		return ok && addressEqual(oa, string(cc.Address))
	}
	return false
}
// isInvalidObserverPromotion reports whether cc tries to promote a known
// observer to full member while giving it a different address (rejected).
func (m *membership) isInvalidObserverPromotion(cc pb.ConfigChange) bool {
	if cc.Type != pb.AddNode {
		return false
	}
	addr, isObserver := m.members.Observers[cc.NodeID]
	return isObserver && !addressEqual(addr, string(cc.Address))
}
// isAddingExistingMember reports whether cc would add a node id or address
// that is already present in any role. The observer-promotion exception is
// checked after the id checks but before the address scan — this ordering is
// intentional: a valid promotion reuses the observer's address, so it must
// short-circuit before the address comparisons below.
func (m *membership) isAddingExistingMember(cc pb.ConfigChange) bool {
	// try to add again with the same node ID
	if cc.Type == pb.AddNode {
		_, ok := m.members.Addresses[cc.NodeID]
		if ok {
			return true
		}
	}
	if cc.Type == pb.AddObserver {
		_, ok := m.members.Observers[cc.NodeID]
		if ok {
			return true
		}
	}
	if cc.Type == pb.AddWitness {
		_, ok := m.members.Witnesses[cc.NodeID]
		if ok {
			return true
		}
	}
	// a legitimate observer promotion reuses the same address on purpose
	if m.isPromotingObserver(cc) {
		return false
	}
	if cc.Type == pb.AddNode ||
		cc.Type == pb.AddObserver ||
		cc.Type == pb.AddWitness {
		// the address must be unique across all roles
		for _, addr := range m.members.Addresses {
			if addressEqual(addr, string(cc.Address)) {
				return true
			}
		}
		for _, addr := range m.members.Observers {
			if addressEqual(addr, string(cc.Address)) {
				return true
			}
		}
		for _, addr := range m.members.Witnesses {
			if addressEqual(addr, string(cc.Address)) {
				return true
			}
		}
	}
	return false
}
// isAddingNodeAsObserver reports whether cc demotes an existing full member
// by re-adding its node id as an observer (rejected).
func (m *membership) isAddingNodeAsObserver(cc pb.ConfigChange) bool {
	if cc.Type != pb.AddObserver {
		return false
	}
	_, isFullMember := m.members.Addresses[cc.NodeID]
	return isFullMember
}
// isAddingNodeAsWitness reports whether cc tries to re-add an existing full
// member's id as a witness (rejected).
func (m *membership) isAddingNodeAsWitness(cc pb.ConfigChange) bool {
	if cc.Type == pb.AddWitness {
		_, ok := m.members.Addresses[cc.NodeID]
		return ok
	}
	return false
}
// isAddingWitnessAsObserver reports whether cc tries to re-add an existing
// witness's id as an observer (rejected).
func (m *membership) isAddingWitnessAsObserver(cc pb.ConfigChange) bool {
	if cc.Type == pb.AddObserver {
		_, ok := m.members.Witnesses[cc.NodeID]
		return ok
	}
	return false
}
// isAddingWitnessAsNode reports whether cc tries to promote an existing
// witness to a full member (rejected).
func (m *membership) isAddingWitnessAsNode(cc pb.ConfigChange) bool {
	if cc.Type == pb.AddNode {
		_, ok := m.members.Witnesses[cc.NodeID]
		return ok
	}
	return false
}
// isAddingObserverAsWitness reports whether cc tries to re-add an existing
// observer's id as a witness (rejected).
func (m *membership) isAddingObserverAsWitness(cc pb.ConfigChange) bool {
	if cc.Type == pb.AddWitness {
		_, ok := m.members.Observers[cc.NodeID]
		return ok
	}
	return false
}
// isDeletingOnlyNode reports whether cc would remove the last remaining full
// member, which would leave the cluster without quorum (rejected).
func (m *membership) isDeletingOnlyNode(cc pb.ConfigChange) bool {
	if cc.Type != pb.RemoveNode || len(m.members.Addresses) != 1 {
		return false
	}
	_, isMember := m.members.Addresses[cc.NodeID]
	return isMember
}
// applyConfigChange mutates the membership maps according to an already
// validated cc and records index as the new ConfigChangeId. The panics guard
// invariants that handleConfigChange's validation should have made
// unreachable; hitting one indicates a validation bug.
func (m *membership) applyConfigChange(cc pb.ConfigChange, index uint64) {
	m.members.ConfigChangeId = index
	switch cc.Type {
	case pb.AddNode:
		nodeAddr := string(cc.Address)
		// promotion path: an observer becoming a node leaves the observer map
		delete(m.members.Observers, cc.NodeID)
		if _, ok := m.members.Witnesses[cc.NodeID]; ok {
			panic("not suppose to reach here")
		}
		m.members.Addresses[cc.NodeID] = nodeAddr
	case pb.AddObserver:
		if _, ok := m.members.Addresses[cc.NodeID]; ok {
			panic("not suppose to reach here")
		}
		m.members.Observers[cc.NodeID] = string(cc.Address)
	case pb.AddWitness:
		if _, ok := m.members.Addresses[cc.NodeID]; ok {
			panic("not suppose to reach here")
		}
		if _, ok := m.members.Observers[cc.NodeID]; ok {
			panic("not suppose to reach here")
		}
		m.members.Witnesses[cc.NodeID] = string(cc.Address)
	case pb.RemoveNode:
		// removal is terminal: the id is remembered so it can never rejoin
		delete(m.members.Addresses, cc.NodeID)
		delete(m.members.Observers, cc.NodeID)
		delete(m.members.Witnesses, cc.NodeID)
		m.members.Removed[cc.NodeID] = true
	default:
		panic("unknown config change type")
	}
}
// nid formats a node id for log messages (short alias for logutil.NodeID).
var nid = logutil.NodeID
// handleConfigChange validates cc against the current membership, applies it
// at log index `index` when every check passes, and returns whether it was
// accepted. Each rejection reason gets its own log line; the if/else chain in
// the rejection branch mirrors the order of the boolean flags above.
func (m *membership) handleConfigChange(cc pb.ConfigChange, index uint64) bool {
	// order id requested by user
	ccid := cc.ConfigChangeId
	nodeBecomingObserver := m.isAddingNodeAsObserver(cc)
	nodeBecomingWitness := m.isAddingNodeAsWitness(cc)
	witnessBecomingNode := m.isAddingWitnessAsNode(cc)
	witnessBecomingObserver := m.isAddingWitnessAsObserver(cc)
	observerBecomingWitness := m.isAddingObserverAsWitness(cc)
	alreadyMember := m.isAddingExistingMember(cc)
	addRemovedNode := m.isAddingRemovedNode(cc)
	upToDateCC := m.isConfChangeUpToDate(cc)
	deleteOnlyNode := m.isDeletingOnlyNode(cc)
	invalidPromotion := m.isInvalidObserverPromotion(cc)
	accepted := upToDateCC &&
		!addRemovedNode &&
		!alreadyMember &&
		!nodeBecomingObserver &&
		!nodeBecomingWitness &&
		!witnessBecomingNode &&
		!witnessBecomingObserver &&
		!observerBecomingWitness &&
		!deleteOnlyNode &&
		!invalidPromotion
	if accepted {
		// current entry index, it will be recorded as the conf change id of the members
		m.applyConfigChange(cc, index)
		if cc.Type == pb.AddNode {
			plog.Infof("%s applied ADD ccid %d (%d), %s (%s)",
				m.id(), ccid, index, nid(cc.NodeID), string(cc.Address))
		} else if cc.Type == pb.RemoveNode {
			plog.Infof("%s applied REMOVE ccid %d (%d), %s",
				m.id(), ccid, index, nid(cc.NodeID))
		} else if cc.Type == pb.AddObserver {
			plog.Infof("%s applied ADD OBSERVER ccid %d (%d), %s (%s)",
				m.id(), ccid, index, nid(cc.NodeID), string(cc.Address))
		} else if cc.Type == pb.AddWitness {
			plog.Infof("%s applied ADD WITNESS ccid %d (%d), %s (%s)",
				m.id(), ccid, index, nid(cc.NodeID), string(cc.Address))
		} else {
			plog.Panicf("unknown cc.Type value %d", cc.Type)
		}
	} else {
		if !upToDateCC {
			plog.Warningf("%s rej out-of-order ConfChange ccid %d (%d), type %s",
				m.id(), ccid, index, cc.Type)
		} else if addRemovedNode {
			plog.Warningf("%s rej add removed ccid %d (%d), %s",
				m.id(), ccid, index, nid(cc.NodeID))
		} else if alreadyMember {
			plog.Warningf("%s rej add exist ccid %d (%d) %s (%s)",
				m.id(), ccid, index, nid(cc.NodeID), cc.Address)
		} else if nodeBecomingObserver {
			plog.Warningf("%s rej add exist as observer ccid %d (%d) %s (%s)",
				m.id(), ccid, index, nid(cc.NodeID), cc.Address)
		} else if nodeBecomingWitness {
			plog.Warningf("%s rej add exist as witness ccid %d (%d) %s (%s)",
				m.id(), ccid, index, nid(cc.NodeID), cc.Address)
		} else if witnessBecomingNode {
			plog.Warningf("%s rej add witness as node ccid %d (%d) %s (%s)",
				m.id(), ccid, index, nid(cc.NodeID), cc.Address)
		} else if witnessBecomingObserver {
			plog.Warningf("%s rej add witness as observer ccid %d (%d) %s (%s)",
				m.id(), ccid, index, nid(cc.NodeID), cc.Address)
		} else if observerBecomingWitness {
			plog.Warningf("%s rej add observer as witness ccid %d (%d) %s (%s)",
				m.id(), ccid, index, nid(cc.NodeID), cc.Address)
		} else if deleteOnlyNode {
			plog.Warningf("%s rej remove the only node %s", m.id(), nid(cc.NodeID))
		} else if invalidPromotion {
			plog.Warningf("%s rej invalid observer promotion ccid %d (%d) %s (%s)",
				m.id(), ccid, index, nid(cc.NodeID), cc.Address)
		} else {
			plog.Panicf("config change rejected for unknown reasons")
		}
	}
	return accepted
}
|
{
"pile_set_name": "Github"
}
|
// Copyright 2014 The Serviced Authors.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dao
// --------------------------------------------------------------------------------------------------
// --------------------------------------------------------------------------------------------------
// **** USE OF THE METHODS IN THIS FILE IS DEPRECATED ****
//
// THAT MEANS DO NOT ADD MORE METHODS TO dao.ControlPlane
//
// Instead of adding new RPC calls via dao.ControlPlane, new RPCs should be added
// rpc/master.ClientInterface
// --------------------------------------------------------------------------------------------------
// --------------------------------------------------------------------------------------------------
import (
"time"
"github.com/control-center/serviced/domain/addressassignment"
"github.com/control-center/serviced/domain/logfilter"
"github.com/control-center/serviced/domain/service"
"github.com/control-center/serviced/metrics"
)
// ControlPlaneError is a generic ControlPlane error carrying only a message.
type ControlPlaneError struct {
	Msg string // human-readable description of the failure
}

// Error implements the error interface for ControlPlaneError.
func (s ControlPlaneError) Error() string {
	return s.Msg
}
// EntityRequest is a request for a control center object. It is an untyped
// placeholder used by RPC methods that take no meaningful argument.
type EntityRequest interface{}
// ServiceRequest identifies a service plus some query parameters.
// NOTE(review): fields appear to act as query filters — confirm against callers.
type ServiceRequest struct {
	Tags         []string      // service tags to match
	TenantID     string        // owning tenant id
	UpdatedSince time.Duration // only services updated within this window
	NameRegex    string        // regular expression applied to service names
}
// ServiceCloneRequest specifies a service to clone and how to modify the clone's name.
type ServiceCloneRequest struct {
	ServiceID string // id of the service to clone
	Suffix    string // suffix appended to the clone's name
}
// ServiceMigrationRequest is a request to modify one or more services.
type ServiceMigrationRequest struct {
	ServiceID  string                         // The tenant service ID
	Modified   []*service.Service             // Services modified by the migration
	Added      []*service.Service             // Services added by the migration
	Deploy     []*ServiceDeploymentRequest    // ServiceDefinitions to be deployed by the migration
	LogFilters map[string]logfilter.LogFilter // LogFilters to add/replace
}
// ServiceStateRequest specifies a request for a service's service state.
type ServiceStateRequest struct {
	ServiceID      string // service being queried
	ServiceStateID string // specific state instance of that service
}
// ScheduleServiceRequest specifies a request to schedule a service to run.
type ScheduleServiceRequest struct {
	ServiceIDs  []string // services to schedule
	AutoLaunch  bool     // NOTE(review): presumably also launches dependents automatically — confirm
	Synchronous bool     // wait for scheduling to complete before returning
}
// WaitServiceRequest is a request to wait for a set of services to gain the requested status.
type WaitServiceRequest struct {
	ServiceIDs   []string             // List of service IDs to monitor
	DesiredState service.DesiredState // State which to monitor for
	Timeout      time.Duration        // Time to wait before cancelling the subprocess
	Recursive    bool                 // Recursively wait for the desired state
}
// HostServiceRequest is a request for the service state of a host.
type HostServiceRequest struct {
	HostID         string // host the service instance runs on
	ServiceStateID string // state instance to act on
}
// AttachRequest is a request to run a command in the container of a running service.
type AttachRequest struct {
	Running *RunningService // target running instance
	Command string          // command executable to run inside the container
	Args    []string        // arguments passed to Command
}
// FindChildRequest is a request to locate a service's child by name.
type FindChildRequest struct {
	ServiceID string // parent service id
	ChildName string // name of the child service to find
}
// SnapshotRequest is a request to create a snapshot.
type SnapshotRequest struct {
	ServiceID            string // service whose state is snapshotted
	Message              string // description stored with the snapshot
	Tag                  string // optional label applied to the snapshot
	ContainerID          string // NOTE(review): presumably a container whose state to include — confirm
	SnapshotSpacePercent int    // NOTE(review): presumably the disk-space threshold for snapshotting — confirm
}
// TagSnapshotRequest is a request to add a tag (label) to the specified snapshot.
type TagSnapshotRequest struct {
	SnapshotID string // snapshot to label
	TagName    string // label to apply
}
// SnapshotByTagRequest is a request for the snapshot identified by the tag name.
type SnapshotByTagRequest struct {
	ServiceID string // service whose snapshots are searched
	TagName   string // tag identifying the snapshot
}
// RollbackRequest is a request to apply a snapshot to the current system.
type RollbackRequest struct {
	SnapshotID   string // snapshot to roll back to
	ForceRestart bool   // restart affected services as part of the rollback
}
// MetricRequest is a request for the metrics of the instances of a service.
type MetricRequest struct {
	StartTime time.Time                 // beginning of the metric window
	HostID    string                    // restrict to a single host (optional)
	ServiceID string                    // service whose instances are measured
	Instances []metrics.ServiceInstance // specific instances to report on
}
// The ControlPlane interface is the API for a serviced master.
type ControlPlane interface {
//---------------------------------------------------------------------------
// Service CRUD
// Add a new service
AddService(svc service.Service, serviceID *string) error
// Clones a new service
CloneService(request ServiceCloneRequest, serviceID *string) error
// Deploy a new service
DeployService(svc ServiceDeploymentRequest, serviceID *string) error
// Update an existing service
UpdateService(svc service.Service, _ *int) error
// Migrate a service definition
MigrateServices(request ServiceMigrationRequest, _ *int) error
// Remove a service definition
RemoveService(serviceID string, _ *int) error
// Get a service from serviced
GetService(serviceID string, svc *service.Service) error
// Find a child service with the given name
FindChildService(request FindChildRequest, svc *service.Service) error
// Assign IP addresses to all services at and below the provided service
AssignIPs(assignmentRequest addressassignment.AssignmentRequest, _ *int) (err error)
// Get a list of tenant IDs
GetTenantIDs(_ struct{}, tenantIDs *[]string) error
//---------------------------------------------------------------------------
//ServiceState CRUD
// Schedule the given service to start
StartService(request ScheduleServiceRequest, affected *int) error
// Schedule the given service to restart
RestartService(request ScheduleServiceRequest, affected *int) error
// Schedule the given service to rebalance
RebalanceService(request ScheduleServiceRequest, affected *int) error
// Schedule the given service to stop
StopService(request ScheduleServiceRequest, affected *int) error
// Schedule the given service to pause
PauseService(request ScheduleServiceRequest, affected *int) error
// Stop a running instance of a service
StopRunningInstance(request HostServiceRequest, _ *int) error
// Wait for a particular service state
WaitService(request WaitServiceRequest, _ *int) error
// Computes the status of the service based on its service instances
GetServiceStatus(serviceID string, status *[]service.Instance) error
// Get logs for the given app
GetServiceLogs(serviceID string, logs *string) error
// Get logs for the given app
GetServiceStateLogs(request ServiceStateRequest, logs *string) error
// Get all running services
GetRunningServices(request EntityRequest, runningServices *[]RunningService) error
// Get the services instances for a given service
GetRunningServicesForHost(hostID string, runningServices *[]RunningService) error
// Get the service instances for a given service
GetRunningServicesForService(serviceID string, runningServices *[]RunningService) error
// Attach to a running container with a predefined action
Action(request AttachRequest, _ *int) error
// ------------------------------------------------------------------------
// Metrics
// Get service memory stats for a particular host
GetHostMemoryStats(req MetricRequest, stats *metrics.MemoryUsageStats) error
// Get service memory stats for a particular service
GetServiceMemoryStats(req MetricRequest, stats *metrics.MemoryUsageStats) error
// Get service memory stats for a particular service instance
GetInstanceMemoryStats(req MetricRequest, stats *[]metrics.MemoryUsageStats) error
// -----------------------------------------------------------------------
// Filesystem CRUD
// Backup captures the state of the application stack and writes the output
// to disk.
Backup(backupRequest BackupRequest, filename *string) (err error)
// GetBackupEstimate estimates space required to take backup and space available
GetBackupEstimate(backupRequest BackupRequest, estimate *BackupEstimate) (err error)
// AsyncBackup is the same as backup but asynchronous
AsyncBackup(backupRequest BackupRequest, filename *string) (err error)
// Restore reverts the full application stack from a backup file
Restore(restoreRequest RestoreRequest, _ *int) (err error)
// AsyncRestore is the same as restore but asynchronous
AsyncRestore(restoreRequest RestoreRequest, _ *int) (err error)
// Adds 1 or more tags to an existing snapshot
TagSnapshot(request TagSnapshotRequest, _ *int) error
// Removes a specific tag from an existing snapshot
RemoveSnapshotTag(request SnapshotByTagRequest, snapshotID *string) error
// Gets the snapshot from a specific service with a specific tag
GetSnapshotByServiceIDAndTag(request SnapshotByTagRequest, snapshot *SnapshotInfo) error
// ListBackups returns the list of backups
ListBackups(dirpath string, files *[]BackupFile) (err error)
// BackupStatus returns the current status of a running backup or restore
BackupStatus(_ EntityRequest, status *string) (err error)
// Snapshot captures the state of a single application
Snapshot(req SnapshotRequest, snapshotID *string) (err error)
// Rollback reverts a single application to the state of a snapshot
Rollback(req RollbackRequest, _ *int) (err error)
// DeleteSnapshot deletes a single snapshot
DeleteSnapshot(snapshotID string, _ *int) (err error)
// DeleteSnapshots deletes all snapshots for a service
DeleteSnapshots(serviceID string, _ *int) (err error)
// ListSnapshots returns a list of all snapshots for a service
ListSnapshots(serviceID string, snapshots *[]SnapshotInfo) (err error)
// ResetRegistry prompts all images to be re-pushed into the docker
// registry.
ResetRegistry(_ EntityRequest, _ *int) (err error)
// RepairRegistry will try to recover the latest image of all service
// images from the docker registry and save it to the index.
RepairRegistry(_ EntityRequest, _ *int) (err error)
// ReadyDFS waits for the DFS to be idle when creating a service shell.
ReadyDFS(serviceID string, _ *int) (err error)
}
|
{
"pile_set_name": "Github"
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.neoremind.kraps.rpc
/**
 * A callback that [[RpcEndpoint]] can use to send back a message or failure. It's thread-safe
 * and can be called in any thread.
 */
trait RpcCallContext {
  /**
   * Reply a message to the sender. If the sender is [[RpcEndpoint]], its [[RpcEndpoint.receive]]
   * will be called.
   *
   * @param response the reply payload delivered to the sender
   */
  def reply(response: Any): Unit

  /**
   * Report a failure to the sender.
   *
   * @param e the failure to propagate back to the caller
   */
  def sendFailure(e: Throwable): Unit

  /**
   * The address of the sender of this message.
   */
  def senderAddress: RpcAddress
}
|
{
"pile_set_name": "Github"
}
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
// +groupName=certificates.k8s.io
package v1beta1 // import "k8s.io/api/certificates/v1beta1"
|
{
"pile_set_name": "Github"
}
|
/****************************************************************************
* net/socket/socket.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sys/socket.h>
#include <errno.h>
#include <assert.h>
#include <debug.h>
#include "usrsock/usrsock.h"
#include "socket/socket.h"
#ifdef CONFIG_NET
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: psock_socket
*
* Description:
* socket() creates an endpoint for communication and returns a socket
* structure.
*
* Input Parameters:
* domain (see sys/socket.h)
* type (see sys/socket.h)
* protocol (see sys/socket.h)
* psock A pointer to a user allocated socket structure to be
* initialized.
*
* Returned Value:
* Returns zero (OK) on success. On failure, it returns a negated errno
* value to indicate the nature of the error:
*
* EACCES
* Permission to create a socket of the specified type and/or protocol
* is denied.
* EAFNOSUPPORT
* The implementation does not support the specified address family.
* EINVAL
* Unknown protocol, or protocol family not available.
* EMFILE
* Process file table overflow.
* ENFILE
* The system limit on the total number of open files has been reached.
* ENOBUFS or ENOMEM
* Insufficient memory is available. The socket cannot be created until
* sufficient resources are freed.
* EPROTONOSUPPORT
* The protocol type or the specified protocol is not supported within
* this domain.
*
****************************************************************************/
int psock_socket(int domain, int type, int protocol,
                 FAR struct socket *psock)
{
  FAR const struct sock_intf_s *sockif = NULL;
  int ret;

  /* Initialize the socket structure */

  psock->s_crefs = 1;
  psock->s_domain = domain;
  psock->s_conn = NULL;
#if defined(CONFIG_NET_TCP_WRITE_BUFFERS) || defined(CONFIG_NET_UDP_WRITE_BUFFERS)
  psock->s_sndcb = NULL;
#endif

  /* Latch the SOCK_CLOEXEC / SOCK_NONBLOCK creation flags into s_flags.
   * NOTE(review): s_flags is OR-ed without being cleared first -- this
   * assumes the caller hands in a zero-initialized socket structure;
   * confirm against sockfd_allocate()/callers.
   */

  if (type & SOCK_CLOEXEC)
    {
      psock->s_flags |= _SF_CLOEXEC;
    }

  if (type & SOCK_NONBLOCK)
    {
      psock->s_flags |= _SF_NONBLOCK;
    }

  /* Strip the creation-flag bits so only the base socket type remains */

  type &= SOCK_TYPE_MASK;
  psock->s_type = type;

#ifdef CONFIG_NET_USRSOCK
  if (domain != PF_LOCAL && domain != PF_UNSPEC)
    {
      /* Handle special setup for USRSOCK sockets (user-space networking
       * stack).  NOTE(review): s_sockif is assigned even when si_setup()
       * failed (ret < 0); confirm callers treat the socket as dead in
       * that case.
       */

      ret = g_usrsock_sockif.si_setup(psock, protocol);
      psock->s_sockif = &g_usrsock_sockif;
      return ret;
    }
#endif /* CONFIG_NET_USRSOCK */

  /* Get the socket interface for this (domain, type, protocol) triple */

  sockif = net_sockif(domain, type, protocol);
  if (sockif == NULL)
    {
      nerr("ERROR: socket address family unsupported: %d\n", domain);
      return -EAFNOSUPPORT;
    }

  /* The remaining of the socket initialization depends on the address
   * family.
   */

  DEBUGASSERT(sockif->si_setup != NULL);
  psock->s_sockif = sockif;

  ret = sockif->si_setup(psock, protocol);
  if (ret < 0)
    {
      nerr("ERROR: socket si_setup() failed: %d\n", ret);
      return ret;
    }

  return OK;
}
/****************************************************************************
* Name: socket
*
* Description:
* socket() creates an endpoint for communication and returns a descriptor.
*
* Input Parameters:
* domain (see sys/socket.h)
* type (see sys/socket.h)
* protocol (see sys/socket.h)
*
* Returned Value:
* A non-negative socket descriptor on success; -1 on error with errno set
* appropriately.
*
* EACCES
* Permission to create a socket of the specified type and/or protocol
* is denied.
* EAFNOSUPPORT
* The implementation does not support the specified address family.
* EINVAL
* Unknown protocol, or protocol family not available.
* EMFILE
* Process file table overflow.
* ENFILE
* The system limit on the total number of open files has been reached.
* ENOBUFS or ENOMEM
* Insufficient memory is available. The socket cannot be created until
* sufficient resources are freed.
* EPROTONOSUPPORT
* The protocol type or the specified protocol is not supported within
* this domain.
*
* Assumptions:
*
****************************************************************************/
int socket(int domain, int type, int protocol)
{
FAR struct socket *psock;
int errcode;
int sockfd;
int ret;
/* Allocate a socket descriptor */
sockfd = sockfd_allocate(0);
if (sockfd < 0)
{
nerr("ERROR: Failed to allocate a socket descriptor\n");
errcode = ENFILE;
goto errout;
}
/* Get the underlying socket structure */
psock = sockfd_socket(sockfd);
if (!psock)
{
errcode = ENOSYS; /* should not happen */
goto errout_with_sockfd;
}
/* Initialize the socket structure */
ret = psock_socket(domain, type, protocol, psock);
if (ret < 0)
{
nerr("ERROR: psock_socket() failed: %d\n", ret);
errcode = -ret;
goto errout_with_sockfd;
}
/* The socket has been successfully initialized */
psock->s_flags |= _SF_INITD;
return sockfd;
errout_with_sockfd:
sockfd_release(sockfd);
errout:
set_errno(errcode);
return ERROR;
}
#endif /* CONFIG_NET */
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright (C) 2016 - 2018 ExoMedia Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.devbrackets.android.exomedia.core.video.mp;
import android.content.Context;
import android.media.AudioManager;
import android.media.MediaPlayer;
import android.media.PlaybackParams;
import android.net.Uri;
import android.os.Build;
import android.support.annotation.FloatRange;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import android.util.Log;
import android.view.Surface;
import com.devbrackets.android.exomedia.core.ListenerMux;
import com.devbrackets.android.exomedia.core.exoplayer.WindowInfo;
import com.devbrackets.android.exomedia.core.video.ClearableSurface;
import java.io.IOException;
import java.util.Map;
import static android.content.ContentValues.TAG;
/**
* A delegated object used to handle the majority of the
* functionality for the "Native" video view implementation
* to simplify support for both the {@link android.view.TextureView}
* and {@link android.view.SurfaceView} implementations
*/
@SuppressWarnings("WeakerAccess")
public class NativeVideoDelegate {
    /**
     * Callback through which the delegate reports video size changes coming
     * from the underlying {@link MediaPlayer}.
     */
    public interface Callback {
        void videoSizeChanged(int width, int height);
    }

    /**
     * Internal playback state, mirroring the {@link MediaPlayer} state machine.
     */
    public enum State {
        ERROR,
        IDLE,
        PREPARING,
        PREPARED,
        PLAYING,
        PAUSED,
        COMPLETED
    }

    // Optional request headers passed to MediaPlayer#setDataSource in openVideo()
    protected Map<String, String> headers;

    protected State currentState = State.IDLE;

    protected Context context;
    protected Callback callback;
    protected ClearableSurface clearableSurface;

    protected MediaPlayer mediaPlayer;
    // true once start() is requested; replayed when the player/surface becomes ready
    protected boolean playRequested = false;
    // Seek position (ms) requested before the player was ready; 0 means none pending
    protected long requestedSeek;
    protected int currentBufferPercent;

    @FloatRange(from = 0.0, to = 1.0)
    protected float requestedVolume = 1.0f;

    protected ListenerMux listenerMux;

    @NonNull
    protected InternalListeners internalListeners = new InternalListeners();

    // Externally registered listeners; invoked indirectly via InternalListeners
    @Nullable
    protected MediaPlayer.OnCompletionListener onCompletionListener;
    @Nullable
    protected MediaPlayer.OnPreparedListener onPreparedListener;
    @Nullable
    protected MediaPlayer.OnBufferingUpdateListener onBufferingUpdateListener;
    @Nullable
    protected MediaPlayer.OnSeekCompleteListener onSeekCompleteListener;
    @Nullable
    protected MediaPlayer.OnErrorListener onErrorListener;
    @Nullable
    protected MediaPlayer.OnInfoListener onInfoListener;

    public NativeVideoDelegate(@NonNull Context context, @NonNull Callback callback, @NonNull ClearableSurface clearableSurface) {
        this.context = context;
        this.callback = callback;
        this.clearableSurface = clearableSurface;
        initMediaPlayer();
        currentState = State.IDLE;
    }

    /**
     * Starts (or resumes) playback if the player is ready; otherwise only
     * records the request so it is replayed once the player becomes ready
     * (see {@link InternalListeners#onPrepared} and {@link #onSurfaceReady}).
     */
    public void start() {
        if (isReady()) {
            mediaPlayer.start();
            currentState = State.PLAYING;
        }
        playRequested = true;
        listenerMux.setNotifiedCompleted(false);
    }

    /**
     * Pauses playback if the player is ready and currently playing, and
     * clears any pending play request.
     */
    public void pause() {
        if (isReady() && mediaPlayer.isPlaying()) {
            mediaPlayer.pause();
            currentState = State.PAUSED;
        }
        playRequested = false;
    }

    /**
     * Returns the media duration in milliseconds, or 0 when the player is
     * not yet prepared.
     */
    public long getDuration() {
        if (!listenerMux.isPrepared() || !isReady()) {
            return 0;
        }
        return mediaPlayer.getDuration();
    }

    /**
     * Returns the current playback position in milliseconds, or 0 when the
     * player is not yet prepared.
     */
    public long getCurrentPosition() {
        if (!listenerMux.isPrepared() || !isReady()) {
            return 0;
        }
        return mediaPlayer.getCurrentPosition();
    }

    /**
     * Returns the most recently requested volume (not queried from the
     * MediaPlayer itself).
     */
    @FloatRange(from = 0.0, to = 1.0)
    public float getVolume() {
        return requestedVolume;
    }

    /**
     * Sets the left/right channel volume to the same value.
     *
     * @param volume the volume in the range [0.0, 1.0]
     * @return always true
     */
    public boolean setVolume(@FloatRange(from = 0.0, to = 1.0) float volume) {
        requestedVolume = volume;
        mediaPlayer.setVolume(volume, volume);
        return true;
    }

    /**
     * Seeks to the requested position, or stores it in {@link #requestedSeek}
     * to be applied once the player is ready.
     */
    public void seekTo(long milliseconds) {
        if (isReady()) {
            mediaPlayer.seekTo((int) milliseconds);
            requestedSeek = 0;
        } else {
            requestedSeek = milliseconds;
        }
    }

    public boolean isPlaying() {
        return isReady() && mediaPlayer.isPlaying();
    }

    /**
     * Returns the last buffering percentage reported by the player, or 0 if
     * no player exists.
     */
    public int getBufferPercentage() {
        if (mediaPlayer != null) {
            return currentBufferPercent;
        }
        return 0;
    }

    /**
     * Always null: the native MediaPlayer implementation does not expose
     * window (timeline) information.
     */
    @Nullable
    public WindowInfo getWindowInfo() {
        return null;
    }

    /**
     * Sets the playback speed.
     *
     * @return true when the speed was applied (Marshmallow+ only), false otherwise
     */
    public boolean setPlaybackSpeed(float speed) {
        // Marshmallow+ support setting the playback speed natively
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
            PlaybackParams params = new PlaybackParams();
            params.setSpeed(speed);
            mediaPlayer.setPlaybackParams(params);
            return true;
        }
        return false;
    }

    /**
     * Returns the current playback speed, or 1 on devices that do not
     * support querying it (pre-Marshmallow).
     */
    public float getPlaybackSpeed() {
        // Marshmallow+ support setting the playback speed natively
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
            return mediaPlayer.getPlaybackParams().getSpeed();
        }
        return 1F;
    }

    /**
     * Performs the functionality to stop the video in playback
     *
     * @param clearSurface <code>true</code> if the surface should be cleared
     */
    public void stopPlayback(boolean clearSurface) {
        currentState = State.IDLE;
        if (isReady()) {
            try {
                mediaPlayer.stop();
            } catch (Exception e) {
                // stop() throws IllegalStateException when called in the wrong state;
                // treat that as already-stopped
                Log.d(TAG, "stopPlayback: error calling mediaPlayer.stop()", e);
            }
        }
        playRequested = false;
        if (clearSurface) {
            listenerMux.clearSurfaceWhenReady(clearableSurface);
        }
    }

    /**
     * Cleans up the resources being held. This should only be called when
     * destroying the video view
     * <p>
     * NOTE(review): mediaPlayer is released but the field is not nulled or
     * recreated, so further calls on this delegate act on a released player;
     * confirm callers discard the delegate after suspend().
     */
    public void suspend() {
        currentState = State.IDLE;
        try {
            mediaPlayer.reset();
            mediaPlayer.release();
        } catch (Exception e) {
            Log.d(TAG, "stopPlayback: error calling mediaPlayer.reset() or mediaPlayer.release()", e);
        }
        playRequested = false;
    }

    /**
     * Restarts playback from the beginning. Only valid once playback has
     * completed.
     *
     * @return true when the restart was performed
     */
    public boolean restart() {
        if (currentState != State.COMPLETED) {
            return false;
        }
        seekTo(0);
        start();
        //Makes sure the listeners get the onPrepared callback
        listenerMux.setNotifiedPrepared(false);
        listenerMux.setNotifiedCompleted(false);
        return true;
    }

    /**
     * Sets video URI using specific headers.
     *
     * @param uri The Uri for the video to play
     * @param headers The headers for the URI request.
     * Note that the cross domain redirection is allowed by default, but that can be
     * changed with key/value pairs through the headers parameter with
     * "android-allow-cross-domain-redirect" as the key and "0" or "1" as the value
     * to disallow or allow cross domain redirection.
     */
    public void setVideoURI(Uri uri, @Nullable Map<String, String> headers) {
        this.headers = headers;
        requestedSeek = 0;
        playRequested = false;
        openVideo(uri);
    }

    /**
     * Wires the {@link ListenerMux} up as the receiver for all player events
     * except onInfo (which callers register separately).
     */
    public void setListenerMux(ListenerMux listenerMux) {
        this.listenerMux = listenerMux;
        setOnCompletionListener(listenerMux);
        setOnPreparedListener(listenerMux);
        setOnBufferingUpdateListener(listenerMux);
        setOnSeekCompleteListener(listenerMux);
        setOnErrorListener(listenerMux);
    }

    /**
     * Register a callback to be invoked when the media file
     * is loaded and ready to go.
     *
     * @param listener The callback that will be run
     */
    public void setOnPreparedListener(@Nullable MediaPlayer.OnPreparedListener listener) {
        onPreparedListener = listener;
    }

    /**
     * Register a callback to be invoked when the end of a media file
     * has been reached during playback.
     *
     * @param listener The callback that will be run
     */
    public void setOnCompletionListener(@Nullable MediaPlayer.OnCompletionListener listener) {
        onCompletionListener = listener;
    }

    /**
     * Register a callback to be invoked when the status of a network
     * stream's buffer has changed.
     *
     * @param listener the callback that will be run.
     */
    public void setOnBufferingUpdateListener(@Nullable MediaPlayer.OnBufferingUpdateListener listener) {
        onBufferingUpdateListener = listener;
    }

    /**
     * Register a callback to be invoked when a seek operation has been
     * completed.
     *
     * @param listener the callback that will be run
     */
    public void setOnSeekCompleteListener(@Nullable MediaPlayer.OnSeekCompleteListener listener) {
        onSeekCompleteListener = listener;
    }

    /**
     * Register a callback to be invoked when an error occurs
     * during playback or setup. If no listener is specified,
     * or if the listener returned false, TextureVideoView will inform
     * the user of any errors.
     *
     * @param listener The callback that will be run
     */
    public void setOnErrorListener(@Nullable MediaPlayer.OnErrorListener listener) {
        onErrorListener = listener;
    }

    /**
     * Register a callback to be invoked when an informational event
     * occurs during playback or setup.
     *
     * @param listener The callback that will be run
     */
    public void setOnInfoListener(@Nullable MediaPlayer.OnInfoListener listener) {
        onInfoListener = listener;
    }

    /**
     * Replays any pending seek/play request once the surface has a usable
     * (positive) size; the width/height values are otherwise only used as a
     * validity guard.
     */
    public void onSurfaceSizeChanged(int width, int height) {
        if (mediaPlayer == null || width <= 0 || height <= 0) {
            return;
        }
        if (requestedSeek != 0) {
            seekTo(requestedSeek);
        }
        if (playRequested) {
            start();
        }
    }

    /**
     * Attaches the rendering surface to the player and replays a pending
     * play request.
     */
    public void onSurfaceReady(Surface surface) {
        mediaPlayer.setSurface(surface);
        if (playRequested) {
            start();
        }
    }

    /**
     * Creates the MediaPlayer and registers {@link #internalListeners} for
     * every event type it multiplexes.
     */
    protected void initMediaPlayer() {
        mediaPlayer = new MediaPlayer();
        mediaPlayer.setOnInfoListener(internalListeners);
        mediaPlayer.setOnErrorListener(internalListeners);
        mediaPlayer.setOnPreparedListener(internalListeners);
        mediaPlayer.setOnCompletionListener(internalListeners);
        mediaPlayer.setOnSeekCompleteListener(internalListeners);
        mediaPlayer.setOnBufferingUpdateListener(internalListeners);
        mediaPlayer.setOnVideoSizeChangedListener(internalListeners);
        mediaPlayer.setAudioStreamType(AudioManager.STREAM_MUSIC);
        mediaPlayer.setScreenOnWhilePlaying(true);
    }

    /**
     * True when the player is in a state where playback commands are legal
     * (i.e. not ERROR, IDLE, or PREPARING).
     */
    protected boolean isReady() {
        return currentState != State.ERROR && currentState != State.IDLE && currentState != State.PREPARING;
    }

    /**
     * Resets the player and asynchronously prepares the given content;
     * transitions to PREPARING on success and ERROR (with an error callback)
     * on failure.
     */
    protected void openVideo(@Nullable Uri uri) {
        if (uri == null) {
            return;
        }
        currentBufferPercent = 0;
        try {
            mediaPlayer.reset();
            mediaPlayer.setDataSource(context.getApplicationContext(), uri, headers);
            mediaPlayer.prepareAsync();
            currentState = State.PREPARING;
        } catch (IOException | IllegalArgumentException ex) {
            Log.w(TAG, "Unable to open content: " + uri, ex);
            currentState = State.ERROR;
            internalListeners.onError(mediaPlayer, MediaPlayer.MEDIA_ERROR_UNKNOWN, 0);
        }
    }

    /**
     * Single listener registered on the MediaPlayer (see initMediaPlayer())
     * that updates delegate state and fans events out to the externally
     * registered listeners.
     */
    public class InternalListeners implements MediaPlayer.OnBufferingUpdateListener, MediaPlayer.OnErrorListener, MediaPlayer.OnPreparedListener,
            MediaPlayer.OnCompletionListener, MediaPlayer.OnSeekCompleteListener, MediaPlayer.OnInfoListener, MediaPlayer.OnVideoSizeChangedListener {
        @Override
        public void onBufferingUpdate(MediaPlayer mp, int percent) {
            currentBufferPercent = percent;
            if (onBufferingUpdateListener != null) {
                onBufferingUpdateListener.onBufferingUpdate(mp, percent);
            }
        }

        @Override
        public void onCompletion(MediaPlayer mp) {
            currentState = State.COMPLETED;
            if (onCompletionListener != null) {
                // Passes the mediaPlayer field rather than mp; these are the same
                // object since this listener is only registered on mediaPlayer
                onCompletionListener.onCompletion(mediaPlayer);
            }
        }

        @Override
        public void onSeekComplete(MediaPlayer mp) {
            if (onSeekCompleteListener != null) {
                onSeekCompleteListener.onSeekComplete(mp);
            }
        }

        @Override
        public boolean onError(MediaPlayer mp, int what, int extra) {
            Log.d(TAG, "Error: " + what + "," + extra);
            currentState = State.ERROR;
            // When no external listener handles the error, report it as handled
            return onErrorListener == null || onErrorListener.onError(mediaPlayer, what, extra);
        }

        @Override
        public void onPrepared(MediaPlayer mp) {
            currentState = State.PREPARED;
            if (onPreparedListener != null) {
                onPreparedListener.onPrepared(mediaPlayer);
            }
            callback.videoSizeChanged(mp.getVideoWidth(), mp.getVideoHeight());
            // Replay any seek/play requested before the player was prepared
            if (requestedSeek != 0) {
                seekTo(requestedSeek);
            }
            if (playRequested) {
                start();
            }
        }

        @Override
        public boolean onInfo(MediaPlayer mp, int what, int extra) {
            return onInfoListener == null || onInfoListener.onInfo(mp, what, extra);
        }

        @Override
        public void onVideoSizeChanged(MediaPlayer mp, int width, int height) {
            callback.videoSizeChanged(mp.getVideoWidth(), mp.getVideoHeight());
        }
    }
}
|
{
"pile_set_name": "Github"
}
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# This script is used to capture the content of config.status-generated
# files and subsequently restore their timestamp if they haven't changed.
import argparse
import errno
import itertools
import os
import re
import subprocess
import sys
import pickle
import mozpack.path as mozpath
class Pool(object):
    """Best-effort process pool.

    ``Pool(size)`` returns a ``multiprocessing.Pool`` when multiprocessing
    is usable; otherwise construction falls through to an instance of this
    class, which exposes the same (subset of the) interface but runs jobs
    serially, in-process.
    """

    def __new__(cls, size):
        try:
            import multiprocessing
            size = min(size, multiprocessing.cpu_count())
            return multiprocessing.Pool(size)
        except:
            # NOTE(review): bare except is deliberate best-effort -- any
            # failure setting up multiprocessing (ImportError,
            # NotImplementedError from cpu_count(), ...) falls back to the
            # serial implementation below.
            return super(Pool, cls).__new__(cls)

    def imap_unordered(self, fn, iterable):
        # Serial fallback. itertools.imap is Python 2 only; this script
        # targets Python 2.
        return itertools.imap(fn, iterable)

    def close(self):
        # No worker processes to close in the serial fallback.
        pass

    def join(self):
        # No worker processes to join in the serial fallback.
        pass
class File(object):
    """Snapshot of a file's content and timestamps, taken at construction.

    Lets the caller later check whether the file changed (``modified``) and
    restore the original timestamps when it did not (``update_time``), so
    that files rewritten with identical content by config.status do not
    look dirty to make.
    """

    def __init__(self, path):
        self._path = path
        # Use a context manager so the handle is closed deterministically
        # instead of being left to the garbage collector (the original
        # open(...).read() leaked the file object).
        with open(path, 'rb') as fh:
            self._content = fh.read()
        stat = os.stat(path)
        self._times = (stat.st_atime, stat.st_mtime)

    @property
    def path(self):
        return self._path

    @property
    def mtime(self):
        # Modification time recorded at construction.
        return self._times[1]

    @property
    def modified(self):
        '''Returns whether the file was modified since the instance was
        created. Result is memoized.'''
        if hasattr(self, '_modified'):
            return self._modified
        modified = True
        if os.path.exists(self._path):
            with open(self._path, 'rb') as fh:
                if fh.read() == self._content:
                    modified = False
        self._modified = modified
        return modified

    def update_time(self):
        '''If the file hasn't changed since the instance was created,
        restore its old modification time.'''
        if not self.modified:
            os.utime(self._path, self._times)
# As defined in the various sub-configures in the tree
# These are the autoconf "precious" variables: a change in any of their
# values between configure runs makes autoconf error out unless the cache
# is removed (see maybe_clear_cache below).
PRECIOUS_VARS = set([
    'build_alias',
    'host_alias',
    'target_alias',
    'CC',
    'CFLAGS',
    'LDFLAGS',
    'LIBS',
    'CPPFLAGS',
    'CPP',
    'CCC',
    'CXXFLAGS',
    'CXX',
    'CCASFLAGS',
    'CCAS',
])

# Name of the pickled file, stored in each objdir, that carries the
# configure invocation data between the --prepare step and the run step.
CONFIGURE_DATA = 'configure.pkl'
# Autoconf, in some of the sub-configures used in the tree, likes to error
# out when "precious" variables change in value. The solution it gives to
# straighten things is to either run make distclean or remove config.cache.
# There's no reason not to do the latter automatically instead of failing,
# doing the cleanup (which, on buildbots means a full clobber), and
# restarting from scratch.
def maybe_clear_cache(data):
    """Remove the config.cache file when a precious variable changed.

    ``data`` is the pickled configure-invocation dict built by prepare().
    Returns True when the cache file was removed, False otherwise.
    """
    # Effective environment: recorded env, plus target/host/build aliases,
    # plus VAR=value assignments given on the configure command line.
    env = dict(data['env'])
    for kind in ('target', 'host', 'build'):
        arg = data[kind]
        if arg is not None:
            env['%s_alias' % kind] = arg
    # configure can take variables assignments in its arguments, and that
    # overrides whatever is in the environment.
    for arg in data['args']:
        if arg[:1] != '-' and '=' in arg:
            key, value = arg.split('=', 1)
            env[key] = value

    # Parse the autoconf cache file into a key/value dict, skipping
    # indented comment lines.
    comment = re.compile(r'^\s+#')
    cache = {}
    with open(data['cache-file']) as f:
        for line in f:
            if not comment.match(line) and '=' in line:
                key, value = line.rstrip(os.linesep).split('=', 1)
                # If the value is quoted, unquote it
                if value[:1] == "'":
                    value = value[1:-1].replace("'\\''", "'")
                cache[key] = value
    for precious in PRECIOUS_VARS:
        # If there is no entry at all for that precious variable, then
        # its value is not precious for that particular configure.
        if 'ac_cv_env_%s_set' % precious not in cache:
            continue
        is_set = cache.get('ac_cv_env_%s_set' % precious) == 'set'
        value = cache.get('ac_cv_env_%s_value' % precious) if is_set else None
        if value != env.get(precious):
            # A precious variable changed: drop the cache (first mismatch
            # wins; the whole file is removed anyway).
            print 'Removing %s because of %s value change from:' \
                % (data['cache-file'], precious)
            print '  %s' % (value if value is not None else 'undefined')
            print 'to:'
            print '  %s' % env.get(precious, 'undefined')
            os.remove(data['cache-file'])
            return True
    return False
def split_template(s):
    """Given a "file:template" string, return ``(file, template)``.

    If the string is of the form "file" (without a template), return
    ``(file, "file.in")``.  Always returns a tuple; the original returned a
    list from the ':' branch and a tuple otherwise, which was inconsistent
    (harmless for unpacking callers, but surprising for anything else).
    """
    name, sep, template = s.partition(':')
    if sep:
        return name, template
    return s, '%s.in' % s
def get_config_files(data):
    """Return the (config_files, command_files) handled by config.status.

    ``config_files`` is a list of (output, template) path pairs;
    ``command_files`` is a list of output paths for AC_CONFIG_COMMANDS tags.
    Both lists are empty when config.status does not exist yet.
    """
    config_status = mozpath.join(data['objdir'], 'config.status')
    if not os.path.exists(config_status):
        return [], []

    # NOTE(review): `configure` is assigned but never used in this
    # function; left in place since a doc-only pass must not change code.
    configure = mozpath.join(data['srcdir'], 'configure')
    config_files = []
    command_files = []

    # Scan the config.status output for information about configuration files
    # it generates.
    config_status_output = subprocess.check_output(
        [data['shell'], '-c', '%s --help' % config_status],
        stderr=subprocess.STDOUT).splitlines()
    # Simple state machine over the help text: a "Configuration ...:"
    # header opens a section ('commands' or 'config'), a blank line closes it.
    state = None
    for line in config_status_output:
        if line.startswith('Configuration') and line.endswith(':'):
            if line.endswith('commands:'):
                state = 'commands'
            else:
                state = 'config'
        elif not line.strip():
            state = None
        elif state:
            for f, t in (split_template(couple) for couple in line.split()):
                f = mozpath.join(data['objdir'], f)
                t = mozpath.join(data['srcdir'], t)
                if state == 'commands':
                    command_files.append(f)
                else:
                    config_files.append((f, t))

    return config_files, command_files
def prepare(srcdir, objdir, shell, args):
    """Record a subconfigure invocation for a later run() step.

    Parses the configure-style ``args``, captures the environment, and
    pickles everything into ``objdir/CONFIGURE_DATA``.  Nothing is executed
    here; run() consumes the pickle.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--target', type=str)
    parser.add_argument('--host', type=str)
    parser.add_argument('--build', type=str)
    parser.add_argument('--cache-file', type=str)
    # The --srcdir argument is simply ignored. It's a useless autoconf feature
    # that we don't support well anyways. This makes it stripped from `others`
    # and allows to skip setting it when calling the subconfigure (configure
    # will take it from the configure path anyways).
    parser.add_argument('--srcdir', type=str)

    data_file = os.path.join(objdir, CONFIGURE_DATA)
    # Keep the previous invocation's args (if any) so run() can detect
    # argument changes and re-run configure.
    previous_args = None
    if os.path.exists(data_file):
        with open(data_file, 'rb') as f:
            data = pickle.load(f)
        previous_args = data['args']

    # Msys likes to break environment variables and command line arguments,
    # so read those from stdin, as they are passed from the configure script
    # when necessary (on windows).
    # However, for some reason, $PATH is not handled like other environment
    # variables, and msys remangles it even when giving it is already a msys
    # $PATH. Fortunately, the mangling/demangling is just find for $PATH, so
    # we can just take the value from the environment. Msys will convert it
    # back properly when calling subconfigure.
    # NOTE(review): eval() of stdin is only safe because the input comes
    # from the trusted top-level configure script, never from outside.
    input = sys.stdin.read()
    if input:
        data = {a: b for [a, b] in eval(input)}
        environ = {a: b for a, b in data['env']}
        environ['PATH'] = os.environ['PATH']
        args = data['args']
    else:
        environ = os.environ

    args, others = parser.parse_known_args(args)

    data = {
        'target': args.target,
        'host': args.host,
        'build': args.build,
        'args': others,
        'shell': shell,
        'srcdir': srcdir,
        'env': environ,
    }

    if args.cache_file:
        data['cache-file'] = mozpath.normpath(mozpath.join(os.getcwd(),
            args.cache_file))
    else:
        data['cache-file'] = mozpath.join(objdir, 'config.cache')

    if previous_args is not None:
        data['previous-args'] = previous_args

    try:
        os.makedirs(objdir)
    except OSError as e:
        # The objdir already existing is fine; anything else is fatal.
        if e.errno != errno.EEXIST:
            raise
    with open(data_file, 'wb') as f:
        pickle.dump(data, f)
def prefix_lines(text, prefix):
    """Prepend "<prefix>> " to every line of ``text``.

    Line endings are preserved exactly; a final line without a trailing
    newline stays without one, and an empty ``text`` yields ''.
    """
    decorated = ['%s> %s' % (prefix, line) for line in text.splitlines(True)]
    return ''.join(decorated)
def run(objdir):
    """Run configure and/or config.status for one subconfigure objdir.

    Reads the pickled invocation data written by prepare(), skips work that
    is provably up to date, and restores timestamps of outputs that did not
    change.  Returns ``(relobjdir, returncode, captured_output)``; this is
    the worker function handed to Pool.imap_unordered in subconfigure().
    """
    ret = 0
    output = ''

    with open(os.path.join(objdir, CONFIGURE_DATA), 'rb') as f:
        data = pickle.load(f)

    data['objdir'] = objdir

    # Clear a stale autoconf cache if precious variables changed.
    cache_file = data['cache-file']
    cleared_cache = True
    if os.path.exists(cache_file):
        cleared_cache = maybe_clear_cache(data)

    # Snapshot the current outputs so their timestamps can be restored
    # later when config.status rewrites them with identical content.
    config_files, command_files = get_config_files(data)
    contents = []
    for f, t in config_files:
        contents.append(File(f))

    # AC_CONFIG_COMMANDS actually only registers tags, not file names
    # but most commands are tagged with the file name they create.
    # However, a few don't, or are tagged with a directory name (and their
    # command is just to create that directory)
    for f in command_files:
        if os.path.isfile(f):
            contents.append(File(f))

    # Only run configure if one of the following is true:
    # - config.status doesn't exist
    # - config.status is older than configure
    # - the configure arguments changed
    # - the environment changed in a way that requires a cache clear.
    configure = mozpath.join(data['srcdir'], 'configure')
    config_status_path = mozpath.join(objdir, 'config.status')
    skip_configure = True
    if not os.path.exists(config_status_path):
        skip_configure = False
        config_status = None
    else:
        config_status = File(config_status_path)
        if config_status.mtime < os.path.getmtime(configure) or \
                data.get('previous-args', data['args']) != data['args'] or \
                cleared_cache:
            skip_configure = False

    relobjdir = os.path.relpath(objdir, os.getcwd())

    if not skip_configure:
        command = [data['shell'], configure]
        for kind in ('target', 'build', 'host'):
            if data.get(kind) is not None:
                command += ['--%s=%s' % (kind, data[kind])]
        command += data['args']
        command += ['--cache-file=%s' % cache_file]

        # Pass --no-create to configure so that it doesn't run config.status.
        # We're going to run it ourselves.
        command += ['--no-create']

        print prefix_lines('configuring', relobjdir)
        print prefix_lines('running %s' % ' '.join(command[:-1]), relobjdir)
        sys.stdout.flush()
        try:
            output += subprocess.check_output(command,
                stderr=subprocess.STDOUT, cwd=objdir, env=data['env'])
        except subprocess.CalledProcessError as e:
            # Configure failed: report and stop here for this objdir.
            return relobjdir, e.returncode, e.output

        # Leave config.status with a new timestamp if configure is newer than
        # its original mtime.
        if config_status and os.path.getmtime(configure) <= config_status.mtime:
            config_status.update_time()

    # Only run config.status if one of the following is true:
    # - config.status changed or did not exist
    # - one of the templates for config files is newer than the corresponding
    #   config file.
    skip_config_status = True
    if not config_status or config_status.modified:
        # If config.status doesn't exist after configure (because it's not
        # an autoconf configure), skip it.
        if os.path.exists(config_status_path):
            skip_config_status = False
    else:
        # config.status changed or was created, so we need to update the
        # list of config and command files.
        config_files, command_files = get_config_files(data)

    for f, t in config_files:
        if not os.path.exists(t) or \
                os.path.getmtime(f) < os.path.getmtime(t):
            skip_config_status = False

    if not skip_config_status:
        if skip_configure:
            print prefix_lines('running config.status', relobjdir)
            sys.stdout.flush()

        try:
            output += subprocess.check_output([data['shell'], '-c',
                './config.status'], stderr=subprocess.STDOUT, cwd=objdir,
                env=data['env'])
        except subprocess.CalledProcessError as e:
            ret = e.returncode
            output += e.output

        # Restore timestamps of outputs whose content did not change.
        for f in contents:
            f.update_time()

    return relobjdir, ret, output
def subconfigure(args):
parser = argparse.ArgumentParser()
parser.add_argument('--list', type=str,
help='File containing a list of subconfigures to run')
parser.add_argument('--skip', type=str,
help='File containing a list of Subconfigures to skip')
parser.add_argument('subconfigures', type=str, nargs='*',
help='Subconfigures to run if no list file is given')
args, others = parser.parse_known_args(args)
subconfigures = args.subconfigures
if args.list:
subconfigures.extend(open(args.list, 'rb').read().splitlines())
if args.skip:
skips = set(open(args.skip, 'rb').read().splitlines())
subconfigures = [s for s in subconfigures if s not in skips]
if not subconfigures:
return 0
ret = 0
# One would think using a ThreadPool would be faster, considering
# everything happens in subprocesses anyways, but no, it's actually
# slower on Windows. (20s difference overall!)
pool = Pool(len(subconfigures))
for relobjdir, returncode, output in \
pool.imap_unordered(run, subconfigures):
print prefix_lines(output, relobjdir)
sys.stdout.flush()
ret = max(returncode, ret)
if ret:
break
pool.close()
pool.join()
return ret
def main(args):
    """Entry point: dispatch to prepare mode or subconfigure mode.

    Without a leading '--prepare' flag, args simply name subconfigures to
    run.  In prepare mode, args are: topsrcdir, subdir (optionally given
    as 'srcdir:objdir'), a config file, and extra configure arguments.
    """
    if args[0] != '--prepare':
        return subconfigure(args)

    topsrcdir = os.path.abspath(args[1])
    subdir = args[2]
    # subdir can be of the form srcdir:objdir
    if ':' in subdir:
        srcdir, subdir = subdir.split(':', 1)
    else:
        srcdir = subdir
    return prepare(os.path.join(topsrcdir, srcdir),
                   os.path.abspath(subdir),
                   args[3], args[4:])
# Script entry point: run main() on the command-line arguments (excluding the
# script name) and propagate its return value as the process exit status.
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
|
{
"pile_set_name": "Github"
}
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package version
// Base version information.
//
// This is the fallback data used when version information from git is not
// provided via go ldflags. It provides an approximation of the Kubernetes
// version for ad-hoc builds (e.g. `go build`) that cannot get the version
// information from git.
//
// If you are looking at these fields in the git tree, they look
// strange. They are modified on the fly by the build process. The
// in-tree values are dummy values used for "git archive", which also
// works for GitHub tar downloads.
//
// When releasing a new Kubernetes version, this file is updated by
// build/mark_new_version.sh to reflect the new version, and then a
// git annotated tag (using format vX.Y where X == Major version and Y
// == Minor version) is created to point to the commit that updates
// component-base/version/base.go
// Fallback base-version values; real values are injected at build time via
// go ldflags, and the $Format:...$ placeholders are expanded by 'git archive'.
var (
	// TODO: Deprecate gitMajor and gitMinor, use only gitVersion
	// instead. First step in deprecation, keep the fields but make
	// them irrelevant. (Next we'll take it out, which may muck with
	// scripts consuming the kubectl version output - but most of
	// these should be looking at gitVersion already anyways.)
	gitMajor string // major version, always numeric; empty until set by the build
	gitMinor string // minor version, numeric possibly followed by "+"; empty until set by the build

	// gitVersion is the semantic version, derived by build scripts (see
	// https://github.com/kubernetes/community/blob/master/contributors/design-proposals/release/versioning.md
	// for a detailed discussion of this field)
	//
	// TODO: This field is still called "gitVersion" for legacy
	// reasons. For prerelease versions, the build metadata on the
	// semantic version is a git hash, but the version itself is no
	// longer the direct output of "git describe", but a slight
	// translation to be semver compliant.
	//
	// NOTE: The $Format strings are replaced during 'git archive' thanks to the
	// companion .gitattributes file containing 'export-subst' in this same
	// directory. See also https://git-scm.com/docs/gitattributes
	gitVersion   = "v0.0.0-master+$Format:%h$"
	gitCommit    = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD)
	gitTreeState = ""            // state of git tree, either "clean" or "dirty"

	buildDate = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
)
|
{
"pile_set_name": "Github"
}
|
/*******************************************************************************
* Copyright 2012-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use
* this file except in compliance with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file.
* This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
* *****************************************************************************
*
* AWS Tools for Windows (TM) PowerShell (TM)
*
*/
using System;
using System.Collections.Generic;
using System.Linq;
using System.Management.Automation;
using System.Text;
using Amazon.PowerShell.Common;
using Amazon.Runtime;
using Amazon.CloudFormation;
using Amazon.CloudFormation.Model;
namespace Amazon.PowerShell.Cmdlets.CFN
{
/// <summary>
/// Updates a stack as specified in the template. After the call completes successfully,
/// the stack update starts. You can check the status of the stack via the <a>DescribeStacks</a>
/// action.
///
///
/// <para>
/// To get a copy of the template for an existing stack, you can use the <a>GetTemplate</a>
/// action.
/// </para><para>
/// For more information about creating an update template, updating a stack, and monitoring
/// the progress of the update, see <a href="https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks.html">Updating
/// a Stack</a>.
/// </para>
/// </summary>
[Cmdlet("Update", "CFNStack", SupportsShouldProcess = true, ConfirmImpact = ConfirmImpact.Medium)]
[OutputType("System.String")]
[AWSCmdlet("Calls the AWS CloudFormation UpdateStack API operation.", Operation = new[] {"UpdateStack"}, SelectReturnType = typeof(Amazon.CloudFormation.Model.UpdateStackResponse))]
[AWSCmdletOutput("System.String or Amazon.CloudFormation.Model.UpdateStackResponse",
"This cmdlet returns a System.String object.",
"The service call response (type Amazon.CloudFormation.Model.UpdateStackResponse) can also be referenced from properties attached to the cmdlet entry in the $AWSHistory stack."
)]
public partial class UpdateCFNStackCmdlet : AmazonCloudFormationClientCmdlet, IExecutor
{
#region Parameter Capability
/// <summary>
/// <para>
/// Capabilities you explicitly acknowledge so that AWS CloudFormation may update the stack.
/// </para><para>
/// Specify <code>CAPABILITY_IAM</code> or <code>CAPABILITY_NAMED_IAM</code> when the stack
/// template contains resources that can affect permissions in your AWS account (for example,
/// IAM users, groups, roles, policies, instance profiles, or access keys). If any IAM
/// resources have custom names you <i>must</i> specify <code>CAPABILITY_NAMED_IAM</code>;
/// omitting a required capability yields an <code>InsufficientCapabilities</code> error.
/// Review the permissions of such resources before updating.
/// </para><para>
/// Specify <code>CAPABILITY_AUTO_EXPAND</code> when updating a stack directly from a
/// template that contains macros (including the <code>AWS::Include</code> and
/// <code>AWS::Serverless</code> transforms hosted by AWS CloudFormation) without first
/// reviewing the resulting changes in a change set. Only do so if you know what processing
/// the macros perform; each macro relies on an underlying Lambda function whose owner can
/// change its behavior without CloudFormation being notified. Change sets do not support
/// nested stacks, so templates with macros <i>and</i> nested stacks must be updated directly
/// with this capability.
/// </para><para>
/// See "Acknowledging IAM Resources in AWS CloudFormation Templates" and "Using AWS
/// CloudFormation Macros to Perform Custom Processing on Templates" in the AWS
/// CloudFormation User Guide.
/// </para>
/// </summary>
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
[Alias("Capabilities")]
public System.String[] Capability { get; set; }
#endregion

#region Parameter ClientRequestToken
/// <summary>
/// <para>
/// A unique identifier for this <code>UpdateStack</code> request. Specify this token when
/// you plan to retry requests, so that AWS CloudFormation knows a retry is not an attempt
/// to update a stack with the same name. All events triggered by a given stack operation
/// are assigned the same client request token, which you can use to track operations;
/// console-initiated operations use the format <i>Console-StackOperation-ID</i>, for example
/// <code>Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002</code>.
/// </para>
/// </summary>
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
public System.String ClientRequestToken { get; set; }
#endregion

#region Parameter RollbackConfiguration_MonitoringTimeInMinute
/// <summary>
/// <para>
/// The amount of time, in minutes, during which CloudFormation should monitor all the
/// rollback triggers after the operation deploys all necessary resources. The default is
/// 0 minutes: the specified triggers are still monitored during the operation, but for
/// updates old resources are disposed of as soon as the operation completes. If you specify
/// a monitoring period but no triggers, CloudFormation still waits the specified time before
/// cleaning up old resources, which lets you perform manual validation and, if needed,
/// cancel via <code>CancelUpdateStack</code>.
/// </para>
/// </summary>
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
[Alias("RollbackConfiguration_MonitoringTimeInMinutes")]
public System.Int32? RollbackConfiguration_MonitoringTimeInMinute { get; set; }
#endregion

#region Parameter NotificationARNs
/// <summary>
/// <para>
/// Amazon Simple Notification Service topic Amazon Resource Names (ARNs) that AWS
/// CloudFormation associates with the stack. Specify an empty list to remove all
/// notification topics.
/// </para>
/// </summary>
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
public System.String[] NotificationARNs { get; set; }
#endregion

#region Parameter Parameter
/// <summary>
/// <para>
/// A list of <code>Parameter</code> structures that specify input parameters for the stack.
/// For more information, see the <code>Parameter</code> data type in the AWS CloudFormation
/// API Reference.
/// </para>
/// </summary>
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
[Alias("Parameters")]
public Amazon.CloudFormation.Model.Parameter[] Parameter { get; set; }
#endregion
#region Parameter ResourceType
/// <summary>
/// <para>
/// The template resource types that you have permissions to work with for this update stack
/// action, such as <code>AWS::EC2::Instance</code>, <code>AWS::EC2::*</code>, or
/// <code>Custom::MyCustomInstance</code>. If the list doesn't include a resource that you're
/// updating, the stack update fails. By default AWS CloudFormation grants permissions to all
/// resource types; IAM uses this parameter for CloudFormation-specific condition keys in IAM
/// policies (see "Controlling Access with AWS Identity and Access Management" in the
/// CloudFormation User Guide).
/// </para>
/// </summary>
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
[Alias("ResourceTypes")]
public System.String[] ResourceType { get; set; }
#endregion

#region Parameter RoleARN
/// <summary>
/// <para>
/// The Amazon Resource Name (ARN) of an IAM role that AWS CloudFormation assumes to update
/// the stack. CloudFormation uses the role's credentials on your behalf and keeps using this
/// role for all future operations on the stack — even for users who lack permission to pass
/// it — so ensure the role grants least privilege. If you don't specify a value,
/// CloudFormation reuses the role previously associated with the stack or, if none is
/// available, a temporary session generated from your user credentials.
/// </para>
/// </summary>
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
public System.String RoleARN { get; set; }
#endregion

#region Parameter RollbackConfiguration_RollbackTrigger
/// <summary>
/// <para>
/// The triggers to monitor during stack creation or update actions. By default
/// CloudFormation saves the rollback triggers specified for a stack and applies them to any
/// subsequent update operations; triggers supplied here <i>replace</i> any previously
/// specified list, so include <i>all</i> triggers you want used for this stack. Don't
/// specify this parameter to keep the previously specified triggers, or pass an empty list
/// to remove them all. If a specified trigger is missing, the entire stack operation fails
/// and is rolled back.
/// </para>
/// </summary>
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
[Alias("RollbackConfiguration_RollbackTriggers")]
public Amazon.CloudFormation.Model.RollbackTrigger[] RollbackConfiguration_RollbackTrigger { get; set; }
#endregion

#region Parameter StackName
/// <summary>
/// <para>
/// The name or unique stack ID of the stack to update.
/// </para>
/// </summary>
#if !MODULAR
[System.Management.Automation.Parameter(Position = 0, ValueFromPipelineByPropertyName = true, ValueFromPipeline = true)]
#else
[System.Management.Automation.Parameter(Position = 0, ValueFromPipelineByPropertyName = true, ValueFromPipeline = true, Mandatory = true)]
[System.Management.Automation.AllowEmptyString]
[System.Management.Automation.AllowNull]
#endif
[Amazon.PowerShell.Common.AWSRequiredParameter]
public System.String StackName { get; set; }
#endregion
#region Parameter StackPolicyBody
/// <summary>
/// <para>
/// Structure containing a new stack policy body. You can specify either
/// <code>StackPolicyBody</code> or <code>StackPolicyURL</code>, but not both. You might
/// update the stack policy, for example, to protect a new resource created during a stack
/// update. If you do not specify a stack policy, the current policy associated with the
/// stack is unchanged.
/// </para>
/// </summary>
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
public System.String StackPolicyBody { get; set; }
#endregion

#region Parameter StackPolicyDuringUpdateBody
/// <summary>
/// <para>
/// Structure containing the temporary overriding stack policy body. You can specify either
/// <code>StackPolicyDuringUpdateBody</code> or <code>StackPolicyDuringUpdateURL</code>, but
/// not both. To update protected resources, specify a temporary overriding stack policy for
/// this update; otherwise the current policy associated with the stack is used.
/// </para>
/// </summary>
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
public System.String StackPolicyDuringUpdateBody { get; set; }
#endregion

#region Parameter StackPolicyDuringUpdateURL
/// <summary>
/// <para>
/// Location of a file containing the temporary overriding stack policy: a URL pointing to a
/// policy (max size: 16KB) in an S3 bucket in the same Region as the stack. You can specify
/// either <code>StackPolicyDuringUpdateBody</code> or <code>StackPolicyDuringUpdateURL</code>,
/// but not both. To update protected resources, specify a temporary overriding stack policy
/// for this update; otherwise the current policy associated with the stack is used.
/// </para>
/// </summary>
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
public System.String StackPolicyDuringUpdateURL { get; set; }
#endregion

#region Parameter StackPolicyURL
/// <summary>
/// <para>
/// Location of a file containing the updated stack policy: a URL pointing to a policy
/// (max size: 16KB) in an S3 bucket in the same Region as the stack. You can specify either
/// <code>StackPolicyBody</code> or <code>StackPolicyURL</code>, but not both. If you do not
/// specify a stack policy, the current policy associated with the stack is unchanged.
/// </para>
/// </summary>
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
public System.String StackPolicyURL { get; set; }
#endregion

#region Parameter Tag
/// <summary>
/// <para>
/// Key-value pairs to associate with this stack (maximum of 50 tags); CloudFormation also
/// propagates them to supported resources in the stack. Omit this parameter to leave the
/// stack's tags unmodified; specify an empty value to remove all associated tags.
/// </para>
/// </summary>
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
[Alias("Tags")]
public Amazon.CloudFormation.Model.Tag[] Tag { get; set; }
#endregion

#region Parameter TemplateBody
/// <summary>
/// <para>
/// Structure containing the template body (minimum length 1 byte, maximum 51,200 bytes);
/// see "Template Anatomy" in the AWS CloudFormation User Guide. Conditional: specify exactly
/// one of <code>TemplateBody</code>, <code>TemplateURL</code>, or
/// <code>UsePreviousTemplate</code> set to <code>true</code>.
/// </para>
/// </summary>
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
public System.String TemplateBody { get; set; }
#endregion

#region Parameter TemplateURL
/// <summary>
/// <para>
/// Location of a file containing the template body: a URL pointing to a template located in
/// an Amazon S3 bucket; see "Template Anatomy" in the AWS CloudFormation User Guide.
/// Conditional: specify exactly one of <code>TemplateBody</code>, <code>TemplateURL</code>,
/// or <code>UsePreviousTemplate</code> set to <code>true</code>.
/// </para>
/// </summary>
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
public System.String TemplateURL { get; set; }
#endregion
#region Parameter UsePreviousTemplate
/// <summary>
/// <para>
/// Reuse the existing template that is associated with the stack you are updating.
/// Conditional: specify exactly one of <code>TemplateBody</code>, <code>TemplateURL</code>,
/// or <code>UsePreviousTemplate</code> set to <code>true</code>.
/// </para>
/// </summary>
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
public System.Boolean? UsePreviousTemplate { get; set; }
#endregion

#region Parameter Select
/// <summary>
/// Use the -Select parameter to control the cmdlet output. The default value is 'StackId'.
/// Specifying -Select '*' will result in the cmdlet returning the whole service response (Amazon.CloudFormation.Model.UpdateStackResponse).
/// Specifying the name of a property of type Amazon.CloudFormation.Model.UpdateStackResponse will result in that property being returned.
/// Specifying -Select '^ParameterName' will result in the cmdlet returning the selected cmdlet parameter value.
/// </summary>
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
public string Select { get; set; } = "StackId";
#endregion

#region Parameter PassThru
/// <summary>
/// Changes the cmdlet behavior to return the value passed to the StackName parameter.
/// The -PassThru parameter is deprecated, use -Select '^StackName' instead. This parameter will be removed in a future version.
/// </summary>
[System.Obsolete("The -PassThru parameter is deprecated, use -Select '^StackName' instead. This parameter will be removed in a future version.")]
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
public SwitchParameter PassThru { get; set; }
#endregion

#region Parameter Force
/// <summary>
/// This parameter overrides confirmation prompts to force
/// the cmdlet to continue its operation. This parameter should always
/// be used with caution.
/// </summary>
[System.Management.Automation.Parameter(ValueFromPipelineByPropertyName = true)]
public SwitchParameter Force { get; set; }
#endregion
/// <summary>
/// Confirms the operation with the user (unless -Force is present), copies every bound
/// cmdlet parameter into a CmdletContext, and hands it to Execute for the service call.
/// </summary>
protected override void ProcessRecord()
{
    base.ProcessRecord();

    var resourceIdentifiersText = FormatParameterValuesForConfirmationMsg(nameof(this.StackName), MyInvocation.BoundParameters);
    if (!ConfirmShouldProceed(this.Force.IsPresent, resourceIdentifiersText, "Update-CFNStack (UpdateStack)"))
    {
        return;
    }

    var context = new CmdletContext();

    // allow for manipulation of parameters prior to loading into context
    PreExecutionContextLoad(context);

    #pragma warning disable CS0618, CS0612 //A class member was marked with the Obsolete attribute
    // -Select and the deprecated -PassThru both shape the pipeline output, and are
    // mutually exclusive; -Select wins and rejects a simultaneous -PassThru.
    if (ParameterWasBound(nameof(this.Select)))
    {
        context.Select = CreateSelectDelegate<Amazon.CloudFormation.Model.UpdateStackResponse, UpdateCFNStackCmdlet>(Select) ??
            throw new System.ArgumentException("Invalid value for -Select parameter.", nameof(this.Select));
        if (this.PassThru.IsPresent)
        {
            throw new System.ArgumentException("-PassThru cannot be used when -Select is specified.", nameof(this.Select));
        }
    }
    else if (this.PassThru.IsPresent)
    {
        // Legacy -PassThru behavior: echo the StackName parameter to the pipeline.
        context.Select = (response, cmdlet) => this.StackName;
    }
    #pragma warning restore CS0618, CS0612 //A class member was marked with the Obsolete attribute

    // Array-valued parameters are defensively copied into lists; null means "not bound".
    if (this.Capability != null)
    {
        context.Capability = new List<System.String>(this.Capability);
    }
    context.ClientRequestToken = this.ClientRequestToken;
    if (this.NotificationARNs != null)
    {
        context.NotificationARNs = new List<System.String>(this.NotificationARNs);
    }
    if (this.Parameter != null)
    {
        context.Parameter = new List<Amazon.CloudFormation.Model.Parameter>(this.Parameter);
    }
    if (this.ResourceType != null)
    {
        context.ResourceType = new List<System.String>(this.ResourceType);
    }
    context.RoleARN = this.RoleARN;
    context.RollbackConfiguration_MonitoringTimeInMinute = this.RollbackConfiguration_MonitoringTimeInMinute;
    if (this.RollbackConfiguration_RollbackTrigger != null)
    {
        context.RollbackConfiguration_RollbackTrigger = new List<Amazon.CloudFormation.Model.RollbackTrigger>(this.RollbackConfiguration_RollbackTrigger);
    }
    context.StackName = this.StackName;
    #if MODULAR
    if (this.StackName == null && ParameterWasBound(nameof(this.StackName)))
    {
        WriteWarning("You are passing $null as a value for parameter StackName which is marked as required. In case you believe this parameter was incorrectly marked as required, report this by opening an issue at https://github.com/aws/aws-tools-for-powershell/issues.");
    }
    #endif
    context.StackPolicyBody = this.StackPolicyBody;
    context.StackPolicyDuringUpdateBody = this.StackPolicyDuringUpdateBody;
    context.StackPolicyDuringUpdateURL = this.StackPolicyDuringUpdateURL;
    context.StackPolicyURL = this.StackPolicyURL;
    if (this.Tag != null)
    {
        context.Tag = new List<Amazon.CloudFormation.Model.Tag>(this.Tag);
    }
    context.TemplateBody = this.TemplateBody;
    context.TemplateURL = this.TemplateURL;
    context.UsePreviousTemplate = this.UsePreviousTemplate;

    // allow further manipulation of loaded context prior to processing
    PostExecutionContextLoad(context);

    var output = Execute(context) as CmdletOutput;
    ProcessOutput(output);
}
#region IExecutor Members
/// <summary>
/// Builds an UpdateStackRequest from the captured context, invokes the UpdateStack
/// service operation, and returns a CmdletOutput carrying either the selected pipeline
/// output plus the service response, or the error if the call failed.
/// </summary>
public object Execute(ExecutorContext context)
{
    var cmdletContext = context as CmdletContext;
    // create request
    var request = new Amazon.CloudFormation.Model.UpdateStackRequest();

    // Only bound parameters (non-null in the context) are copied onto the request.
    if (cmdletContext.Capability != null)
    {
        request.Capabilities = cmdletContext.Capability;
    }
    if (cmdletContext.ClientRequestToken != null)
    {
        request.ClientRequestToken = cmdletContext.ClientRequestToken;
    }
    if (cmdletContext.NotificationARNs != null)
    {
        request.NotificationARNs = cmdletContext.NotificationARNs;
    }
    if (cmdletContext.Parameter != null)
    {
        request.Parameters = cmdletContext.Parameter;
    }
    if (cmdletContext.ResourceType != null)
    {
        request.ResourceTypes = cmdletContext.ResourceType;
    }
    if (cmdletContext.RoleARN != null)
    {
        request.RoleARN = cmdletContext.RoleARN;
    }

    // populate RollbackConfiguration; the nested object is attached to the request
    // only if at least one of its members was actually bound.
    var requestRollbackConfigurationIsNull = true;
    request.RollbackConfiguration = new Amazon.CloudFormation.Model.RollbackConfiguration();
    System.Int32? requestRollbackConfiguration_rollbackConfiguration_MonitoringTimeInMinute = null;
    if (cmdletContext.RollbackConfiguration_MonitoringTimeInMinute != null)
    {
        requestRollbackConfiguration_rollbackConfiguration_MonitoringTimeInMinute = cmdletContext.RollbackConfiguration_MonitoringTimeInMinute.Value;
    }
    if (requestRollbackConfiguration_rollbackConfiguration_MonitoringTimeInMinute != null)
    {
        request.RollbackConfiguration.MonitoringTimeInMinutes = requestRollbackConfiguration_rollbackConfiguration_MonitoringTimeInMinute.Value;
        requestRollbackConfigurationIsNull = false;
    }
    List<Amazon.CloudFormation.Model.RollbackTrigger> requestRollbackConfiguration_rollbackConfiguration_RollbackTrigger = null;
    if (cmdletContext.RollbackConfiguration_RollbackTrigger != null)
    {
        requestRollbackConfiguration_rollbackConfiguration_RollbackTrigger = cmdletContext.RollbackConfiguration_RollbackTrigger;
    }
    if (requestRollbackConfiguration_rollbackConfiguration_RollbackTrigger != null)
    {
        request.RollbackConfiguration.RollbackTriggers = requestRollbackConfiguration_rollbackConfiguration_RollbackTrigger;
        requestRollbackConfigurationIsNull = false;
    }
    // determine if request.RollbackConfiguration should be set to null
    if (requestRollbackConfigurationIsNull)
    {
        request.RollbackConfiguration = null;
    }
    if (cmdletContext.StackName != null)
    {
        request.StackName = cmdletContext.StackName;
    }
    if (cmdletContext.StackPolicyBody != null)
    {
        request.StackPolicyBody = cmdletContext.StackPolicyBody;
    }
    if (cmdletContext.StackPolicyDuringUpdateBody != null)
    {
        request.StackPolicyDuringUpdateBody = cmdletContext.StackPolicyDuringUpdateBody;
    }
    if (cmdletContext.StackPolicyDuringUpdateURL != null)
    {
        request.StackPolicyDuringUpdateURL = cmdletContext.StackPolicyDuringUpdateURL;
    }
    if (cmdletContext.StackPolicyURL != null)
    {
        request.StackPolicyURL = cmdletContext.StackPolicyURL;
    }
    if (cmdletContext.Tag != null)
    {
        request.Tags = cmdletContext.Tag;
    }
    if (cmdletContext.TemplateBody != null)
    {
        request.TemplateBody = cmdletContext.TemplateBody;
    }
    if (cmdletContext.TemplateURL != null)
    {
        request.TemplateURL = cmdletContext.TemplateURL;
    }
    if (cmdletContext.UsePreviousTemplate != null)
    {
        request.UsePreviousTemplate = cmdletContext.UsePreviousTemplate.Value;
    }

    CmdletOutput output;

    // issue call
    var client = Client ?? CreateClient(_CurrentCredentials, _RegionEndpoint);
    try
    {
        var response = CallAWSServiceOperation(client, request);
        object pipelineOutput = null;
        pipelineOutput = cmdletContext.Select(response, this);
        output = new CmdletOutput
        {
            PipelineOutput = pipelineOutput,
            ServiceResponse = response
        };
    }
    catch (Exception e)
    {
        // Errors are captured in the output object rather than thrown, so the
        // cmdlet's standard error-reporting pipeline can handle them.
        output = new CmdletOutput { ErrorResponse = e };
    }

    return output;
}
/// <summary>
/// Creates a fresh, empty <c>CmdletContext</c> for one cmdlet invocation.
/// </summary>
public ExecutorContext CreateContext()
{
    var context = new CmdletContext();
    return context;
}
#endregion
#region AWS Service Operation Call
/// <summary>
/// Invokes the UpdateStack operation against AWS CloudFormation, using the
/// synchronous client on desktop builds and the async client (blocked on)
/// under CoreCLR.
/// </summary>
/// <param name="client">The CloudFormation service client to call.</param>
/// <param name="request">The fully populated UpdateStack request.</param>
/// <returns>The service's UpdateStack response.</returns>
private Amazon.CloudFormation.Model.UpdateStackResponse CallAWSServiceOperation(IAmazonCloudFormation client, Amazon.CloudFormation.Model.UpdateStackRequest request)
{
    Utils.Common.WriteVerboseEndpointMessage(this, client.Config, "AWS CloudFormation", "UpdateStack");
    try
    {
#if DESKTOP
        return client.UpdateStack(request);
#elif CORECLR
        return client.UpdateStackAsync(request).GetAwaiter().GetResult();
#else
#error "Unknown build edition"
#endif
    }
    catch (AmazonServiceException exc)
    {
        // A WebException inside the service exception usually means DNS/name
        // resolution failed; rewrap it with a friendlier endpoint message.
        var inner = exc.InnerException as System.Net.WebException;
        if (inner == null)
        {
            throw;
        }
        throw new Exception(Utils.Common.FormatNameResolutionFailureMessage(client.Config, inner.Message), inner);
    }
}
#endregion
/// <summary>
/// Container for the UpdateStack call's parameter values, copied from the
/// cmdlet's bound parameters and later mapped onto the service request.
/// </summary>
internal partial class CmdletContext : ExecutorContext
{
    public List<System.String> Capability { get; set; }
    public System.String ClientRequestToken { get; set; }
    public List<System.String> NotificationARNs { get; set; }
    public List<Amazon.CloudFormation.Model.Parameter> Parameter { get; set; }
    public List<System.String> ResourceType { get; set; }
    public System.String RoleARN { get; set; }
    // Sub-properties of the request's RollbackConfiguration structure.
    public System.Int32? RollbackConfiguration_MonitoringTimeInMinute { get; set; }
    public List<Amazon.CloudFormation.Model.RollbackTrigger> RollbackConfiguration_RollbackTrigger { get; set; }
    public System.String StackName { get; set; }
    public System.String StackPolicyBody { get; set; }
    public System.String StackPolicyDuringUpdateBody { get; set; }
    public System.String StackPolicyDuringUpdateURL { get; set; }
    public System.String StackPolicyURL { get; set; }
    public List<Amazon.CloudFormation.Model.Tag> Tag { get; set; }
    public System.String TemplateBody { get; set; }
    public System.String TemplateURL { get; set; }
    public System.Boolean? UsePreviousTemplate { get; set; }
    // Maps the service response to the object emitted to the pipeline
    // (the stack id by default).
    public System.Func<Amazon.CloudFormation.Model.UpdateStackResponse, UpdateCFNStackCmdlet, object> Select { get; set; } =
        (response, cmdlet) => response.StackId;
}
}
}
|
{
"pile_set_name": "Github"
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.statefun.flink.io.kinesis.polyglot;
import com.google.protobuf.Any;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.flink.statefun.flink.io.generated.KinesisEgressRecord;
import org.apache.flink.statefun.sdk.kinesis.egress.EgressRecord;
import org.apache.flink.statefun.sdk.kinesis.egress.KinesisEgressSerializer;
/**
 * A {@link KinesisEgressSerializer} for the generic Kinesis egress. Incoming messages must be
 * protobuf {@link Any} values wrapping a {@link KinesisEgressRecord}, whose fields are copied onto
 * the outgoing {@link EgressRecord}.
 */
public final class GenericKinesisEgressSerializer implements KinesisEgressSerializer<Any> {

  private static final long serialVersionUID = 1L;

  @Override
  public EgressRecord serialize(Any value) {
    final KinesisEgressRecord record = asKinesisEgressRecord(value);
    final String hashKey = record.getExplicitHashKey();

    final EgressRecord.Builder builder =
        EgressRecord.newBuilder()
            .withData(record.getValueBytes().toByteArray())
            .withStream(record.getStream())
            .withPartitionKey(record.getPartitionKey());
    // The explicit hash key is optional; only set it when actually present.
    if (hashKey != null && !hashKey.isEmpty()) {
      builder.withExplicitHashKey(hashKey);
    }
    return builder.build();
  }

  /** Unpacks the {@link Any} payload, rejecting anything that is not a KinesisEgressRecord. */
  private static KinesisEgressRecord asKinesisEgressRecord(Any message) {
    if (message.is(KinesisEgressRecord.class)) {
      try {
        return message.unpack(KinesisEgressRecord.class);
      } catch (InvalidProtocolBufferException e) {
        throw new RuntimeException(
            "Unable to unpack message as a " + KinesisEgressRecord.class.getName(), e);
      }
    }
    throw new IllegalStateException(
        "The generic Kinesis egress expects only messages of type "
            + KinesisEgressRecord.class.getName());
  }
}
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright (C) 2015 Karumi.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.byl.qrobot.menu;
import android.view.View;
/**
* Interface used to notify click events performed in ExpandableItems inside an ExpandableSelector
* widget.
*/
public interface OnExpandableItemClickListener {
  /**
   * Called when an expandable item inside an ExpandableSelector is clicked.
   *
   * @param index position of the clicked item.
   * @param view the view that was clicked.
   */
  void onExpandableItemClickListener(int index, View view);
}
|
{
"pile_set_name": "Github"
}
|
<?php
/*
* Copyright 2014 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
/**
 * Simple key/value pair carried in the warning data of a
 * UsableSubnetworksAggregatedList response.
 */
class Google_Service_Compute_UsableSubnetworksAggregatedListWarningData extends Google_Model
{
  public $key;
  public $value;

  /** @param string $key */
  public function setKey($key)
  {
    $this->key = $key;
  }
  /** @return string */
  public function getKey()
  {
    return $this->key;
  }
  /** @param string $value */
  public function setValue($value)
  {
    $this->value = $value;
  }
  /** @return string */
  public function getValue()
  {
    return $this->value;
  }
}
|
{
"pile_set_name": "Github"
}
|
// Sandstorm - Personal Cloud Sandbox
// Copyright (c) 2016 Sandstorm Development Group, Inc. and contributors
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
"use strict";
var crypto = require("crypto");
var utils = require("../utils"),
short_wait = utils.short_wait,
medium_wait = utils.medium_wait,
long_wait = utils.long_wait;
var COLLECTIONS_APP_ID = "s3u2xgmqwznz2n3apf30sm3gw1d85y029enw5pymx734cnk5n78h";
var COLLECTIONS_PACKAGE_ID = "e9408a7c077f7a9baeb9c02f0437ae40";
var COLLECTIONS_PACKAGE_URL = "https://sandstorm.io/apps/david/collections3.spk";
module.exports = {};
// Renames the currently open grain to `collectionTitle` (via the title
// prompt) and sets its description to "This is <collectionTitle>" inside the
// grain frame. Returns the browser object so callers can keep chaining.
function setGrainTitle(browser, collectionTitle) {
  return browser
    .waitForElementVisible("#grainTitle", short_wait)
    .click("#grainTitle")
    // The title is entered through a window prompt dialog.
    .setAlertText(collectionTitle)
    .acceptAlert()
    .grainFrame()
    .waitForElementVisible("button[title='add description']", short_wait)
    .click("button[title='add description']")
    .waitForElementVisible("form.description-row>input[type=text]", short_wait)
    .setValue("form.description-row>input[type=text]", "This is " + collectionTitle)
    .click("form.description-row>button")
    // Leave the grain iframe before returning control to the caller.
    .frame(null)
}
// Builds the CSS selector for the powerbox card button that corresponds to
// the grain with the given id.
function powerboxCardSelector(grainId) {
  var selector = ".popup.request .candidate-cards .powerbox-card button[data-card-id=grain-";
  selector += grainId;
  selector += "]";
  return selector;
}
// End-to-end test: Alice creates collections A/B/C, nests B and C inside A
// with different permissions, shares A to Bob; Bob creates a reference cycle
// (A inside B) and shares B to Carol; Carol's transitive access to A is then
// revoked by Alice unlinking B from A.
module.exports["Test Collections"] = function (browser) {
  // Prepend 'A' so that the default handle is valid.
  var devNameAlice = "A" + crypto.randomBytes(10).toString("hex");
  var devNameBob = "A" + crypto.randomBytes(10).toString("hex");
  var devNameCarol = "A" + crypto.randomBytes(10).toString("hex");
  // Log in as Bob and Carol once up-front, solely to capture their account
  // ids so grains can be shared to them later.
  browser
    .loginDevAccount(devNameBob)
    .executeAsync(function (done) {
      done(Meteor.userId());
    }, [], function (result) {
      var bobAccountId = result.value;
      browser.execute("window.Meteor.logout()")
        .loginDevAccount(devNameCarol)
        .executeAsync(function (done) {
          done(Meteor.userId());
        }, [], function (result) {
          var carolAccountId = result.value;
          // As Alice: install the collections app and create Collection A.
          browser.execute("window.Meteor.logout()")
            .init()
            .loginDevAccount(devNameAlice)
            .installApp(COLLECTIONS_PACKAGE_URL, COLLECTIONS_PACKAGE_ID, COLLECTIONS_APP_ID);
          browser = setGrainTitle(browser, "Collection A");
          browser.executeAsync(function (bobAccountId, done) {
            // Share Collection A to Bob.
            var grainId = Grains.findOne()._id;
            Meteor.call("newApiToken", { accountId: Meteor.userId() },
              grainId, "petname", { allAccess: null },
              { user: { accountId: bobAccountId, title: "Collection A", } },
              function(error, result) {
                done({ error: error, grainId: grainId, });
              });
          }, [bobAccountId], function (result) {
            var grainIdA = result.value.grainId;
            browser.assert.equal(!result.value.error, true);
            browser.newGrain(COLLECTIONS_APP_ID, function (grainIdB) {
              browser = setGrainTitle(browser, "Collection B");
              browser.newGrain(COLLECTIONS_APP_ID, function (grainIdC) {
                browser = setGrainTitle(browser, "Collection C");
                // As Alice: add B (editor) and C (viewer) into collection A,
                // then navigate A -> C and check C is editable via A.
                browser = browser
                  .url(browser.launch_url + "/grain/" + grainIdA)
                  .grainFrame()
                  .waitForElementVisible("table.grain-list-table>tbody>tr.add-grain>td>button", medium_wait)
                  .click("table.grain-list-table>tbody>tr.add-grain>td>button")
                  .frame(null)
                  .waitForElementVisible(powerboxCardSelector(grainIdB), short_wait)
                  .click(powerboxCardSelector(grainIdB))
                  // Add with 'editor' permissions.
                  .waitForElementVisible(".popup.request .selected-card>form input[value='0']", short_wait)
                  .click(".popup.request .selected-card>form input[value='0']")
                  .click(".popup.request .selected-card>form button.connect-button")
                  .grainFrame()
                  .waitForElementVisible("table.grain-list-table>tbody>tr.add-grain>td>button", short_wait)
                  .click("table.grain-list-table>tbody>tr.add-grain>td>button")
                  .frame(null)
                  .waitForElementVisible(powerboxCardSelector(grainIdC), short_wait)
                  .click(powerboxCardSelector(grainIdC))
                  // Add with 'viewer' permissions.
                  .waitForElementVisible(".popup.request .selected-card>form input[value='1']", short_wait)
                  .click(".popup.request .selected-card>form input[value='1']")
                  .click(".popup.request .selected-card>form button.connect-button")
                  .grainFrame()
                  .waitForElementVisible("table.grain-list-table>tbody tr:nth-child(3).grain", short_wait)
                  .click("table.grain-list-table>tbody tr:nth-child(3).grain .click-to-go")
                  .frame(null)
                  .grainFrame(grainIdC)
                  .waitForElementVisible(".description-row p", short_wait)
                  .assert.containsText(".description-row p", "This is Collection C")
                  // Alice added C herself, so she can still edit its description.
                  .waitForElementVisible(".description-row button.description-button", short_wait)
                  .frame(null)
                  .execute("window.Meteor.logout()")
                  // Log in as Bob
                  .loginDevAccount(devNameBob)
                  .url(browser.launch_url + "/grain/" + grainIdA)
                  .grainFrame()
                  .waitForElementVisible(".description-row p", short_wait)
                  .assert.containsText(".description-row p", "This is Collection A")
                  .waitForElementVisible(".description-row button.description-button", short_wait)
                  .waitForElementVisible("table.grain-list-table>tbody>tr.add-grain>td>button", short_wait)
                  .waitForElementVisible("table.grain-list-table>tbody tr:nth-child(2).grain", short_wait)
                  .assert.containsText("table.grain-list-table>tbody tr:nth-child(2).grain td>button",
                    "Collection B")
                  .click("table.grain-list-table>tbody tr:nth-child(2).grain .click-to-go")
                  .frame(null)
                  .grainFrame(grainIdB)
                  .waitForElementVisible(".description-row p", short_wait)
                  .assert.containsText(".description-row p", "This is Collection B")
                  .waitForElementVisible(".description-row button.description-button", short_wait)
                  // As Bob, add collection A to collection B, creating a cycle of references.
                  .waitForElementVisible("table.grain-list-table>tbody>tr.add-grain>td>button", short_wait)
                  .click("table.grain-list-table>tbody>tr.add-grain>td>button")
                  .frame(null)
                  .waitForElementVisible(powerboxCardSelector(grainIdA), short_wait)
                  .click(powerboxCardSelector(grainIdA))
                  // Add with 'viewer' permissions.
                  .waitForElementVisible(".popup.request .selected-card>form input[value='1']", short_wait)
                  .click(".popup.request .selected-card>form input[value='1']")
                  .click(".popup.request .selected-card>form button.connect-button")
                  // Navigate back to collection A by clicking on it in collection B.
                  .grainFrame()
                  .waitForElementVisible("table.grain-list-table>tbody tr:nth-child(2).grain", short_wait)
                  .click("table.grain-list-table>tbody tr:nth-child(2).grain .click-to-go")
                  .frame(null)
                  .grainFrame(grainIdA)
                  .waitForElementVisible("table.grain-list-table>tbody>tr.add-grain>td>button", short_wait)
                  .waitForElementVisible("table.grain-list-table>tbody tr:nth-child(3).grain", short_wait)
                  .assert.containsText("table.grain-list-table>tbody tr:nth-child(3).grain td>button",
                    "Collection C")
                  .click("table.grain-list-table>tbody tr:nth-child(3).grain .click-to-go")
                  .frame(null)
                  .grainFrame(grainIdC)
                  .waitForElementVisible(".description-row p", short_wait)
                  .assert.containsText(".description-row p", "This is Collection C")
                  // C was shared to Bob as viewer-through-A: no edit button.
                  .assert.elementNotPresent(".description-row button.description-button")
                  .frame(null)
                  .executeAsync(function (carolAccountId, grainIdB, done) {
                    // As Bob, share Collection B to Carol.
                    Meteor.call("newApiToken", { accountId: Meteor.userId() },
                      grainIdB, "petname", { allAccess: null },
                      { user: { accountId: carolAccountId, title: "Collection B", } },
                      function(error, result) {
                        done({ error: error });
                      });
                  }, [carolAccountId, grainIdB], function (result) {
                    browser.assert.equal(!result.value.error, true);
                    browser
                      .execute("window.Meteor.logout()")
                      // Log in as Carol
                      .loginDevAccount(devNameCarol)
                      .url(browser.launch_url + "/grain/" + grainIdB)
                      .grainFrame()
                      .waitForElementVisible(".description-row p", short_wait)
                      .assert.containsText(".description-row p", "This is Collection B")
                      .waitForElementVisible(".description-row button.description-button", short_wait)
                      .waitForElementVisible("table.grain-list-table>tbody tr:nth-child(2).grain",
                        short_wait)
                      .assert.containsText("table.grain-list-table>tbody tr:nth-child(2).grain td>button",
                        "Collection A")
                      .click("table.grain-list-table>tbody tr:nth-child(2).grain .click-to-go")
                      .grainFrame(grainIdA)
                      .waitForElementVisible(".description-row p", short_wait)
                      .assert.containsText(".description-row p", "This is Collection A")
                      // Carol does not have edit permissions.
                      .assert.elementNotPresent(".description-row button.description-button")
                      .frame(null)
                      .execute("window.Meteor.logout()")
                      // Log back in as Alice
                      .loginDevAccount(devNameAlice)
                      .url(browser.launch_url + "/grain/" + grainIdA)
                      .disableGuidedTour()
                      .grainFrame()
                      // Unlink collection B from collection A.
                      .waitForElementVisible("table.grain-list-table>tbody tr:nth-child(3).grain",
                        short_wait)
                      .waitForElementVisible("table.grain-list-table>tbody tr:nth-child(2).grain",
                        short_wait)
                      .assert.containsText("table.grain-list-table>tbody tr:nth-child(2).grain td>button",
                        "Collection B")
                      .click("table.grain-list-table>tbody tr:nth-child(2).grain td>input[type=checkbox]")
                      .waitForElementVisible(".bulk-action-buttons>button[title='unlink selected grains']",
                        short_wait)
                      .click(".bulk-action-buttons>button[title='unlink selected grains']")
                      .frame(null)
                      .execute("window.Meteor.logout()")
                      // Log back in as Carol. Check that she can no longer access collection A.
                      .loginDevAccount(devNameCarol)
                      // Add some characters onto the end of the URL because otherwise we trigger
                      // the grain-tab restore logic and tabs open for both Collection A and Collection B.
                      .url(browser.launch_url + "/grain/" + grainIdA + "/#")
                      .waitForElementVisible(".grain-interstitial.request-access", medium_wait)
                      .end();
                  });
              });
            });
          });
        });
    });
};
// End-to-end test: a sharing webkey for Collection A still works for an
// anonymous visitor, including navigating into nested Collection B, and a
// logged-in account can redeem B's shared URL via the identity interstitial.
module.exports["Test collections anonymous user"] = function (browser) {
  // As a fresh dev account: install the app and create Collection A.
  browser = browser
    .init()
    .loginDevAccount()
    .installApp(COLLECTIONS_PACKAGE_URL, COLLECTIONS_PACKAGE_ID, COLLECTIONS_APP_ID);
  browser = setGrainTitle(browser, "Collection A");
  browser.executeAsync(function (done) {
    // Mint a sharing webkey token for Collection A.
    var grainId = Grains.findOne()._id;
    Meteor.call("newApiToken", { accountId: Meteor.userId() },
      grainId, "petname", { allAccess: null },
      { webkey: { forSharing: true }, },
      function(error, result) {
        done({ error: error, grainId: grainId, token: (result || {}).token });
      });
  }, [], function (result) {
    var grainIdA = result.value.grainId;
    var tokenA = result.value.token;
    browser.assert.equal(!result.value.error, true);
    browser.newGrain(COLLECTIONS_APP_ID, function (grainIdB) {
      browser = setGrainTitle(browser, "Collection B");
      browser
        .url(browser.launch_url + "/grain/" + grainIdA)
        .grainFrame()
        .waitForElementVisible("table.grain-list-table>tbody>tr.add-grain>td>button", medium_wait)
        .click("table.grain-list-table>tbody>tr.add-grain>td>button")
        .frame(null)
        .waitForElementVisible(powerboxCardSelector(grainIdB), short_wait)
        .click(powerboxCardSelector(grainIdB))
        // Add with 'editor' permissions.
        .waitForElementVisible(".popup.request .selected-card>form input[value='0']", short_wait)
        .click(".popup.request .selected-card>form input[value='0']")
        .click(".popup.request .selected-card>form button.connect-button")
        // Visit token A anonymously. The link should still work.
        .frame(null)
        .execute("window.Meteor.logout()")
        .url(browser.launch_url + "/shared/" + tokenA)
        .waitForElementVisible(".popup.login button.close-popup", short_wait)
        .click(".popup.login button.close-popup")
        .grainFrame()
        .waitForElementVisible(".description-row p", short_wait)
        .assert.containsText(".description-row p", "This is Collection A")
        .waitForElementVisible(".description-row button.description-button", short_wait)
        .waitForElementVisible("table.grain-list-table>tbody tr:nth-child(1).grain", short_wait)
        .assert.containsText("table.grain-list-table>tbody tr:nth-child(1).grain td>button",
          "Collection B")
        // Navigate into nested Collection B while still anonymous.
        .click("table.grain-list-table>tbody tr:nth-child(1).grain .click-to-go")
        .frame(null)
        .grainFrame(grainIdB)
        .waitForElementVisible(".description-row p", short_wait)
        .assert.containsText(".description-row p", "This is Collection B")
        .waitForElementVisible(".description-row button.description-button", short_wait)
        .frame(null)
        .url(function (sharedUrlGrainB) {
          // Now log in and redeem B's shared URL through the interstitial.
          browser.loginDevAccount()
            .url(sharedUrlGrainB.value)
            .waitForElementVisible(".grain-interstitial button.reveal-identity-button", short_wait)
            .click(".grain-interstitial button.reveal-identity-button")
            .grainFrame(grainIdB)
            .end();
        });
    });
  });
}
|
{
"pile_set_name": "Github"
}
|
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
"context"
v1beta1 "k8s.io/api/authentication/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schema "k8s.io/apimachinery/pkg/runtime/schema"
testing "k8s.io/client-go/testing"
)
// FakeTokenReviews implements TokenReviewInterface
// (a client-gen generated test fake backed by the shared Fake action recorder).
type FakeTokenReviews struct {
	Fake *FakeAuthenticationV1beta1
}

// GroupVersionResource/Kind identifying TokenReview objects in recorded actions.
var tokenreviewsResource = schema.GroupVersionResource{Group: "authentication.k8s.io", Version: "v1beta1", Resource: "tokenreviews"}

var tokenreviewsKind = schema.GroupVersionKind{Group: "authentication.k8s.io", Version: "v1beta1", Kind: "TokenReview"}
// Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any.
// The call is recorded as a root-level create action on the fake; the fake's
// reaction chain supplies the returned object.
func (c *FakeTokenReviews) Create(ctx context.Context, tokenReview *v1beta1.TokenReview, opts v1.CreateOptions) (result *v1beta1.TokenReview, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewRootCreateAction(tokenreviewsResource, tokenReview), &v1beta1.TokenReview{})
	// A nil object means no reaction produced a result; propagate only the error.
	if obj == nil {
		return nil, err
	}
	return obj.(*v1beta1.TokenReview), err
}
|
{
"pile_set_name": "Github"
}
|
{
"title": "Predefined Symbols (Marker)",
"callback": "initMap",
"libraries": [],
"version": "weekly",
"tag": "marker_symbol_predefined",
"name": "marker-symbol-predefined"
}
|
{
"pile_set_name": "Github"
}
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package streaming implements encoder and decoder for streams
// of runtime.Objects over io.Writer/Readers.
package streaming
import (
"bytes"
"fmt"
"io"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// Encoder is a runtime.Encoder on a stream.
type Encoder interface {
	// Encode will write the provided object to the stream or return an error. It obeys the same
	// contract as runtime.VersionedEncoder.
	Encode(obj runtime.Object) error
}

// Decoder is a runtime.Decoder from a stream.
type Decoder interface {
	// Decode will return io.EOF when no more objects are available.
	Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error)
	// Close closes the underlying stream.
	Close() error
}

// Serializer is a factory for creating encoders and decoders that work over streams.
type Serializer interface {
	// NewEncoder wraps w in a streaming Encoder.
	NewEncoder(w io.Writer) Encoder
	// NewDecoder wraps r in a streaming Decoder.
	NewDecoder(r io.ReadCloser) Decoder
}
// decoder implements Decoder over an io.ReadCloser, reading one frame at a
// time into a growable buffer.
type decoder struct {
	reader  io.ReadCloser
	decoder runtime.Decoder
	// buf holds the current frame; it is doubled when the reader reports
	// io.ErrShortBuffer, up to maxBytes.
	buf      []byte
	maxBytes int
	// resetRead is true while the tail of an oversized frame is being
	// read and discarded.
	resetRead bool
}
// NewDecoder creates a streaming decoder that reads object chunks from r and decodes them with d.
// The reader is expected to return ErrShortRead if the provided buffer is not large enough to read
// an entire object.
func NewDecoder(r io.ReadCloser, d runtime.Decoder) Decoder {
	const initialBufferSize = 1024
	const defaultMaxBytes = 16 * 1024 * 1024

	dec := &decoder{}
	dec.reader = r
	dec.decoder = d
	dec.buf = make([]byte, initialBufferSize)
	dec.maxBytes = defaultMaxBytes
	return dec
}
// ErrObjectTooLarge is returned by Decode when a frame exceeds the decoder's maxBytes limit.
var ErrObjectTooLarge = fmt.Errorf("object to decode was longer than maximum allowed size")
// Decode reads the next object from the stream and decodes it.
//
// The reader signals an incomplete frame with io.ErrShortBuffer; on that
// signal the buffer is doubled (up to maxBytes) and reading resumes. A frame
// larger than maxBytes yields ErrObjectTooLarge, and the remainder of that
// frame is drained and discarded on subsequent calls via resetRead.
func (d *decoder) Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
	base := 0
	for {
		n, err := d.reader.Read(d.buf[base:])
		if err == io.ErrShortBuffer {
			if n == 0 {
				return nil, nil, fmt.Errorf("got short buffer with n=0, base=%d, cap=%d", base, cap(d.buf))
			}
			if d.resetRead {
				// Still draining an oversized frame; discard what was read.
				continue
			}
			// double the buffer size up to maxBytes
			if len(d.buf) < d.maxBytes {
				base += n
				d.buf = append(d.buf, make([]byte, len(d.buf))...)
				continue
			}
			// must read the rest of the frame (until we stop getting ErrShortBuffer)
			d.resetRead = true
			base = 0
			return nil, nil, ErrObjectTooLarge
		}
		if err != nil {
			return nil, nil, err
		}
		if d.resetRead {
			// now that we have drained the large read, continue
			d.resetRead = false
			continue
		}
		base += n
		break
	}
	return d.decoder.Decode(d.buf[:base], defaults, into)
}
// Close closes the underlying reader.
func (d *decoder) Close() error {
	return d.reader.Close()
}
// encoder implements Encoder by serializing each object into an internal
// buffer and flushing it to the writer in a single Write call.
type encoder struct {
	writer  io.Writer
	encoder runtime.Encoder
	// buf is reset and reused across Encode calls.
	buf *bytes.Buffer
}
// NewEncoder returns a new streaming encoder that serializes objects with e
// and writes each serialized object to w.
func NewEncoder(w io.Writer, e runtime.Encoder) Encoder {
	enc := &encoder{}
	enc.writer = w
	enc.encoder = e
	enc.buf = &bytes.Buffer{}
	return enc
}
// Encode writes the provided object to the nested writer.
//
// The internal buffer is always reset afterwards — including when the nested
// encoder fails — so a partially-encoded object cannot leak into the next
// frame. (Previously the buffer kept its partial contents on an encode
// error, corrupting every subsequent Encode on this encoder.)
func (e *encoder) Encode(obj runtime.Object) error {
	defer e.buf.Reset()
	if err := e.encoder.Encode(obj, e.buf); err != nil {
		return err
	}
	_, err := e.writer.Write(e.buf.Bytes())
	return err
}
|
{
"pile_set_name": "Github"
}
|
#
# Author:: Daniel DeLeo (<dan@chef.io>)
# Copyright:: Copyright (c) Chef Software Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Chef
  class RunList
    # A single entry in a node's run list: a recipe (optionally pinned to a
    # version) or a role. Items can be built from a String such as
    # "recipe[foo@1.0.0]", "role[web]", "foo@1.0.0", "foo", or from a Hash
    # with "type"/"name" (and optional "version") keys.
    class RunListItem
      # recipe[name] or recipe[name@x.y] / recipe[name@x.y.z]
      QUALIFIED_RECIPE = /^recipe\[([^\]@]+)(@([0-9]+(\.[0-9]+){1,2}))?\]$/.freeze
      # role[name]
      QUALIFIED_ROLE = /^role\[([^\]]+)\]$/.freeze
      # name@x.y / name@x.y.z (a versioned recipe without the recipe[] wrapper)
      VERSIONED_UNQUALIFIED_RECIPE = /^([^@]+)(@([0-9]+(\.[0-9]+){1,2}))$/.freeze
      # Any other bracketed string, e.g. "Recipe[foo]" or "roles[bar]" —
      # almost certainly a typo of a qualified form.
      FALSE_FRIEND = /[\[\]]/.freeze

      attr_reader :name, :type, :version

      # @param item [Hash, String] the run list entry to parse.
      # @raise [ArgumentError] for unsupported types or malformed strings.
      def initialize(item)
        @version = nil
        case item
        when Hash
          assert_hash_is_valid_run_list_item!(item)
          @type = (item["type"] || item[:type]).to_sym
          @name = item["name"] || item[:name]
          if item.key?("version") || item.key?(:version)
            @version = item["version"] || item[:version]
          end
        when String
          if match = QUALIFIED_RECIPE.match(item)
            # recipe[recipe_name]
            # recipe[recipe_name@1.0.0]
            @type = :recipe
            @name = match[1]
            @version = match[3] if match[3]
          elsif match = QUALIFIED_ROLE.match(item)
            # role[role_name]
            @type = :role
            @name = match[1]
          elsif match = VERSIONED_UNQUALIFIED_RECIPE.match(item)
            # recipe_name@1.0.0
            @type = :recipe
            @name = match[1]
            @version = match[3] if match[3]
          elsif FALSE_FRIEND.match(item)
            # Recipe[recipe_name]
            # roles[role_name]
            # FALSE_FRIEND has no capture group, so extract the bracketed name
            # from the item itself for a helpful suggestion. (Previously this
            # read match[1], which was always nil and rendered the message as
            # "must be recipe[] or role[]".)
            name = item[/\[([^\]]*)\]/, 1]
            raise ArgumentError, "Unable to create #{self.class} from #{item.class}:#{item.inspect}: must be recipe[#{name}] or role[#{name}]"
          else
            # bare recipe_name
            @type = :recipe
            @name = item
          end
        else
          raise ArgumentError, "Unable to create #{self.class} from #{item.class}:#{item.inspect}: must be a Hash or String"
        end
      end

      # Canonical string form, e.g. "recipe[foo@1.0.0]" or "role[web]".
      def to_s
        "#{@type}[#{@name}#{@version ? "@#{@version}" : ""}]"
      end

      def role?
        @type == :role
      end

      def recipe?
        @type == :recipe
      end

      # Equal to a String when the canonical form matches it; otherwise equal
      # to any object whose type, name and version all match.
      def ==(other)
        if other.is_a?(String)
          to_s == other.to_s
        else
          other.respond_to?(:type) && other.respond_to?(:name) && other.respond_to?(:version) && other.type == @type && other.name == @name && other.version == @version
        end
      end

      # Validates that a Hash item carries the required type and name keys
      # (as Strings or Symbols).
      def assert_hash_is_valid_run_list_item!(item)
        unless (item.key?("type") || item.key?(:type)) && (item.key?("name") || item.key?(:name))
          raise ArgumentError, "Initializing a #{self.class} from a hash requires that it have a 'type' and 'name' key"
        end
      end
    end
  end
end
|
{
"pile_set_name": "Github"
}
|
/**
* Copyright 2013-2020 the original author or authors from the JHipster project.
*
* This file is part of the JHipster project, see http://www.jhipster.tech/
* for more information.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Names of the supported binary options.
const Options = {
  DTO: 'dto',
  SERVICE: 'service',
  PAGINATION: 'pagination',
  MICROSERVICE: 'microservice',
  SEARCH: 'search',
  ANGULAR_SUFFIX: 'angularSuffix',
  CLIENT_ROOT_FOLDER: 'clientRootFolder'
};

// All option names, in declaration order.
const optionNames = Object.values(Options);

// Allowed values per option. Options absent here (microservice,
// angularSuffix, clientRootFolder) take free-form values — see exists().
const Values = {
  [Options.DTO]: { MAPSTRUCT: 'mapstruct' },
  [Options.SERVICE]: { SERVICE_CLASS: 'serviceClass', SERVICE_IMPL: 'serviceImpl' },
  [Options.PAGINATION]: {
    PAGINATION: 'pagination',
    'INFINITE-SCROLL': 'infinite-scroll'
  },
  [Options.SEARCH]: { ELASTIC_SEARCH: 'elasticsearch', COUCHBASE: 'couchbase' }
};
/**
 * Iterates over every known binary option name, invoking the callback with
 * the usual (value, index, array) arguments.
 *
 * @throws {Error} when no callback is provided.
 */
function forEach(passedFunction) {
  if (!passedFunction) {
    throw new Error('A function has to be passed to loop over the binary options.');
  }
  for (let i = 0; i < optionNames.length; i++) {
    passedFunction(optionNames[i], i, optionNames);
  }
}
/**
 * Checks whether the given option/value pair is acceptable.
 * Unknown options are considered acceptable (returns true), preserving the
 * module's permissive behavior; free-form options (microservice,
 * angularSuffix, clientRootFolder) accept any value, the rest must use one
 * of their declared values.
 */
function exists(passedOption, passedValue) {
  const knownOptions = Object.values(Options);
  if (!knownOptions.includes(passedOption)) {
    return true;
  }
  return knownOptions.some(option => {
    if (passedOption !== option) {
      return false;
    }
    if (
      passedOption === Options.MICROSERVICE ||
      passedOption === Options.ANGULAR_SUFFIX ||
      passedOption === Options.CLIENT_ROOT_FOLDER
    ) {
      return true;
    }
    return Object.values(Values[option]).includes(passedValue);
  });
}
// Public API of the binary-options module.
module.exports = {
  Options,
  Values,
  exists,
  forEach
};
|
{
"pile_set_name": "Github"
}
|
# Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import, division, print_function
from rlgraph import get_backend
from rlgraph.components.component import Component
from rlgraph.utils.decorators import rlgraph_api
from rlgraph.utils.ops import DataOpDict, DataOpTuple, FLATTEN_SCOPE_PREFIX
class ContainerMerger(Component):
    """
    Merges incoming items into one FlattenedDataOp.

    Depending on construction arguments the inputs are merged either into a
    DataOpTuple (positional) or into a DataOpDict keyed by the given names.
    """
    def __init__(self, *input_names_or_num_items, **kwargs):
        """
        Args:
            *input_names_or_num_items (Union[str,int]): List of the names of the different inputs in the
                order they will be passed into the `merge` API-method in the returned merged Dict.
                Or the number of items in the Tuple to be merged.
                Example:
                input_names_or_num_items = ["A", "B"]
                - merge(Dict(c=1, d=2), Tuple(3, 4))
                - returned value: Dict(A=Dict(c=1, d=2), B=Tuple(3, 4))
                input_names_or_num_items = 3: 3 items will be merged into a Tuple.

        Keyword Args:
            merge_tuples_into_one (bool): Whether to merge incoming DataOpTuples into one single DataOpTuple.
                If True: tupleA + tupleB -> (tupleA[0] + tupleA[1] + tupleA[...] + tupleB[0] + tupleB[1] ...).
                If False: tupleA + tupleB -> (tupleA + tupleB).
            is_tuple (bool): Whether we should merge a tuple.
        """
        # Flattening incoming tuples implies tuple output unless overridden.
        self.merge_tuples_into_one = kwargs.pop("merge_tuples_into_one", False)
        self.is_tuple = kwargs.pop("is_tuple", self.merge_tuples_into_one)
        super(ContainerMerger, self).__init__(scope=kwargs.pop("scope", "container-merger"), **kwargs)
        # Flat keys for the output dict; stays None in tuple mode.
        self.dict_keys = None
        # A single int argument selects tuple mode ("merge N items into a Tuple").
        if len(input_names_or_num_items) == 1 and isinstance(input_names_or_num_items[0], int):
            self.is_tuple = True
        else:
            # Keys must be plain strings. NOTE(review): a stale comment here
            # suggested '/' characters are presumably not allowed in keys
            # either -- confirm upstream before relying on that.
            assert all(isinstance(i, str) for i in input_names_or_num_items), \
                "ERROR: Not all input names of DictMerger Component '{}' are strings.".format(self.global_scope)
            self.dict_keys = input_names_or_num_items

    def check_input_spaces(self, input_spaces, action_space=None):
        """Collects the Spaces of all present `inputs[i]` keys and, in dict
        mode, asserts their count matches the number of `dict_keys`."""
        spaces = []
        idx = 0
        while True:
            key = "inputs[{}]".format(idx)
            if key not in input_spaces:
                break
            spaces.append(input_spaces[key])
            idx += 1
        # If Tuple -> Incoming inputs could be of any number.
        if self.dict_keys:
            len_ = len(self.dict_keys)
            assert len(spaces) == len_,\
                "ERROR: Number of incoming Spaces ({}) does not match number of given `dict_keys` ({}) in" \
                "ContainerMerger Component '{}'!".format(len(spaces), len_, self.global_scope)

    @rlgraph_api
    def _graph_fn_merge(self, *inputs):
        """
        Merges the inputs into a single DataOpDict OR DataOpTuple with the flat keys given in `self.dict_keys`.

        Args:
            *inputs (FlattenedDataOp): The input items to be merged into a ContainerDataOp.

        Returns:
            ContainerDataOp: The DataOpDict or DataOpTuple as a merger of all *inputs.
        """
        if self.is_tuple is True:
            ret = []
            for op in inputs:
                # Merge single items inside a DataOpTuple into resulting tuple.
                if self.merge_tuples_into_one and isinstance(op, DataOpTuple):
                    ret.extend(list(op))
                # Strict by-input merging.
                else:
                    ret.append(op)
            return DataOpTuple(ret)
        else:
            ret = DataOpDict()
            for i, op in enumerate(inputs):
                # Define-by-run pytorch keys carry the flatten-scope prefix.
                if get_backend() == "pytorch" and self.execution_mode == "define_by_run":
                    ret[FLATTEN_SCOPE_PREFIX + self.dict_keys[i]] = op
                else:
                    ret[self.dict_keys[i]] = op
            return ret
|
{
"pile_set_name": "Github"
}
|
// RUN: %clang_cc1 -fsyntax-only -verify %s
#include "Inputs/cuda.h"
// expected-no-diagnostics
// Check that we can handle gnu_inline functions when compiling in CUDA mode.
// Declaration referenced from the gnu_inline function below.
void foo();
// The gnu_inline attribute must be accepted without diagnostics in CUDA mode.
inline __attribute__((gnu_inline)) void bar() { foo(); }
|
{
"pile_set_name": "Github"
}
|
/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
*
* Copyright (c) 1997-2011 Oracle and/or its affiliates. All rights reserved.
*
* The contents of this file are subject to the terms of either the GNU
* General Public License Version 2 only ("GPL") or the Common Development
* and Distribution License("CDDL") (collectively, the "License"). You
* may not use this file except in compliance with the License. You can
* obtain a copy of the License at
* https://glassfish.dev.java.net/public/CDDL+GPL_1_1.html
* or packager/legal/LICENSE.txt. See the License for the specific
* language governing permissions and limitations under the License.
*
* When distributing the software, include this License Header Notice in each
* file and include the License file at packager/legal/LICENSE.txt.
*
* GPL Classpath Exception:
* Oracle designates this particular file as subject to the "Classpath"
* exception as provided by Oracle in the GPL Version 2 section of the License
* file that accompanied this code.
*
* Modifications:
* If applicable, add the following below the License Header, with the fields
* enclosed by brackets [] replaced by your own identifying information:
* "Portions Copyright [year] [name of copyright owner]"
*
* Contributor(s):
* If you wish your version of this file to be governed by only the CDDL or
* only the GPL Version 2, indicate your decision by adding "[Contributor]
* elects to include this software in this distribution under the [CDDL or GPL
* Version 2] license." If you don't indicate a single choice of license, a
* recipient has the option to distribute your version of this file under
* either the CDDL, the GPL Version 2 or to extend the choice of license to
* its licensees as provided above. However, if you add GPL Version 2 code
* and therefore, elected the GPL Version 2 license, then the option applies
* only if the new code is made subject to such option by the copyright
* holder.
*/
// Portions Copyright [2018] [Payara Foundation and/or its affiliates]
package com.sun.enterprise.security.jauth;
import java.util.*;
import java.lang.reflect.Method;
import java.lang.reflect.InvocationTargetException;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.security.auth.login.AppConfigurationEntry;
/**
* Shared logic from Client and ServerAuthContext reside here.
*/
final class AuthContext {

    // Names of the module methods dispatched reflectively via invoke().
    static final String INIT = "initialize";
    static final String DISPOSE_SUBJECT = "disposeSubject";
    static final String SECURE_REQUEST = "secureRequest";
    static final String VALIDATE_RESPONSE = "validateResponse";
    static final String VALIDATE_REQUEST = "validateRequest";
    static final String SECURE_RESPONSE = "secureResponse";

    // managesSessions method is implemented by looking for
    // corresponding option value in module configuration
    static final String MANAGES_SESSIONS = "managesSessions";
    static final String MANAGES_SESSIONS_OPTION = "managessessions";

    private ConfigFile.Entry[] entries;
    private Logger logger;

    AuthContext(ConfigFile.Entry[] entries,
            Logger logger) throws AuthException {
        this.entries = entries;
        this.logger = logger;
    }

    /**
     * Invoke modules according to configuration.
     *
     * @param methodName name of the module method to invoke on every entry.
     * @param args arguments passed to each reflective invocation.
     * @return one return value per configured module entry.
     * @throws AuthException if the module stack fails according to its
     *         control flags, or if a module does not support the method.
     */
    Object[] invoke(final String methodName, final Object[] args)
            throws AuthException {
        // invoke modules in a doPrivileged
        final Object[] rValues = new Object[entries.length];
        try {
            java.security.AccessController.doPrivileged(new java.security.PrivilegedExceptionAction<Object>() {
                @Override
                public Object run() throws AuthException {
                    invokePriv(methodName, args, rValues);
                    return null;
                }
            });
        } catch (java.security.PrivilegedActionException pae) {
            // Unwrap the AuthException thrown inside the privileged block;
            // preserve any other cause on a fresh AuthException.
            if (pae.getException() instanceof AuthException) {
                throw (AuthException) pae.getException();
            } else {
                AuthException ae = new AuthException();
                ae.initCause(pae.getException());
                throw ae;
            }
        }
        return rValues;
    }

    /**
     * Invokes {@code methodName} on every configured module, honoring the
     * JAAS-style control flags (SUFFICIENT/REQUISITE/REQUIRED/OPTIONAL).
     * Results are stored per module into {@code rValues}.
     */
    void invokePriv(String methodName, Object[] args, Object[] rValues)
            throws AuthException {

        // special treatment for managesSessions until the module
        // interface can be extended.
        if (methodName.equals(AuthContext.MANAGES_SESSIONS)) {
            for (int i = 0; i < entries.length; i++) {
                Map options = entries[i].getOptions();
                String mS = (String) options.get(AuthContext.MANAGES_SESSIONS_OPTION);
                rValues[i] = Boolean.valueOf(mS);
            }
            return;
        }

        boolean success = false;                // true once any module succeeded
        AuthException firstRequiredError = null;
        AuthException firstError = null;

        // XXX no way to reverse module invocation
        for (int i = 0; i < entries.length; i++) {

            // get initialized module instance
            Object module = entries[i].module;

            // invoke the module
            try {
                // BUGFIX: the "method found" check must be per module.
                // Previously a single flag was set on the first successful
                // module and never reset, so a LATER module lacking the
                // method was silently skipped (leaving rValues[i] null)
                // instead of triggering the documented exception below.
                boolean found = false;
                Method[] mArray = module.getClass().getMethods();
                for (int j = 0; j < mArray.length; j++) {
                    if (mArray[j].getName().equals(methodName)) {

                        // invoke module
                        rValues[i] = mArray[j].invoke(module, args);

                        // success -
                        // return if SUFFICIENT and no previous REQUIRED errors
                        if (firstRequiredError == null &&
                                entries[i].getControlFlag() == AppConfigurationEntry.LoginModuleControlFlag.SUFFICIENT) {
                            if (logger != null && logger.isLoggable(Level.FINE)) {
                                logger.fine(entries[i].getLoginModuleName() +
                                        "." +
                                        methodName +
                                        " SUFFICIENT success");
                            }
                            return;
                        }

                        if (logger != null && logger.isLoggable(Level.FINE)) {
                            logger.fine(entries[i].getLoginModuleName() +
                                    "." +
                                    methodName +
                                    " success");
                        }
                        found = true;
                        success = true;
                        break;
                    }
                }

                if (!found) {
                    // PLEASE NOTE:
                    // this exception will be thrown if any module
                    // in the context does not support the method.
                    NoSuchMethodException nsme = new NoSuchMethodException("module " +
                            module.getClass().getName() +
                            " does not implement " +
                            methodName);
                    AuthException ae = new AuthException();
                    ae.initCause(nsme);
                    throw ae;
                }
            } catch (IllegalAccessException iae) {
                AuthException ae = new AuthException();
                ae.initCause(iae);
                throw ae;
            } catch (InvocationTargetException ite) {

                // failure cases; the module's real exception is the cause.
                AuthException ae;
                if (ite.getCause() instanceof AuthException) {
                    ae = (AuthException) ite.getCause();
                } else {
                    ae = new AuthException();
                    ae.initCause(ite.getCause());
                }

                if (entries[i].getControlFlag() == AppConfigurationEntry.LoginModuleControlFlag.REQUISITE) {
                    if (logger != null && logger.isLoggable(Level.FINE)) {
                        logger.fine(entries[i].getLoginModuleName() +
                                "." +
                                methodName +
                                " REQUISITE failure");
                    }

                    // immediately throw exception
                    if (firstRequiredError != null) {
                        throw firstRequiredError;
                    } else {
                        throw ae;
                    }
                } else if (entries[i].getControlFlag() == AppConfigurationEntry.LoginModuleControlFlag.REQUIRED) {
                    if (logger != null && logger.isLoggable(Level.FINE)) {
                        logger.fine(entries[i].getLoginModuleName() +
                                "." +
                                methodName +
                                " REQUIRED failure");
                    }

                    // save exception and continue
                    if (firstRequiredError == null) {
                        firstRequiredError = ae;
                    }
                } else {
                    if (logger != null && logger.isLoggable(Level.FINE)) {
                        logger.fine(entries[i].getLoginModuleName() +
                                "." +
                                methodName +
                                " OPTIONAL failure");
                    }

                    // save exception and continue
                    if (firstError == null) {
                        firstError = ae;
                    }
                }
            }
        }

        // done invoking entire stack of modules
        if (firstRequiredError != null) {
            throw firstRequiredError;
        } else if (firstError != null && !success) {
            throw firstError;
        }

        // if no errors, return gracefully
        if (logger != null && logger.isLoggable(Level.FINE)) {
            logger.fine("overall " + methodName + " success");
        }
    }
}
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright 2002-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.phoenixnap.oss.ramlplugin.raml2code.rules.spring;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestHeader;
import org.springframework.web.bind.annotation.RequestParam;
import com.phoenixnap.oss.ramlplugin.raml2code.data.ApiActionMetadata;
import com.phoenixnap.oss.ramlplugin.raml2code.data.ApiParameterMetadata;
import com.phoenixnap.oss.ramlplugin.raml2code.helpers.CodeModelHelper;
import com.sun.codemodel.JAnnotationUse;
import com.sun.codemodel.JClass;
import com.sun.codemodel.JVar;
/**
* Overrides method parameters set by {@link SpringMethodParamsRule}.
*
*
* @author Aleksandar Stojsavljevic (aleksandars@ccbill.com)
* @since 2.0.4
*/
public class SpringFeignClientMethodParamsRule extends SpringMethodParamsRule {

	/** Fully-qualified names of the Spring binding annotations whose "name" must be filled in. */
	private static final List<String> ANNOTATIONS_TO_OVERRIDE = new ArrayList<String>();

	static {
		ANNOTATIONS_TO_OVERRIDE.add(RequestParam.class.getName());
		ANNOTATIONS_TO_OVERRIDE.add(RequestHeader.class.getName());
		ANNOTATIONS_TO_OVERRIDE.add(PathVariable.class.getName());
	}

	@Override
	protected JVar paramQueryForm(ApiParameterMetadata paramMetaData, CodeModelHelper.JExtMethod generatableType,
			ApiActionMetadata endpointMetadata) {
		JVar generatedParam = super.paramQueryForm(paramMetaData, generatableType, endpointMetadata);
		// A feign client needs the request/header/path parameter name spelled
		// out even when it matches the method parameter name; an already
		// present name value is not overridden.
		for (JAnnotationUse bindingAnnotation : generatedParam.annotations()) {
			String annotationName = bindingAnnotation.getAnnotationClass().fullName();
			if (ANNOTATIONS_TO_OVERRIDE.contains(annotationName)) {
				bindingAnnotation.param("name", paramMetaData.getName());
			}
		}
		return generatedParam;
	}
}
|
{
"pile_set_name": "Github"
}
|
#!{{pkgPathFor "core/bash"}}/bin/bash
# Habitat run hook: starts the Chef License Control gRPC service.
set -e
# Send stderr to stdout so the supervisor captures all output in one stream.
exec 2>&1
# Call the script to block until user accepts the MLSA via the package's config
{{pkgPathFor "chef/mlsa"}}/bin/accept {{cfg.mlsa.accept}}
# Make sure the service database exists before the server starts.
pg-helper ensure-service-database chef_license_control_service
# Run the License Control gRPC server
exec license-control-service serve --config {{pkg.svc_config_path}}/config.toml
|
{
"pile_set_name": "Github"
}
|
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!159 &1
EditorSettings:
m_ObjectHideFlags: 0
serializedVersion: 7
m_ExternalVersionControlSupport: Visible Meta Files
m_SerializationMode: 2
m_LineEndingsForNewScripts: 1
m_DefaultBehaviorMode: 0
m_SpritePackerMode: 0
m_SpritePackerPaddingPower: 1
m_EtcTextureCompressorBehavior: 1
m_EtcTextureFastCompressor: 1
m_EtcTextureNormalCompressor: 2
m_EtcTextureBestCompressor: 4
m_ProjectGenerationIncludedExtensions: txt;xml;fnt;cd
m_ProjectGenerationRootNamespace:
m_UserGeneratedProjectSuffix:
m_CollabEditorSettings:
inProgressEnabled: 1
m_EnableTextureStreamingInPlayMode: 1
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright (C) 2018 Orange.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/* Parsing OpenVswitch syntax for rules is inherently hard. The main reason
* is the lack of clear lexical class for a few characters notably colon that
* is both an important action token and a character in IPv6 and mac addresses.
*
* We have decided to follow a two phase approach. First we split the text in
* main components so that filters and individual actions are recognized,
* then we split the elementary filters and actions in simpler components. For
* filters it only means checking if there is a mask. For actions, parsing may
* be more involved.
*/
package jsonof
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"strconv"
"strings"
"github.com/skydive-project/skydive/graffiti/logging"
)
// JSONRule is an openflow rule ready for JSON export
type JSONRule struct {
    Cookie uint64 `json:"Cookie"` // cookie value of the rule
    Table int `json:"Table"` // table containing the rule
    Priority int `json:"Priority"` // priority of the rule
    Meta []*Meta `json:"Meta,omitempty"` // anything that is not a filter
    Filters []*Filter `json:"Filters"` // all the filters
    Actions []*Action `json:"Actions"` // all the actions
    UUID string `json:"-"` // UUID used by skydive
    RawFilter string `json:"-"` // raw filter text, kept so the rule can be respawned
}
// JSONGroup is an openflow group ready for JSON export
type JSONGroup struct {
    GroupID uint `json:"GroupId"` // id of the group
    Type string `json:"Type"` // group type
    Meta []*Meta `json:"Meta,omitempty"` // anything that is not a bucket
    Buckets []*Bucket `json:"Buckets"` // buckets
    UUID string `json:"-"` // UUID used by skydive
}
// Bucket is the representation of a bucket in an openflow group
type Bucket struct {
    ID uint `json:"Id"` // id of the bucket
    Meta []*Meta `json:"Meta,omitempty"` // anything that is not an action
    Actions []*Action `json:"Actions"` // action list
}
// Action represents an atomic action in an openflow rule
type Action struct {
    Action string `json:"Function"` // action name
    Arguments []*Action `json:"Arguments,omitempty"` // arguments if any exist
    Key string `json:"Key,omitempty"` // key for named arguments such as k=v
}
// Filter is an elementary filter in an openflow rule
type Filter struct {
    Key string `json:"Key"` // left hand side
    Value string `json:"Value"` // right hand side
    Mask string `json:"Mask,omitempty"` // mask if used
}
// Meta is anything not a filter or an action, always as a pair key/value
type Meta struct {
    Key string `json:"Key"` // key
    Value string `json:"Value"` // raw value
}
// Token is a lexical entity produced by the scanner.
type Token int
const (
    // Token values as recognized by scan
    tNt Token = iota // "no token": marks an empty push-back slot
    tEOF // end of input
    tText // run of text (identifier, value or parenthesized body)
    tSpace // run of whitespace (collapsed into one token)
    tComma // ','
    tEqual // '='
    tClosePar // ')'
)
// Keywords recognized by the rule and group parsers.
const (
    kwActions = "actions"
    kwBucket = "bucket"
    kwBucketID = "bucket_id"
    kwCookie = "cookie"
    kwGroupID = "group_id"
    kwLoad = "load"
    kwMove = "move"
    kwPriority = "priority"
    kwSetField = "set_field"
    kwTable = "table"
    kwType = "type"
)
// TokenNames is the array of printable names for Token.
// Indexed by Token value; must stay in sync with the const block above.
var TokenNames = []string{
    "NT",
    "EOF",
    "TEXT",
    "SPACE",
    "COMMA",
    "EQUAL",
    "CPAR",
}
// eof is the sentinel rune returned by read at end of input or on error.
var eof = rune(0)
// Stream represents a text buffer that can be scanned
type Stream struct {
    r *bufio.Reader // underlying buffered reader
    last rune // one-rune push-back slot (eof when empty)
    token Token // one-token push-back slot (tNt when empty)
    value string // literal associated with the pushed-back token
}
// NewStream wraps the given reader in a Stream ready for scanning,
// with both push-back slots (rune and token) empty.
func NewStream(r io.Reader) *Stream {
    stream := &Stream{
        r:     bufio.NewReader(r),
        last:  eof,
        token: tNt,
    }
    return stream
}
// isWhitespace reports whether ch is a classical separator rune:
// space, tab or newline.
func isWhitespace(ch rune) bool {
    switch ch {
    case ' ', '\t', '\n':
        return true
    default:
        return false
    }
}
// read returns the next rune, serving a previously unread rune first.
// It yields rune(0) (eof) on any read error, including io.EOF.
func (s *Stream) read() rune {
    if pushed := s.last; pushed != eof {
        s.last = eof
        return pushed
    }
    next, _, err := s.r.ReadRune()
    if err != nil {
        return eof
    }
    return next
}
// unread places the previously read rune back on the reader.
// Only a single rune of lookahead is supported.
func (s *Stream) unread(r rune) { s.last = r }
// unscan puts back the previously read token; the next call to scan
// will return exactly this token/literal pair.
func (s *Stream) unscan(tok Token, lit string) {
    s.token = tok
    s.value = lit
}
// scan returns the next token and literal value.
// A token pushed back with unscan is served first.
// nolint: gocyclo
func (s *Stream) scan() (tok Token, lit string) {
    // Serve the push-back slot if it is occupied.
    if s.token != tNt {
        tok := s.token
        s.token = tNt
        return tok, s.value
    }
    // Read the next rune.
    ch := s.read()
    // If we see whitespace then consume all contiguous whitespace.
    // If we see a letter then consume as an ident or reserved word.
    switch ch {
    case eof:
        return tEOF, ""
    case ' ', '\t', '\n':
        // Collapse a whole run of whitespace into a single tSpace token.
        for {
            ch = s.read()
            if ch == eof {
                break
            } else if !isWhitespace(ch) {
                s.unread(ch)
                break
            }
        }
        return tSpace, ""
    case '=':
        return tEqual, ""
    case ',':
        return tComma, ""
    case ')':
        return tClosePar, ""
    default:
        // Anything else starts a text token; an opening parenthesis makes
        // fill consume up to the balancing close parenthesis.
        var buf bytes.Buffer
        buf.WriteRune(ch)
        if ch == '(' {
            s.fill(&buf, 1)
        } else {
            s.fill(&buf, 0)
        }
        return tText, buf.String()
    }
}
// fill accumulates runes into buf until a separator (space, comma, equal
// or close parenthesis) is seen at parenthesis nesting level 0. The
// terminating separator is pushed back for the caller; EOF ends the fill.
func (s *Stream) fill(buf *bytes.Buffer, parLevel int) {
fillLoop:
    for {
        ch := s.read()
        switch ch {
        case eof:
            break fillLoop
        case ' ', '\t', '\n', ',', '=':
            // Only a top-level separator ends the token.
            if parLevel == 0 {
                s.unread(ch)
                break fillLoop
            }
        case '(':
            parLevel = parLevel + 1
        case ')':
            if parLevel == 0 {
                s.unread(ch)
                break fillLoop
            }
            parLevel = parLevel - 1
        }
        _, err := buf.WriteRune(ch)
        if err != nil {
            logging.GetLogger().Errorf(
                "jsonof: fill cannot write into buffer: %s", err)
        }
    }
}
// ParseRule is the main entry point for the rule parser: it skips an
// optional leading space token, then expects the first key of the rule
// and delegates to ParseRuleEq.
func (s *Stream) ParseRule(result *JSONRule) error {
    tok, val := s.scan()
    if tok == tSpace {
        tok, val = s.scan()
    }
    if tok != tText {
        return errors.New("expecting an ident")
    }
    return s.ParseRuleEq(result, []*Meta{}, val)
}
// makeFilter converts a key/value Meta pair into a Filter, splitting an
// optional "value/mask" form into its value and mask components.
func makeFilter(pair *Meta) *Filter {
    valueAndMask := strings.Split(pair.Value, "/")
    if len(valueAndMask) != 2 {
        return &Filter{Key: pair.Key, Value: pair.Value, Mask: ""}
    }
    return &Filter{Key: pair.Key, Value: valueAndMask[0], Mask: valueAndMask[1]}
}
// ParseRuleEq implements the state of the rule parser waiting for an equal
// sign or a break signifying a next block (happens with filter abbreviations
// like ip, tcp, etc.)
func (s *Stream) ParseRuleEq(result *JSONRule, stack []*Meta, lhs string) error {
    tok, val := s.scan()
    if tok == tEqual {
        tok, val = s.scan()
        if tok == tText {
            return s.parseRulePair(result, stack, lhs, val)
        }
        return errors.New("expecting a right hand side")
    }
    if tok == tComma || tok == tSpace {
        // Bare key with no value: either a known flag kept as Meta, or
        // part of the filter block handled by ParseRuleSep.
        pair := &Meta{Key: lhs}
        stack = append(stack, pair)
        switch lhs {
        case "reset_counts", "no_packet_counts", "no_byte_counts":
            result.Meta = append(result.Meta, stack...)
            return s.ParseRule(result)
        default:
            s.unscan(tok, val)
            return s.ParseRuleSep(result, stack)
        }
    }
    return errors.New("expecting = , or ''")
}
// parseRulePair handles a parsed "lhs=rhs" pair. Well-known keys (cookie,
// table, priority) are decoded into dedicated JSONRule fields, "actions"
// switches the parser to the action list, anything else is stacked as Meta.
// Numeric parse failures are logged, leaving the zero value in place.
func (s *Stream) parseRulePair(
    result *JSONRule,
    stack []*Meta,
    lhs string, rhs string,
) error {
    switch lhs {
    case kwCookie:
        v, err := strconv.ParseUint(rhs, 0, 64)
        result.Cookie = v
        if err != nil {
            logging.GetLogger().Errorf(
                "Cannot parse cookie in openflow rule: %s", rhs)
        }
    case kwTable:
        v, err := strconv.ParseUint(rhs, 10, 32)
        result.Table = int(v)
        if err != nil {
            logging.GetLogger().Errorf(
                "Cannot parse table in openflow rule: %s", rhs)
        }
    case kwPriority:
        v, err := strconv.ParseUint(rhs, 10, 32)
        result.Priority = int(v)
        if err != nil {
            logging.GetLogger().Errorf(
                "Cannot parse priority in openflow rule: %s", rhs)
        }
    case kwActions:
        result.Actions = append(result.Actions, makeAction(rhs))
        return s.ParseRuleAction(result)
    default:
        var pair = &Meta{Key: lhs, Value: rhs}
        stack = append(stack, pair)
    }
    return s.ParseRuleSep(result, stack)
}
// ParseRuleSep implements the state of the parser after an x=y element: a
// break is expected, either a comma (more pairs follow) or a space (the
// stacked pairs form the filter block and a new section starts).
func (s *Stream) ParseRuleSep(result *JSONRule, stack []*Meta) error {
    tok, _ := s.scan()
    if tok == tComma {
        tok2, val2 := s.scan()
        if tok2 == tSpace {
            result.Meta = append(result.Meta, stack...)
            return s.ParseRule(result)
        }
        if tok2 == tText {
            return s.ParseRuleEq(result, stack, val2)
        }
        return errors.New("expected text or space after comma")
    }
    if tok == tSpace {
        // End of the filter block: rebuild the raw filter text and turn
        // every stacked pair into a Filter.
        var raw bytes.Buffer
        for i, meta := range stack {
            if i > 0 {
                raw.WriteByte(',')
            }
            raw.WriteString(meta.Key)
            if meta.Value != "" {
                raw.WriteByte(':')
                raw.WriteString(meta.Value)
            }
            result.Filters = append(result.Filters, makeFilter(meta))
        }
        result.RawFilter = raw.String()
        return s.ParseRule(result)
    }
    return errors.New("expecting a comma or a space")
}
// makeArg turns a raw argument into an Action. A "field[lo..hi]" form is
// represented as a "range" action whose arguments are the field name and
// the optional bounds; a missing closing bracket is logged and yields nil.
func makeArg(raw string) *Action {
    braPos := strings.Index(raw, "[")
    if braPos == -1 {
        return &Action{Action: raw}
    }
    if raw[len(raw)-1] != ']' {
        logging.GetLogger().Errorf("Expecting a closing bracket in %s", raw)
        return nil
    }
    actRange := Action{Action: "range"}
    field := raw[0:braPos]
    actRange.Arguments = append(actRange.Arguments, &Action{Action: field})
    // Non-empty bracket content is either a single index or "lo..hi".
    if len(raw)-braPos > 2 {
        content := raw[braPos+1 : len(raw)-1]
        dotPos := strings.Index(content, "..")
        if dotPos == -1 {
            actRange.Arguments = append(
                actRange.Arguments, &Action{Action: content})
        } else {
            start := content[0:dotPos]
            end := content[dotPos+2:]
            actRange.Arguments = append(
                actRange.Arguments,
                &Action{Action: start}, &Action{Action: end})
        }
    }
    return &actRange
}
// makeFieldAssign parses the "src->dst" argument of load/move/set_field
// actions and appends the decoded arguments to action. For set_field an
// optional "/mask" on the value becomes a middle argument (nil when
// absent). A missing arrow is logged and leaves the action untouched.
func makeFieldAssign(action *Action, rem string) {
    arrowStart := strings.Index(rem, "->")
    if arrowStart == -1 {
        logging.GetLogger().Errorf(
            "Expecting an arrow in action %s:%s", action.Action, rem)
    } else {
        arg1 := rem[0:arrowStart]
        arg2 := rem[arrowStart+2:]
        switch action.Action {
        case kwLoad:
            action.Arguments = append(
                action.Arguments,
                &Action{Action: arg1}, makeArg(arg2))
        case kwMove:
            action.Arguments = append(
                action.Arguments,
                makeArg(arg1), makeArg(arg2))
        case kwSetField:
            slashPos := strings.Index(arg1, "/")
            if slashPos == -1 {
                action.Arguments = append(
                    action.Arguments,
                    &Action{Action: arg1}, nil, makeArg(arg2))
            } else {
                arg11 := arg1[0:slashPos]
                arg12 := arg1[slashPos+1:]
                action.Arguments = append(
                    action.Arguments,
                    &Action{Action: arg11}, &Action{Action: arg12},
                    makeArg(arg2))
            }
        }
    }
}
// makeAction parses one raw action text. Depending on the first separator
// found (':' or '('), arguments are decoded with ad-hoc rules for
// set_field, load, move, enqueue and learn; a bare word becomes a simple
// action via makeArg.
func makeAction(raw string) *Action {
    actionSep := strings.IndexAny(raw, ":(")
    if actionSep == -1 {
        return makeArg(raw)
    }
    key := raw[0:actionSep]
    action := Action{Action: key}
    rem := raw[actionSep+1:]
    if raw[actionSep] == ':' {
        switch key {
        case kwSetField, kwLoad, kwMove:
            makeFieldAssign(&action, rem)
        case "enqueue":
            // This syntax is not consistent, poor choice of ovs
            // enqueue(port, queue) should be preferred.
            colonPos := strings.Index(rem, ":")
            if colonPos == -1 {
                // Probably should never happen.
                action.Arguments = append(action.Arguments, makeArg(rem))
            } else {
                port := rem[0:colonPos]
                queue := rem[colonPos+1:]
                action.Arguments = append(
                    action.Arguments,
                    &Action{Action: port}, &Action{Action: queue})
            }
        default:
            action.Arguments = append(action.Arguments, makeArg(rem))
        }
    } else {
        // Parenthesized argument list: parse it recursively with a fresh
        // sub-stream limited to the action body.
        s := NewStream(strings.NewReader(rem))
        err := s.ParseActionBody(&action, true)
        if key == "learn" {
            fixLearnArguments(&action)
        }
        if err != nil {
            logging.GetLogger().Errorf("Parsing arguments of %s: %s", raw, err)
        }
    }
    return &action
}
// Learn is a strange beast because some of the k=v arguments are not named
// arguments but field assignment. We need to transform them back in real
// actions and this can only be done if we have a dictionary of named
// arguments for learn.
func fixLearnArguments(action *Action) {
    for i, arg := range action.Arguments {
        switch arg.Key {
        case "", "idle_timeout", "hard_timeout", kwPriority, "cookie",
            "fin_idle_timeout", "fin_hard_timeout", kwTable, "limit",
            "result_dst":
            // Genuine named (or positional) argument: leave untouched.
            continue
        default:
            // Anything else is a field assignment "field=value": rewrite
            // it as an "=" action whose arguments are field and value.
            actAssign := Action{
                Action: "=",
                Arguments: []*Action{makeArg(arg.Key), arg},
            }
            arg.Key = ""
            action.Arguments[i] = &actAssign
        }
    }
}
// ParseActionBody reads the arguments of an action using parenthesis.
// Arguments are comma separated, possibly "key=value" pairs, and the list
// ends at the balancing close parenthesis; empty positions yield nil
// entries. isFirst distinguishes "()" (no arguments) from a trailing
// empty argument.
func (s *Stream) ParseActionBody(act *Action, isFirst bool) error {
    tok, v := s.scan()
    if tok == tText {
        var a *Action
        tok1, v1 := s.scan()
        if tok1 != tEqual {
            s.unscan(tok1, v1)
            a = makeAction(v)
        } else {
            // "key=value" named argument.
            tok2, v2 := s.scan()
            if tok2 == tText {
                a = makeAction(v2)
                a.Key = v
            } else {
                return errors.New("expecting argument after equal")
            }
        }
        act.Arguments = append(act.Arguments, a)
        tok4, _ := s.scan()
        if tok4 == tComma {
            return s.ParseActionBody(act, false)
        } else if tok4 == tClosePar {
            return nil
        } else {
            return errors.New("expecting comma or closing par")
        }
    } else if tok == tComma {
        // Empty argument position.
        act.Arguments = append(act.Arguments, nil)
        return s.ParseActionBody(act, false)
    } else if tok == tClosePar {
        if !isFirst {
            // Trailing empty argument right before the close parenthesis.
            act.Arguments = append(act.Arguments, nil)
        }
        return nil
    }
    return errors.New("expecting argument or argument separator")
}
// ParseRuleAction implements the state of the parser while reading an
// action list: comma-separated action texts terminated by EOF.
func (s *Stream) ParseRuleAction(result *JSONRule) error {
    switch tok, _ := s.scan(); tok {
    case tEOF:
        return nil
    case tComma:
        next, val := s.scan()
        if next != tText {
            return errors.New("parseRuleAction: expecting an action after comma")
        }
        result.Actions = append(result.Actions, makeAction(val))
        return s.ParseRuleAction(result)
    default:
        return errors.New("parseRuleAction: expecting a comma or eof")
    }
}
// ParseGroup is the main entry point for the group parser: it skips an
// optional leading space token, then expects either a key or, once at
// least one bucket has been read, the end of input.
func (s *Stream) ParseGroup(result *JSONGroup) error {
    tok, lhs := s.scan()
    if tok == tSpace {
        tok, lhs = s.scan()
    }
    switch {
    case tok == tText:
        return s.parseGroupEq(result, lhs)
    case tok == tEOF && len(result.Buckets) > 0:
        return nil
    default:
        return fmt.Errorf("expecting id or eof, got %s", TokenNames[tok])
    }
}
// parseGroupSep consumes the separator after one key[=value] element of a
// group description: either a comma followed by the next key, or EOF.
func (s *Stream) parseGroupSep(result *JSONGroup) error {
    tok, _ := s.scan()
    if tok == tComma {
        tok2, lhs := s.scan()
        if tok2 == tText {
            return s.parseGroupEq(result, lhs)
        }
        // BUGFIX: report the unexpected token tok2; the previous code
        // printed TokenNames[tok], which is always "COMMA" here.
        return fmt.Errorf("expecting key after comma, got %s", TokenNames[tok2])
    }
    if tok == tEOF {
        return nil
    }
    return fmt.Errorf("expecting comma or eof, got %s", TokenNames[tok])
}
// parseGroupEq handles one "lhs[=rhs]" element of a group description:
// group_id and type go to dedicated fields, "bucket=" hands over to the
// bucket sub-parser, anything else is stored as Meta.
func (s *Stream) parseGroupEq(result *JSONGroup, lhs string) error {
    tok, v := s.scan()
    var rhs string
    if tok != tEqual {
        // Bare key without a value.
        s.unscan(tok, v)
        rhs = ""
    } else {
        if lhs == kwBucket {
            bucket := &Bucket{}
            if err := s.parseGroupBucket(bucket); err != nil {
                return err
            }
            result.Buckets = append(result.Buckets, bucket)
            return s.ParseGroup(result)
        }
        tok, rhs = s.scan()
        if tok != tText {
            return fmt.Errorf("expecting rhs of equal, got %s", TokenNames[tok])
        }
    }
    switch lhs {
    case kwGroupID:
        // Parse failures are logged, leaving GroupID at zero.
        v, err := strconv.ParseUint(rhs, 0, 32)
        result.GroupID = uint(v)
        if err != nil {
            logging.GetLogger().Errorf(
                "Cannot parse group_id in openflow group: %s", rhs)
        }
    case kwType:
        result.Type = rhs
    default:
        result.Meta = append(result.Meta, &Meta{Key: lhs, Value: rhs})
    }
    return s.parseGroupSep(result)
}
// parseGroupBucket reads one bucket description: comma-separated metadata
// up to "actions=", which hands over to the bucket action parser. The
// "bucket" keyword marks the start of the next bucket and is pushed back.
func (s *Stream) parseGroupBucket(result *Bucket) error {
    for {
        tok, v := s.scan()
        if tok != tText {
            return fmt.Errorf("expecting id in pair, got %s", TokenNames[tok])
        }
        switch v {
        case kwActions:
            tok, _ = s.scan()
            if tok != tEqual {
                return fmt.Errorf("Expecting =, got %s", TokenNames[tok])
            }
            return s.parseGroupBucketActions(result)
        case kwBucket:
            // Start of the next bucket: let the caller handle it.
            s.unscan(tok, v)
            return nil
        default:
            if err := parseMetaBucket(result, v); err != nil {
                return err
            }
        }
        tok, _ = s.scan()
        if tok == tEOF {
            return nil
        }
        if tok != tComma {
            return fmt.Errorf(
                "cannot parse bucket, expecting comma or eof, got: %s",
                TokenNames[tok])
        }
    }
}
// parseMetaBucket parses one "key:value" (or bare key) element found in a
// bucket before the actions. bucket_id is decoded into the bucket's ID;
// everything else is recorded as Meta.
func parseMetaBucket(result *Bucket, v string) error {
    colonPos := strings.Index(v, ":")
    if colonPos == -1 {
        // Bare key, no value.
        result.Meta = append(result.Meta, &Meta{Key: v, Value: ""})
        return nil
    }
    lhs := v[0:colonPos]
    rhs := v[colonPos+1:]
    if lhs == kwBucketID {
        bID, err := strconv.ParseUint(rhs, 0, 32)
        if err != nil {
            return fmt.Errorf("Cannot parse bucket id: %s", rhs)
        }
        result.ID = uint(bID)
        return nil
    }
    // BUGFIX: store the parsed key/value pair; the previous code discarded
    // the split and stored the whole raw string as the key with an empty
    // value, unlike every other Meta producer in this file.
    result.Meta = append(result.Meta, &Meta{Key: lhs, Value: rhs})
    return nil
}
// parseGroupBucketActions reads the comma-separated action list of a
// bucket, stopping at EOF or at the "bucket" keyword starting the next
// bucket (which is pushed back for the caller).
func (s *Stream) parseGroupBucketActions(result *Bucket) error {
    for {
        tok, v := s.scan()
        if tok != tText {
            return fmt.Errorf("expecting id, got %s", TokenNames[tok])
        }
        if v == kwBucket {
            // Next bucket begins: hand control back to the caller.
            s.unscan(tok, v)
            return nil
        }
        result.Actions = append(result.Actions, makeAction(v))
        tok, _ = s.scan()
        if tok == tComma {
            continue
        } else if tok == tEOF {
            return nil
        } else {
            return fmt.Errorf("expecting comma or eof, got %s", TokenNames[tok])
        }
    }
}
// ToAST transforms a string representing an openflow rule into an
// abstract syntax tree of the rule.
func ToAST(rule string) (*JSONRule, error) {
    var parsed JSONRule
    if err := NewStream(strings.NewReader(rule)).ParseRule(&parsed); err != nil {
        return nil, err
    }
    return &parsed, nil
}
// ToASTGroup transforms a string representing an openflow group into an
// abstract syntax tree of the group.
func ToASTGroup(group string) (*JSONGroup, error) {
    var parsed JSONGroup
    if err := NewStream(strings.NewReader(group)).ParseGroup(&parsed); err != nil {
        return nil, err
    }
    return &parsed, nil
}
// ToJSON transforms a string representing an openflow rule into a string
// containing the JSON encoding of the rule.
func ToJSON(rule string) (string, error) {
    ast, err := ToAST(rule)
    if err != nil {
        return "", err
    }
    encoded, err := json.Marshal(ast)
    if err != nil {
        return "", fmt.Errorf("cannot jsonify: %s", rule)
    }
    return string(encoded), nil
}
// ToJSONGroup transforms a string representing an openflow group into a
// string containing the JSON encoding of the group.
func ToJSONGroup(group string) (string, error) {
    ast, err := ToASTGroup(group)
    if err != nil {
        return "", err
    }
    encoded, err := json.Marshal(ast)
    if err != nil {
        return "", fmt.Errorf("cannot jsonify: %s", group)
    }
    return string(encoded), nil
}
// writeAction serializes an Action back to the pretty syntax: an optional
// "key=" prefix, "range" rendered as field[lo..hi], "=" rendered as
// field:=value, otherwise name(arg,...) when arguments are present.
// nil actions (empty argument positions) write nothing.
func writeAction(s *bytes.Buffer, a *Action) {
    if a == nil {
        return
    }
    if a.Key != "" {
        s.WriteString(a.Key) // nolint: gas
        s.Write([]byte("=")) // nolint: gas
    }
    if a.Action == "range" {
        writeAction(s, a.Arguments[0])
        s.Write([]byte("[")) // nolint: gas
        switch len(a.Arguments) {
        case 2:
            writeAction(s, a.Arguments[1])
        case 3:
            writeAction(s, a.Arguments[1])
            s.Write([]byte("..")) // nolint: gas
            writeAction(s, a.Arguments[2])
        }
        s.Write([]byte("]")) // nolint: gas
        return
    }
    if a.Action == "=" {
        writeAction(s, a.Arguments[0])
        // Design decision not to choose = as ovs which is pretty confusing.
        s.Write([]byte(":=")) // nolint: gas
        writeAction(s, a.Arguments[1])
        return
    }
    s.WriteString(a.Action) // nolint: gas
    if len(a.Arguments) > 0 {
        s.Write([]byte("(")) // nolint: gas
        for i, arg := range a.Arguments {
            if i > 0 {
                s.Write([]byte(",")) // nolint: gas
            }
            writeAction(s, arg)
        }
        s.Write([]byte(")")) // nolint: gas
    }
}
// PrettyAST renders an AST rule back into its textual form.
//
// The syntax is close to the one used by OVS but without the quirks.
// The most significant differences are: move, load, set_field, enqueue
// (as regular actions) and fields in learn actions (using := instead of =).
func PrettyAST(ast *JSONRule) string {
	// TODO: use string buffer when go minimal version bumps to 1.10
	var out bytes.Buffer
	out.WriteString("cookie=0x")
	out.WriteString(strconv.FormatUint(ast.Cookie, 16))
	out.WriteString(", table=")
	out.WriteString(strconv.Itoa(ast.Table))
	out.WriteString(", ")
	// Metadata entries; value-less keys are printed bare.
	for _, meta := range ast.Meta {
		out.WriteString(meta.Key)
		if meta.Value != "" {
			out.WriteString("=")
			out.WriteString(meta.Value)
		}
		out.WriteString(", ")
	}
	out.WriteString("priority=")
	out.WriteString(strconv.Itoa(ast.Priority))
	// Match filters; a mask is only meaningful together with a value.
	for _, filter := range ast.Filters {
		out.WriteString(",")
		out.WriteString(filter.Key)
		if filter.Value != "" {
			out.WriteString("=")
			out.WriteString(filter.Value)
			if filter.Mask != "" {
				out.WriteString("/")
				out.WriteString(filter.Mask)
			}
		}
	}
	out.WriteString(" actions=")
	for i, action := range ast.Actions {
		if i > 0 {
			out.WriteString(",")
		}
		writeAction(&out, action)
	}
	return out.String()
}
// PrettyASTGroup renders an AST group back into its textual form.
//
// The syntax is close to the one used by OVS but without the quirks.
func PrettyASTGroup(ast *JSONGroup) string {
	// TODO: use string buffer when go minimal version bumps to 1.10
	var out bytes.Buffer
	out.WriteString("group_id=")
	out.WriteString(strconv.FormatUint(uint64(ast.GroupID), 10))
	out.WriteString(", type=")
	out.WriteString(ast.Type)
	// Group-level metadata; value-less keys are printed bare.
	for _, meta := range ast.Meta {
		out.WriteString(", ")
		out.WriteString(meta.Key)
		if meta.Value != "" {
			out.WriteString("=")
			out.WriteString(meta.Value)
		}
	}
	// Each bucket: id, its metadata (colon-separated) and its actions.
	for _, bucket := range ast.Buckets {
		out.WriteString(", bucket=bucket_id:")
		out.WriteString(strconv.FormatUint(uint64(bucket.ID), 10))
		for _, meta := range bucket.Meta {
			out.WriteString(",")
			out.WriteString(meta.Key)
			if meta.Value != "" {
				out.WriteString(":")
				out.WriteString(meta.Value)
			}
		}
		out.WriteString(",actions=")
		for i, action := range bucket.Actions {
			if i > 0 {
				out.WriteString(",")
			}
			writeAction(&out, action)
		}
	}
	return out.String()
}
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0" encoding="utf-8"?>
<!--
#
# %CopyrightBegin%
#
# Copyright Ericsson AB 2009-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# %CopyrightEnd%
-->
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:exsl="http://exslt.org/common"
xmlns:func="http://exslt.org/functions"
xmlns:erl="http://erlang.org"
extension-element-prefixes="exsl func"
xmlns:fn="http://www.w3.org/2005/02/xpath-functions">
<xsl:include href="db_html_params.xsl"/>
<xsl:include href="db_funcs.xsl"/>
<!-- Swap the case of the first character of $in: uppercase becomes
     lowercase and vice versa. Used only to build sort keys (see the
     comment above erl:get_sort_field). -->
<func:function name="erl:flip_first_char">
<xsl:param name="in"/>
<xsl:variable name="uppercase" select="'ABCDEFGHIJKLMNOPQRSTUVWXYZ'"/>
<xsl:variable name="lowercase" select="'abcdefghijklmnopqrstuvwxyz'"/>
<xsl:variable name="first-char" select="substring($in, 1, 1)"/>
<xsl:variable name="result">
<xsl:choose>
<xsl:when test="contains($uppercase, $first-char)">
<xsl:value-of select="concat(translate($first-char, $uppercase, $lowercase), substring($in, 2))"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="concat(translate($first-char, $lowercase, $uppercase), substring($in, 2))"/>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<func:result select="$result"/>
</func:function>
<!-- Return $str with all ASCII letters lowercased. -->
<func:function name="erl:lower-case">
<xsl:param name="str"/>
<xsl:variable name="uppercase" select="'ABCDEFGHIJKLMNOPQRSTUVWXYZ'"/>
<xsl:variable name="lowercase" select="'abcdefghijklmnopqrstuvwxyz'"/>
<xsl:variable name="result">
<xsl:value-of select="translate($str, $uppercase, $lowercase)"/>
</xsl:variable>
<func:result select="$result"/>
</func:function>
<!-- Turn $text into a link-friendly token: lowercase it and replace
     the characters ?: /()" (and space) with dashes.
     NOTE: the literal double quote must be written as &quot; here;
     a raw " would terminate the select attribute and break the XML. -->
<func:function name="erl:to-link">
<xsl:param name="text"/>
<func:result select="translate(erl:lower-case($text),'?: /()&quot; ','--------')"/>
</func:function>
<!-- Used from template menu.funcs to sort a module's functions for the lefthand index list,
from the module's .xml file. Returns a value on which to sort the entity in question
(a <name> element).
Some functions are listed with the name as an attribute, as in string.xml:
<name name="join" arity="2"/>
Others use the element value for the name, as in gen_server.xml:
<name>start_link(Module, Args, Options) -> Result</name>
Additionally, callbacks may be included, as in gen_server.xml:
<name>Module:handle_call(Request, From, State) -> Result</name>
For C reference pages the name tag has a substructure where the nametext tag
is used in the sort, as in erl_nif.xml
<name><ret>void *</ret><nametext>enif_alloc(size_t size)</nametext></name>
So first, get the name from either the attribute or the element value.
Then, reverse the case of the first character. This is because xsltproc, used for processing,
orders uppercase before lowercase (even when the 'case-order="lower-first"' option
is given). But we want the Module callback functions listed after a module's regular
functions, as they are now. This doesn't affect the actual value used in the output, but
just the value used as a sort key. To then ensure that uppercase is indeed sorted before
lower, as we now want it to be, the 'case-order="upper-first"' option is used.
This processing only affect the lefthand index list- the body of the doc page is not
affected.
-->
<func:function name="erl:get_sort_field">
<xsl:param name="elem"/>
<!-- Extract the base name: nametext for C refs, the @name attribute
     when present, otherwise the text before the '(' in the element. -->
<xsl:variable name="base">
<xsl:choose>
<xsl:when test="ancestor::cref">
<xsl:value-of select="$elem/nametext"/>
</xsl:when>
<xsl:otherwise>
<xsl:choose>
<xsl:when test="string-length($elem/@name) > 0">
<xsl:value-of select="$elem/@name"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="substring-before($elem, '(')"/>
</xsl:otherwise>
</xsl:choose>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<!-- Flip the first character's case so callbacks sort after regular
     functions (see the explanation in the comment above). -->
<func:result select="erl:flip_first_char($base)"/>
</func:function>
<!-- Start of Dialyzer type/spec tags.
See also the templates matching "name" and "seealso" as well as
the template "menu.funcs"
-->
<!-- Stylesheet parameters: URIs of the specs document and the
     module-to-application map; '' (the default) means not supplied. -->
<xsl:param name="specs_file" select="''"/>
<xsl:variable name="i" select="document($specs_file)"></xsl:variable>
<xsl:param name="mod2app_file" select="''"/>
<xsl:variable name="m2a" select="document($mod2app_file)"></xsl:variable>
<xsl:key name="mod2app" match="module" use="@name"/>
<!-- Index of function <name> elements keyed by "module:name/arity". -->
<xsl:key
name="mfa"
match="func/name[string-length(@arity) > 0 and ancestor::erlref]"
use="concat(ancestor::erlref/module,':',@name, '/', @arity)"/>
<!-- Emit a fatal error message ("Error in f: m:n/a: s") and terminate
     the whole transformation. -->
<xsl:template name="err">
<xsl:param name="f"/>
<xsl:param name="m"/>
<xsl:param name="n"/>
<xsl:param name="a"/>
<xsl:param name="s"/>
<xsl:message terminate="yes">
Error <xsl:if test="$f != ''">in <xsl:value-of select ="$f"/>:</xsl:if>
<xsl:if test="$m != ''"><xsl:value-of select ="$m"/>:</xsl:if>
<xsl:value-of select="$n"/>
<xsl:if test="$a != ''">/<xsl:value-of
select ="$a"/></xsl:if>: <xsl:value-of select="$s"/>
</xsl:message>
</xsl:template>
<!-- Copy the single spec matching the current element's @name/@arity
     (optionally narrowed by @mod and @clause_i) from the specs
     document $i; terminate via "err" if it is missing or ambiguous. -->
<xsl:template name="find_spec">
<xsl:variable name="curModule" select="ancestor::erlref/module"/>
<xsl:variable name="mod" select="@mod"/>
<xsl:variable name="name" select="@name"/>
<xsl:variable name="arity" select="@arity"/>
<xsl:variable name="clause_i" select="@clause_i"/>
<xsl:variable name="spec0" select=
"$i/specs/module[@name=$curModule]/spec
[name=$name and arity=$arity
and (string-length($mod) = 0 or module = $mod)]"/>
<xsl:variable name="spec" select="$spec0[string-length($clause_i) = 0
or position() = $clause_i]"/>
<xsl:if test="count($spec) != 1">
<xsl:variable name="why">
<xsl:choose>
<xsl:when test="count($spec) > 1">ambiguous spec</xsl:when>
<xsl:when test="count($spec) = 0">unknown spec</xsl:when>
</xsl:choose>
</xsl:variable>
<xsl:call-template name="err">
<xsl:with-param name="f" select="$curModule"/>
<xsl:with-param name="m" select="$mod"/>
<xsl:with-param name="n" select="$name"/>
<xsl:with-param name="a" select="$arity"/>
<xsl:with-param name="s" select="$why"/>
</xsl:call-template>
</xsl:if>
<xsl:copy-of select="$spec"/>
</xsl:template>
<!-- Render the heading for a <name name=".." arity=".."/> inside an
     erlref: emit name-arity and optional @anchor anchors (skipping
     duplicates via the mfa key), then apply the spec clause heads. -->
<xsl:template name="spec_name">
<xsl:variable name="name" select="@name"/>
<xsl:variable name="arity" select="@arity"/>
<xsl:variable name="anchor" select="@anchor"/>
<xsl:variable name="since" select="@since"/>
<xsl:variable name="spec0">
<xsl:call-template name="find_spec"/>
</xsl:variable>
<xsl:variable name="spec" select="exsl:node-set($spec0)/spec"/>
<xsl:choose>
<xsl:when test="ancestor::cref">
<xsl:message terminate="yes">
Error: did not expect a 'name' tag with name/arity attributes here!
</xsl:message>
</xsl:when>
<xsl:when test="ancestor::erlref">
<!-- Do not to use preceding since it is very slow! -->
<xsl:variable name="curModule" select="ancestor::erlref/module"/>
<xsl:variable name="mfas"
select="key('mfa',
concat($curModule,':',$name,'/',$arity))"/>
<xsl:choose>
<xsl:when test="generate-id($mfas[1]) != generate-id(.)">
<!-- Avoid duplicated anchors. See also menu.funcs. -->
</xsl:when>
<xsl:otherwise>
<a name="{$name}-{$arity}"></a>
</xsl:otherwise>
</xsl:choose>
<!-- Insert an anchor for "anchor" attribute -->
<xsl:if test="string-length($anchor) > 0">
<a name="{$anchor}"></a>
</xsl:if>
<xsl:variable name="global_types" select="ancestor::erlref/datatypes"/>
<xsl:variable name="local_types"
select="../type[string-length(@name) > 0]"/>
<xsl:apply-templates select="$spec/contract/clause/head">
<xsl:with-param name="ghlink" select="ancestor-or-self::*[@ghlink]/@ghlink"/>
<xsl:with-param name="local_types" select="$local_types"/>
<xsl:with-param name="global_types" select="$global_types"/>
<xsl:with-param name="since" select="$since"/>
</xsl:apply-templates>
</xsl:when>
</xsl:choose>
</xsl:template>
<!-- Render one spec clause head as a one-row table: the formatted
     head (with a hover github link) plus an optional "since" badge. -->
<xsl:template match="head">
<xsl:param name="ghlink"/>
<xsl:param name="local_types"/>
<xsl:param name="global_types"/>
<xsl:param name="since"/>
<xsl:variable name="id" select="concat(concat(concat(concat(../../../name,'-'),../../../arity),'-'),generate-id(.))"/>
<table class="func-table">
<tr class="func-tr">
<td class="func-td">
<div class="bold_code func-head"
onMouseOver="document.getElementById('ghlink-{$id}').style.visibility = 'visible';"
onMouseOut="document.getElementById('ghlink-{$id}').style.visibility = 'hidden';">
<xsl:call-template name="ghlink">
<xsl:with-param name="ghlink" select="$ghlink"/>
<xsl:with-param name="id" select="$id"/>
</xsl:call-template>
<xsl:apply-templates mode="local_type">
<xsl:with-param name="local_types" select="$local_types"/>
<xsl:with-param name="global_types" select="$global_types"/>
</xsl:apply-templates>
</div>
</td>
<td class="func-since-td">
<xsl:if test="string-length($since) > 0">
<span class="since"><xsl:value-of select="$since"/>
</span>
</xsl:if>
</td>
</tr>
</table>
</xsl:template>
<!-- The *last* <name name="..." arity=".."/> -->
<!-- Render the "Types" section of a function entry: either the
     subtypes of the spec's guard, or the explicitly listed type
     variables, followed by any local type definitions. -->
<xsl:template match="name" mode="types">
<xsl:variable name="name" select="@name"/>
<xsl:variable name="arity" select="@arity"/>
<xsl:variable name="spec0">
<xsl:call-template name="find_spec"/>
</xsl:variable>
<xsl:variable name="spec" select="exsl:node-set($spec0)/spec"/>
<xsl:variable name="clause" select="$spec/contract/clause"/>
<xsl:variable name="global_types" select="ancestor::erlref/datatypes"/>
<xsl:variable name="type_desc" select="../type_desc"/>
<!-- $type is data types to be presented as guards ("local types") -->
<xsl:variable name="type"
select="../type[string-length(@name) > 0
or string-length(@variable) > 0]"/>
<xsl:variable name="type_variables"
select ="$type[string-length(@variable) > 0]"/>
<xsl:variable name="local_types"
select ="$type[string-length(@name) > 0]"/>
<xsl:variable name="output_subtypes" select="count($type_variables) = 0"/>
<!-- It is assumed there is no support for overloaded specs
(there is no spec with more than one clause) -->
<xsl:if test="count($clause/guard) > 0 or count($type) > 0">
<div class="REFBODY fun-types">
<h3 class="func-types-title">Types</h3>
<xsl:choose>
<xsl:when test="$output_subtypes">
<xsl:call-template name="subtype">
<xsl:with-param name="subtype" select="$clause/guard/subtype"/>
<xsl:with-param name="type_desc" select="$type_desc"/>
<xsl:with-param name="local_types" select="$local_types"/>
<xsl:with-param name="global_types" select="$global_types"/>
</xsl:call-template>
</xsl:when>
<xsl:otherwise>
<xsl:call-template name="type_variables">
<xsl:with-param name="type_variables" select="$type_variables"/>
<xsl:with-param name="type_desc" select="$type_desc"/>
<xsl:with-param name="local_types" select="$local_types"/>
<xsl:with-param name="global_types" select="$global_types"/>
<xsl:with-param name="fname" select="$name"/>
<xsl:with-param name="arity" select="$arity"/>
</xsl:call-template>
</xsl:otherwise>
</xsl:choose>
<xsl:call-template name="local_type">
<xsl:with-param name="type_desc" select="$type_desc"/>
<xsl:with-param name="local_types" select="$local_types"/>
<xsl:with-param name="global_types" select="$global_types"/>
</xsl:call-template>
</div>
</xsl:if>
</xsl:template>
<!-- Handle <type variable="..." name_i="..."/> -->
<xsl:template name="type_variables">
<xsl:param name="type_variables"/>
<xsl:param name="type_desc"/>
<xsl:param name="local_types"/>
<xsl:param name="global_types"/>
<xsl:param name="fname"/>
<xsl:param name="arity"/>
<xsl:variable name="names" select="../name[string-length(@arity) > 0]"/>
<xsl:for-each select="$type_variables">
<!-- @name_i selects which sibling <name> the variable belongs to;
     default is the last one. -->
<xsl:variable name="name_i">
<xsl:choose>
<xsl:when test="string-length(@name_i) > 0">
<xsl:value-of select="@name_i"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="count($names)"/>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<xsl:variable name="spec0">
<xsl:for-each select="$names[position() = $name_i]">
<xsl:call-template name="find_spec"/>
</xsl:for-each>
</xsl:variable>
<xsl:variable name="spec" select="exsl:node-set($spec0)/spec"/>
<xsl:variable name="clause" select="$spec/contract/clause"/>
<xsl:variable name="variable" select="@variable"/>
<xsl:variable name="subtype"
select="$clause/guard/subtype[typename = $variable]"/>
<xsl:if test="count($subtype) = 0">
<xsl:call-template name="err">
<xsl:with-param name="f" select="ancestor::erlref/module"/>
<xsl:with-param name="n" select="$fname"/>
<xsl:with-param name="a" select="$arity"/>
<xsl:with-param name="s">unknown type variable <xsl:value-of select="$variable"/>
</xsl:with-param>
</xsl:call-template>
</xsl:if>
<xsl:call-template name="subtype">
<xsl:with-param name="subtype" select="$subtype"/>
<xsl:with-param name="type_desc" select="$type_desc"/>
<xsl:with-param name="local_types" select="$local_types"/>
<xsl:with-param name="global_types" select="$global_types"/>
</xsl:call-template>
</xsl:for-each>
</xsl:template>
<!-- Print each guard subtype (its formatted string) followed by any
     matching <type_desc variable="..."> description. -->
<xsl:template name="subtype">
<xsl:param name="subtype"/>
<xsl:param name="type_desc"/>
<xsl:param name="local_types"/>
<xsl:param name="global_types"/>
<xsl:for-each select="$subtype">
<xsl:variable name="tname" select="typename"/>
<div class="REFTYPES rt-1">
<span class="bold_code bc-2">
<xsl:apply-templates select="string" mode="local_type">
<xsl:with-param name="local_types" select="$local_types"/>
<xsl:with-param name="global_types" select="$global_types"/>
</xsl:apply-templates>
</span>
</div>
<xsl:apply-templates select="$type_desc[@variable = $tname]"/>
</xsl:for-each>
</xsl:template>
<!-- Print each local type definition followed by any matching
     <type_desc name="..."> description (matching @n_vars too). -->
<xsl:template name="local_type">
<xsl:param name="type_desc"/>
<xsl:param name="local_types"/>
<xsl:param name="global_types"/>
<xsl:for-each select="$local_types">
<div class="REFTYPES rt-2">
<xsl:call-template name="type_name">
<xsl:with-param name="mode" select="'local_type'"/>
<xsl:with-param name="local_types" select="$local_types"/>
<xsl:with-param name="global_types" select="$global_types"/>
</xsl:call-template>
</div>
<xsl:variable name="tname" select="@name"/>
<xsl:variable name="tnvars" select="@n_vars"/>
<xsl:apply-templates select=
"$type_desc[@name = $tname
and (@n_vars = $tnvars
or string-length(@n_vars) = 0 and
string-length($tnvars) = 0)]"/>
</xsl:for-each>
</xsl:template>
<!-- Note: <type_desc> has not been implemented for data types. -->
<!-- Similar to <d> -->
<xsl:template match="type_desc">
<div class="REFBODY rb-1">
<xsl:apply-templates/>
</div>
</xsl:template>
<!-- This is for debugging. All modules! -->
<xsl:template match="all_etypes">
<xsl:for-each select= "$i//type">
<pre>
<span class="bold_code bc-3">
<xsl:apply-templates select="typedecl"/>
</span><xsl:text>
</xsl:text>
</pre>
</xsl:for-each>
</xsl:template>
<!-- Datatypes: the "Data Types" section of a reference page. -->
<xsl:template match="datatypes">
<div class="innertube">
<xsl:call-template name="h3_title_link">
<xsl:with-param name="title">Data Types</xsl:with-param>
</xsl:call-template>
<xsl:apply-templates/>
</div>
</xsl:template>
<!-- Datatype title. Is this really needed? Not used by anything. -->
<xsl:template match="datatype_title">
<xsl:variable name="title" select="."/>
<h4>
<xsl:call-template name="title_link">
<xsl:with-param name="title"><xsl:apply-templates/></xsl:with-param>
<xsl:with-param name="link" select="$title"/>
</xsl:call-template>
</h4>
</xsl:template>
<!-- Datatype: one data type entry; named types get a "type-NAME"
     hover github link, anonymous ones just the name and description. -->
<xsl:template match="datatype">
<div class="data-types-body">
<xsl:choose>
<xsl:when test="string-length(name/@name) > 0">
<xsl:variable name="id" select="concat('type-',name/@name)"/>
<div class="data-type-name"
onMouseOver="document.getElementById('ghlink-{$id}').style.visibility = 'visible';"
onMouseOut="document.getElementById('ghlink-{$id}').style.visibility = 'hidden';">
<xsl:call-template name="ghlink">
<xsl:with-param name="id" select="$id"/>
</xsl:call-template>
<xsl:apply-templates select="name"/>
</div>
</xsl:when>
<xsl:otherwise>
<div class="data-type-name">
<xsl:apply-templates select="name"/>
</div>
</xsl:otherwise>
</xsl:choose>
<div class="data-type-desc"><xsl:apply-templates select="desc"/></div>
</div>
</xsl:template>
<!-- The "mode" attribute of apply has been used to separate the case
when datatypes are copied into specifications' subtypes.
A local type has no anchor. There are no links to local types
from local types or guards/head of the same specification.
-->
<!-- Look up and render a type declaration from the specs document $i,
     matching name and optional n_vars/module; terminate via "err" if
     missing or ambiguous. $mode selects plain or local_type output. -->
<xsl:template name="type_name">
<xsl:param name="mode"/> <!-- '' if <datatype> -->
<xsl:param name="local_types" select="/.."/>
<xsl:param name="global_types" select="/.."/>
<xsl:variable name="curModule" select="ancestor::erlref/module"/>
<xsl:variable name="mod" select="@mod"/>
<xsl:variable name="name" select="@name"/>
<xsl:variable name="n_vars" select="@n_vars"/>
<xsl:choose>
<xsl:when test="string-length($name) > 0">
<xsl:variable name="type" select=
"$i/specs/module[@name=$curModule]/type
[name=$name
and (string-length($n_vars) = 0 or n_vars = $n_vars)
and (string-length($mod) = 0 or module = $mod)]"/>
<xsl:if test="count($type) != 1">
<xsl:variable name="why">
<xsl:choose>
<xsl:when test="count($type) > 1">ambiguous type</xsl:when>
<xsl:when test="count($type) = 0">unknown type</xsl:when>
</xsl:choose>
</xsl:variable>
<xsl:call-template name="err">
<xsl:with-param name="f" select="$curModule"/>
<xsl:with-param name="m" select="$mod"/>
<xsl:with-param name="n" select="$name"/>
<xsl:with-param name="a" select="$n_vars"/>
<xsl:with-param name="s" select="$why"/>
</xsl:call-template>
</xsl:if>
<xsl:choose>
<xsl:when test="$mode = ''">
<xsl:apply-templates select="$type/typedecl"/>
</xsl:when>
<xsl:when test="$mode = 'local_type'">
<xsl:apply-templates select="$type/typedecl" mode="local_type">
<xsl:with-param name="local_types" select="$local_types"/>
<xsl:with-param name="global_types" select="$global_types"/>
</xsl:apply-templates>
</xsl:when>
</xsl:choose>
</xsl:when>
<xsl:otherwise> <!-- <datatype> with <name> -->
<xsl:call-template name="name"/>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- Render a type head (plain mode). -->
<xsl:template match="typehead">
<span class="bold_code bc-4">
<xsl:apply-templates/>
</span><br/>
</xsl:template>
<!-- Render a type head in local_type mode, threading the type lists
     through to the children. -->
<xsl:template match="typehead" mode="local_type">
<xsl:param name="local_types"/>
<xsl:param name="global_types"/>
<span class="bold_code bc-5">
<xsl:apply-templates mode="local_type">
<xsl:with-param name="local_types" select="$local_types"/>
<xsl:with-param name="global_types" select="$global_types"/>
</xsl:apply-templates>
</span><br/>
</xsl:template>
<!-- Not used right now -->
<!-- local_defs -->
<xsl:template match="local_defs">
<div class="REFBODY rb-2">
<xsl:apply-templates>
</xsl:apply-templates>
</div>
</xsl:template>
<!-- Not used right now -->
<xsl:template match="local_def">
<div class="REFTYPES rt-3">
<span class="bold_code bc-6">
<xsl:apply-templates/>
</span>
</div>
</xsl:template>
<!-- Used both in <datatype> and in <func>!
     Validate an <anno> annotation: it must be declared either on the
     enclosing name/spec or on one of the "local types"; otherwise
     terminate via "err". The annotation text itself is emitted. -->
<xsl:template match="anno">
<xsl:variable name="curModule" select="ancestor::erlref/module"/>
<xsl:variable name="anno" select="normalize-space(text())"/>
<xsl:variable name="namespec"
select="ancestor::type_desc/preceding-sibling::name
| ancestor::desc/preceding-sibling::name"/>
<xsl:if test="count($namespec) = 0 and string-length($specs_file) > 0">
<xsl:call-template name="err">
<xsl:with-param name="f" select="$curModule"/>
<xsl:with-param name="s">cannot find tag 'name' (anno <xsl:value-of select="$anno"/>)
</xsl:with-param>
</xsl:call-template>
</xsl:if>
<!-- Search "local types" as well -->
<xsl:variable name="local_types"
select="ancestor::desc/preceding-sibling::type
[string-length(@name) > 0]
| ancestor::type_desc/preceding-sibling::type
[string-length(@name) > 0]"/>
<xsl:variable name="has_anno_in_local_type">
<xsl:for-each select="$local_types">
<xsl:call-template name="anno_name">
<xsl:with-param name="curModule" select="$curModule"/>
<xsl:with-param name="anno" select="$anno"/>
</xsl:call-template>
</xsl:for-each>
</xsl:variable>
<xsl:variable name="has_anno">
<xsl:for-each select="$namespec">
<xsl:call-template name="anno_name">
<xsl:with-param name="curModule" select="$curModule"/>
<xsl:with-param name="anno" select="$anno"/>
</xsl:call-template>
</xsl:for-each>
</xsl:variable>
<xsl:if test="$has_anno = '' and $has_anno_in_local_type = ''">
<xsl:call-template name="err">
<xsl:with-param name="f" select="$curModule"/>
<xsl:with-param name="m" select="$namespec/@mod"/>
<xsl:with-param name="n" select="$namespec/@name"/>
<xsl:with-param name="a" select="'-'"/>
<xsl:with-param name="s">unknown annotation <xsl:value-of select="$anno"/>
</xsl:with-param>
</xsl:call-template>
</xsl:if>
<xsl:value-of select="$anno"/>
</xsl:template>
<!-- Return true() if $anno is declared on the matching spec or type
     in the specs document (or if no specs file is in use at all);
     return nothing otherwise. -->
<xsl:template name="anno_name">
<xsl:param name="curModule"/>
<xsl:param name="anno"/>
<xsl:variable name="mod" select="@mod"/>
<xsl:variable name="name" select="@name"/>
<xsl:variable name="arity" select="@arity"/>
<xsl:variable name="n_vars" select="@n_vars"/>
<xsl:variable name="clause_i" select="@clause_i"/>
<xsl:variable name="spec0" select=
"$i/specs/module[@name=$curModule]/spec
[name=$name and arity=$arity
and (string-length($mod) = 0 or module = $mod)]"/>
<xsl:variable name="spec_annos" select=
"$spec0[string-length($clause_i) = 0
or position() = $clause_i]/anno[.=$anno]"/>
<xsl:variable name="type_annos" select=
"$i/specs/module[@name=$curModule]/type
[name=$name
and (string-length($n_vars) = 0 or n_vars=$n_vars)
and (string-length($mod) = 0 or module = $mod)]/anno[.=$anno]"/>
<xsl:if test="count($spec_annos) != 0
or count($type_annos) != 0
or string-length($specs_file) = 0">
<xsl:value-of select="true()"/>
</xsl:if>
</xsl:template>
<!-- Used for indentation of formatted types and specs -->
<xsl:template match="nbsp">
<xsl:text> </xsl:text>
</xsl:template>
<!-- In local_type mode, nbsp and br just delegate to the normal rules. -->
<xsl:template match="nbsp" mode="local_type">
<xsl:apply-templates select="."/>
</xsl:template>
<xsl:template match="br" mode="local_type">
<xsl:apply-templates select="."/>
</xsl:template>
<xsl:template match="marker" mode="local_type">
<xsl:param name="local_types"/>
<xsl:param name="global_types"/>
<!-- Create no anchor -->
<!-- It would be possible to create a link to the global type
(if there is one), but that would mean even more code...
-->
<xsl:apply-templates/>
</xsl:template>
<!-- Does not look at @n_vars -->
<!-- For see* elements in local_type mode: emit a real link via
     "seealso" unless the target is a purely local type, in which
     case just emit the text. -->
<xsl:template match="node()[starts-with(name(), 'see')]" mode="local_type">
<xsl:param name="local_types"/>
<xsl:param name="global_types"/>
<xsl:variable name="filepart"><xsl:value-of select="substring-before(@marker, '#')"/></xsl:variable>
<xsl:variable name="linkpart"><xsl:value-of select="translate(substring-after(@marker, '#'), '/', '-')"/></xsl:variable>
<xsl:choose>
<xsl:when test="string-length($filepart) > 0">
<xsl:call-template name="seealso"/>
</xsl:when>
<xsl:when test="count($local_types[@name = $linkpart]) = 0">
<xsl:call-template name="seealso"/>
</xsl:when>
<xsl:when test="count($global_types/datatype/name[@name = $linkpart]) > 0">
<!-- The type is both local and global; link to the global type -->
<xsl:call-template name="seealso"/>
</xsl:when>
<xsl:otherwise>
<!-- No link to local type -->
<xsl:apply-templates/>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- End of Dialyzer type/spec tags -->
<!-- Cache for each module all the elements used for navigation. -->
<!-- Precompute the navigation entries for every erlref module once,
     as a result-tree fragment converted with exsl:node-set, so menu
     generation does not recompute menu.funcs per page. -->
<xsl:variable name="erlref.nav" select="exsl:node-set($erlref.nav_rtf)"/>
<xsl:variable name="erlref.nav_rtf">
<xsl:for-each select="//erlref">
<xsl:variable name="cval" select="module"/>
<!-- Strip soft hyphens from the module name for use in links. -->
<xsl:variable name="link_cval"><xsl:value-of select="translate($cval, '­', '')"/></xsl:variable>
<module name="{$cval}">
<xsl:call-template name="menu.funcs">
<xsl:with-param name="entries" select="funcs/func/name"/>
<xsl:with-param name="cval" select="$cval"/>
<xsl:with-param name="basename" select="$link_cval"/>
</xsl:call-template>
</module>
</xsl:for-each>
</xsl:variable>
<!-- Page layout: emit the full HTML page skeleton — head (stylesheet
     and title), navigation menu, node-specific content, footer, and
     the scroll-position / highlight scripts. -->
<xsl:template name="pagelayout">
<xsl:param name="chapnum"/>
<xsl:param name="curModule"/>
<html>
<head>
<xsl:choose>
<xsl:when test="string-length($stylesheet) > 0">
<link rel="stylesheet" href="{$topdocdir}/{$stylesheet}" type="text/css"/>
</xsl:when>
<xsl:otherwise>
<link rel="stylesheet" href="{$topdocdir}/otp_doc.css" type="text/css"/>
</xsl:otherwise>
</xsl:choose>
<xsl:choose>
<xsl:when test="string-length($winprefix) > 0">
<title><xsl:value-of select="$winprefix"/> -- <xsl:value-of select="header/title"/></title>
</xsl:when>
<xsl:otherwise>
<title>Erlang -- <xsl:value-of select="header/title"/></title>
</xsl:otherwise>
</xsl:choose>
</head>
<body>
<div id="container">
<script id="js" type="text/javascript" language="JavaScript" src="{$topdocdir}/js/flipmenu/flipmenu.js"/>
<script id="js2" type="text/javascript" src="{$topdocdir}/js/erlresolvelinks.js"></script>
<!-- Inline script: scroll the left navigation so the current entry
     (id "loadscrollpos") is centered when the page loads. -->
<script language="JavaScript" type="text/javascript">
<xsl:text disable-output-escaping="yes"><![CDATA[
<!--
function getWinHeight() {
var myHeight = 0;
if( typeof( window.innerHeight ) == 'number' ) {
//Non-IE
myHeight = window.innerHeight;
} else if( document.documentElement && ( document.documentElement.clientWidth ||
document.documentElement.clientHeight ) ) {
//IE 6+ in 'standards compliant mode'
myHeight = document.documentElement.clientHeight;
} else if( document.body && ( document.body.clientWidth || document.body.clientHeight ) ) {
//IE 4 compatible
myHeight = document.body.clientHeight;
}
return myHeight;
}
function setscrollpos() {
var objf=document.getElementById('loadscrollpos');
document.getElementById("leftnav").scrollTop = objf.offsetTop - getWinHeight()/2;
}
function addEvent(obj, evType, fn){
if (obj.addEventListener){
obj.addEventListener(evType, fn, true);
return true;
} else if (obj.attachEvent){
var r = obj.attachEvent("on"+evType, fn);
return r;
} else {
return false;
}
}
addEvent(window, 'load', setscrollpos);
//-->]]></xsl:text>
</script>
<!-- Generate menu -->
<xsl:call-template name="menu">
<xsl:with-param name="chapnum" select="$chapnum"/>
<xsl:with-param name="curModule" select="$curModule"/>
</xsl:call-template>
<div id="content">
<!-- Insert the node-specific content -->
<xsl:call-template name="content">
<xsl:with-param name="chapnum" select="$chapnum"/>
</xsl:call-template>
<div class="footer">
<hr/>
<p>
<xsl:value-of select="$copyright"/>
<xsl:value-of select="/book/header/copyright/year[1]"/>
<xsl:text>-</xsl:text>
<xsl:value-of select="substring-after(normalize-space(substring-after($gendate, ' ')), ' ')"/>
<xsl:text> </xsl:text>
<xsl:value-of select="/book/header/copyright/holder"/>
</p>
</div>
</div>
</div>
<script type="text/javascript"><xsl:text>window.__otpTopDocDir = '</xsl:text><xsl:value-of select="$topdocdir"/><xsl:text>/js/';</xsl:text></script>
<script type="text/javascript" src="{$topdocdir}/js/highlight.js"/>
</body>
</html>
</xsl:template>
<!-- Content -->
<!-- Dispatches to the per-document-type content template based on the
     local name of the current node (releasenotes/part/internal/chapter/
     application). *ref pages are rendered outside the "innertube" div. -->
<xsl:template name="content">
<xsl:param name="chapnum"/>
<xsl:variable name="lname"><xsl:value-of select="local-name()"/></xsl:variable>
<div class="innertube">
<xsl:if test="$lname = 'releasenotes'">
<!-- .../part -->
<xsl:call-template name="releasenotes.content" />
</xsl:if>
<xsl:if test="$lname = 'part'">
<!-- .../part -->
<xsl:call-template name="part.content" />
</xsl:if>
<xsl:if test="$lname = 'internal'">
<!-- .../internals -->
<xsl:call-template name="internal.content" />
</xsl:if>
<xsl:if test="$lname = 'chapter'">
<!-- .../part/chapter -->
<xsl:call-template name="chapter.content">
<xsl:with-param name="chapnum" select="$chapnum"/>
</xsl:call-template>
</xsl:if>
<xsl:if test="$lname = 'application'">
<!-- .../application -->
<xsl:call-template name="app.content" />
</xsl:if>
</div>
<xsl:if test="$lname = 'erlref' or $lname = 'cref' or $lname= 'comref' or $lname= 'fileref' or $lname= 'appref'">
<!-- .../application/*ref -->
<xsl:comment> refpage </xsl:comment>
<xsl:call-template name="ref.content" />
</xsl:if>
</xsl:template>
<!-- Menu -->
<xsl:template name="menu">
<xsl:param name="chapnum"/>
<xsl:param name="curModule"/>
<xsl:if test="(local-name() = 'part') or ((local-name() = 'chapter') and ancestor::part)">
<!-- .../part or .../part/chapter -->
<xsl:call-template name="menu.ug">
<xsl:with-param name="chapnum" select="$chapnum"/>
</xsl:call-template>
</xsl:if>
<xsl:if test="(local-name() = 'internal' and descendant::chapter) or ((local-name() = 'chapter') and ancestor::internal)">
<!-- .../internal or .../internal/chapter -->
<xsl:call-template name="menu.internal.ug">
<xsl:with-param name="chapnum" select="$chapnum"/>
</xsl:call-template>
</xsl:if>
<xsl:if test="(local-name() = 'internal' and descendant::erlref) or (((local-name() = 'erlref') or (local-name() = 'comref') or (local-name() = 'cref') or (local-name() = 'fileref') or (local-name() = 'appref')) and ancestor::internal)">
<!-- .../internal,.../internal/erlref, .../internal/comref or .../internal/cref or .../internal/fileref or .../internal/appref -->
<xsl:call-template name="menu.internal.ref">
<xsl:with-param name="curModule" select="$curModule"/>
</xsl:call-template>
</xsl:if>
<xsl:if test="(local-name() = 'application') or (((local-name() = 'erlref') or (local-name() = 'comref') or (local-name() = 'cref') or (local-name() = 'fileref') or (local-name() = 'appref')) and ancestor::application)">
<!-- .../application,.../application/erlref, .../application/comref or .../application/cref or .../application/fileref or .../application/appref -->
<xsl:call-template name="menu.ref">
<xsl:with-param name="curModule" select="$curModule"/>
</xsl:call-template>
</xsl:if>
<xsl:if test="(local-name() = 'releasenotes') or ((local-name() = 'chapter') and ancestor::releasenotes)">
<!-- releasenotes -->
<xsl:call-template name="menu.rn">
<xsl:with-param name="chapnum" select="$chapnum"/>
</xsl:call-template>
</xsl:if>
</xsl:template>
<!-- Emit the Erlang logo, linked to the top index page. The configured
     $logo image is used when one is set; otherwise the bundled
     erlang-logo.png. Output is identical in both cases apart from the
     image filename, so the branch only selects the file name. -->
<xsl:template name="erlang_logo">
<xsl:variable name="logofile">
<xsl:choose>
<xsl:when test="string-length($logo) > 0"><xsl:value-of select="$logo"/></xsl:when>
<xsl:otherwise>erlang-logo.png</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<div class="erlang-logo-wrapper">
<a href="{$topdocdir}/index.html"><img alt="Erlang Logo" src="{$topdocdir}/{$logofile}" class="erlang-logo"/></a>
</div>
</xsl:template>
<!-- Top section of the left-hand menu: links to the book's major parts
     (only those present in /book), the PDF and the top index page. -->
<xsl:template name="menu_top">
<ul class="panel-sections">
<xsl:if test="boolean(/book/parts/part)">
<li><a href="users_guide.html">User's Guide</a></li>
</xsl:if>
<xsl:if test="boolean(/book/applications)">
<li><a href="index.html">Reference Manual</a></li>
</xsl:if>
<xsl:if test="boolean(/book/internals)">
<li><a href="internal_docs.html">Internal Documentation</a></li>
</xsl:if>
<xsl:if test="boolean(/book/releasenotes)">
<li><a href="release_notes.html">Release Notes</a></li>
</xsl:if>
<!-- PDF link: explicit $pdfname overrides the default appname-appver.pdf. -->
<xsl:choose>
<xsl:when test="string-length($pdfname) > 0">
<li><a href="{$pdfdir}/{$pdfname}.pdf">PDF</a></li>
</xsl:when>
<xsl:otherwise>
<li><a href="{$pdfdir}/{$appname}-{$appver}.pdf">PDF</a></li>
</xsl:otherwise>
</xsl:choose>
<li><a href="{$topdocdir}/index.html">Top</a></li>
</ul>
</xsl:template>
<!-- Middle section of the left-hand menu: expand/collapse controls for the
     flip menu. The bibliography/glossary links are intentionally disabled
     (kept commented out below). -->
<xsl:template name="menu_middle">
<!-- small>
<xsl:choose>
<xsl:when test="ancestor::parts">
<a href="users_guide_bibliography.html">Bibliography</a><br/>
<a href="users_guide_glossary.html">Glossary</a><br/>
</xsl:when>
<xsl:when test="ancestor::applications">
<a href="ref_man_bibliography.html">Bibliography</a><br/>
<a href="ref_man_glossary.html">Glossary</a><br/>
</xsl:when>
</xsl:choose>
</small -->
<ul class="expand-collapse-items">
<li><a href="javascript:openAllFlips()">Expand All</a></li>
<li><a href="javascript:closeAllFlips()">Contract All</a></li>
</ul>
</xsl:template>
<!-- Book -->
<!-- Root dispatcher: a book is processed part-wise, then reference manual,
     internals and release notes. Each child template fans out to the
     per-document templates that create the output files. -->
<xsl:template match="/book">
<xsl:apply-templates select="parts"/>
<xsl:apply-templates select="applications"/>
<xsl:apply-templates select="internals"/>
<xsl:apply-templates select="releasenotes"/>
</xsl:template>
<!-- Parts -->
<xsl:template match="parts">
<xsl:apply-templates select="part"/>
</xsl:template>
<!-- Applications -->
<xsl:template match="applications">
<xsl:apply-templates select="application"/>
</xsl:template>
<!-- Internals -->
<xsl:template match="internals">
<xsl:apply-templates select="internal"/>
</xsl:template>
<!-- Header -->
<!-- Empty templates: suppress header, section/fsdescription titles and
     pagetext from normal apply-templates output (titles are rendered
     explicitly by the section templates below). -->
<xsl:template match="header"/>
<!-- Section/Title -->
<xsl:template match="section/title|fsdescription/title"/>
<xsl:template match="pagetext"/>
<!-- Chapter/Section, subsection level 1-->
<!-- Top-level section of a chapter: numbered "chapnum.sectnum" <h3> heading
     with a link anchor; markers placed before the title so they anchor it. -->
<xsl:template match="chapter/section">
<xsl:param name="chapnum"/>
<h3>
<xsl:for-each select="marker">
<xsl:call-template name="marker-before-title"/>
</xsl:for-each>
<xsl:call-template name="title_link">
<xsl:with-param name="title">
<xsl:value-of select="$chapnum"/>.<xsl:number/> 
<xsl:value-of select="title"/>
</xsl:with-param>
</xsl:call-template>
</h3>
<xsl:apply-templates>
<xsl:with-param name="chapnum" select="$chapnum"/>
<xsl:with-param name="sectnum"><xsl:number/></xsl:with-param>
</xsl:apply-templates>
</xsl:template>
<!-- Subsections lvl 2 -->
<!-- Second-level section: unnumbered <h4> heading, still linkable. -->
<xsl:template match="section/section">
<xsl:param name="chapnum"/>
<xsl:param name="sectnum"/>
<h4>
<xsl:for-each select="marker">
<xsl:call-template name="marker-before-title"/>
</xsl:for-each>
<!-- xsl:value-of select="$partnum"/>.<xsl:value-of select="$chapnum"/>.<xsl:value-of select="$sectnum"/>.<xsl:number/ -->
<xsl:call-template name="title_link">
<xsl:with-param name="title" select="title"/>
</xsl:call-template>
</h4>
<xsl:apply-templates>
<xsl:with-param name="chapnum" select="$chapnum"/>
</xsl:apply-templates>
</xsl:template>
<!-- Subsections lvl 3 and ... -->
<!-- Third-level (and deeper) section: plain <h5> title. NOTE(review):
     unlike levels 1-2 this does not call title_link, so these headings get
     no anchor — confirm that is intentional. -->
<xsl:template match="section/section/section">
<xsl:param name="chapnum"/>
<xsl:param name="sectnum"/>
<h5>
<xsl:for-each select="marker">
<xsl:call-template name="marker-before-title"/>
</xsl:for-each>
<!-- xsl:value-of select="$partnum"/>.<xsl:value-of select="$chapnum"/>.<xsl:value-of select="$sectnum"/>.<xsl:number/ -->
<xsl:value-of select="title"/>
</h5>
<xsl:apply-templates>
<xsl:with-param name="chapnum" select="$chapnum"/>
</xsl:apply-templates>
</xsl:template>
<!-- *ref/Section -->
<!-- Top-level section of a reference page (or a funcs description block):
     linkable <h3> heading wrapped in an innertube/REFBODY div pair. -->
<xsl:template match="erlref/section|cref/section|comref/section|fileref/section|appref/section|funcs/fsdescription">
<xsl:param name="chapnum"/>
<div class="innertube">
<h3>
<xsl:for-each select="marker">
<xsl:call-template name="marker-before-title"/>
</xsl:for-each>
<xsl:call-template name="title_link">
<xsl:with-param name="title" select="title"/>
</xsl:call-template>
</h3>
<div class="REFBODY rb-3">
<xsl:apply-templates>
<xsl:with-param name="chapnum" select="$chapnum"/>
</xsl:apply-templates>
</div>
</div>
</xsl:template>
<!-- *ref/Subsection -->
<!-- Nested section of a reference page: plain <h4> title (no anchor). -->
<xsl:template match="erlref/section/section|cref/section/section|comref/section/section|fileref/section/section|appref/section/section">
<xsl:param name="chapnum"/>
<xsl:param name="sectnum"/>
<h4>
<xsl:value-of select="title"/>
</h4>
<div class="REFBODY rb-4">
<xsl:apply-templates>
<xsl:with-param name="chapnum" select="$chapnum"/>
</xsl:apply-templates>
</div>
</xsl:template>
<!-- Lists -->
<!-- Bulleted list and its items; $chapnum is threaded through for nested
     numbered content (tables, images, code). -->
<xsl:template match="list">
<xsl:param name="chapnum"/>
<ul>
<xsl:apply-templates>
<xsl:with-param name="chapnum" select="$chapnum"/>
</xsl:apply-templates>
</ul>
</xsl:template>
<xsl:template match="list/item">
<xsl:param name="chapnum"/>
<li>
<xsl:apply-templates>
<xsl:with-param name="chapnum" select="$chapnum"/>
</xsl:apply-templates>
</li>
</xsl:template>
<!-- Tagged (definition) list: tag -> <dt>, item -> <dd>. -->
<xsl:template match="taglist">
<xsl:param name="chapnum"/>
<dl>
<xsl:apply-templates>
<xsl:with-param name="chapnum" select="$chapnum"/>
</xsl:apply-templates>
</dl>
</xsl:template>
<xsl:template match="taglist/tag">
<xsl:param name="chapnum"/>
<dt>
<strong>
<xsl:apply-templates/>
</strong>
</dt>
</xsl:template>
<xsl:template match="taglist/item">
<xsl:param name="chapnum"/>
<dd>
<xsl:apply-templates>
<xsl:with-param name="chapnum" select="$chapnum"/>
</xsl:apply-templates>
</dd>
</xsl:template>
<!-- Note -->
<!-- Admonition boxes: note/warning/do/dont/quote each wrap their content in
     a labelled div; only the CSS class and label text differ. -->
<xsl:template match="note">
<xsl:param name="chapnum"/>
<div class="note">
<div class="label">Note</div>
<div class="content">
<p>
<xsl:apply-templates>
<xsl:with-param name="chapnum" select="$chapnum"/>
</xsl:apply-templates>
</p>
</div>
</div>
</xsl:template>
<!-- Warning -->
<xsl:template match="warning">
<xsl:param name="chapnum"/>
<div class="warning">
<div class="label">Warning</div>
<div class="content">
<p>
<xsl:apply-templates>
<xsl:with-param name="chapnum" select="$chapnum"/>
</xsl:apply-templates>
</p>
</div>
</div>
</xsl:template>
<!-- Do -->
<xsl:template match="do">
<xsl:param name="chapnum"/>
<div class="do">
<div class="label">Do</div>
<div class="content">
<p>
<xsl:apply-templates>
<xsl:with-param name="chapnum" select="$chapnum"/>
</xsl:apply-templates>
</p>
</div>
</div>
</xsl:template>
<!-- Dont -->
<xsl:template match="dont">
<xsl:param name="chapnum"/>
<div class="dont">
<div class="label">Don't</div>
<div class="content">
<p>
<xsl:apply-templates>
<xsl:with-param name="chapnum" select="$chapnum"/>
</xsl:apply-templates>
</p>
</div>
</div>
</xsl:template>
<!-- Quote -->
<xsl:template match="quote">
<xsl:param name="chapnum"/>
<div class="quote">
<p>
<xsl:apply-templates>
<xsl:with-param name="chapnum" select="$chapnum"/>
</xsl:apply-templates>
</p>
</div>
</xsl:template>
<!-- Paragraph -->
<!-- Paragraph and inline markup: direct 1:1 mappings to HTML. Both em and
     strong render as <strong>; c (inline code) becomes a styled span. -->
<xsl:template match="p">
<p>
<xsl:apply-templates/>
</p>
</xsl:template>
<!-- Inline elements -->
<xsl:template match="i">
<i><xsl:apply-templates/></i>
</xsl:template>
<xsl:template match="br">
<br/>
</xsl:template>
<xsl:template match="c">
<span class="code"><xsl:apply-templates/></span>
</xsl:template>
<xsl:template match="em">
<strong><xsl:apply-templates/></strong>
</xsl:template>
<xsl:template match="strong">
<strong><xsl:apply-templates/></strong>
</xsl:template>
<!-- Code -->
<!-- Renders a code listing as <div class="example example-TYPE"><pre>.
     When the listing contains no <anno> markers the raw text is trimmed
     with the erl:code_trim() extension; otherwise child templates are
     applied so the annotations get processed.
     (Cleanup: dropped the $codenum variable that was computed via
     xsl:number but never referenced anywhere in the template.) -->
<xsl:template match="code">
<xsl:param name="chapnum"/>
<xsl:variable name="type" select="@type"/>
<xsl:choose>
<xsl:when test="not(descendant::anno)">
<div class="example example-{$type}"><pre><xsl:value-of select="erl:code_trim(text())"/></pre></div>
</xsl:when>
<xsl:otherwise>
<div class="example example-{$type}"><pre><xsl:apply-templates/></pre></div>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- Pre -->
<!-- Preformatted block: rendered inside an example div with child
     templates applied (so inline markup inside <pre> is honoured).
     (Cleanup: dropped the $codenum variable that was computed via
     xsl:number but never referenced.) -->
<xsl:template match="pre">
<xsl:param name="chapnum"/>
<div class="example"><pre><xsl:apply-templates/></pre></div>
</xsl:template>
<!-- Table -->
<!-- Table rendering: $tabnum counts tables from the enclosing chapter so
     the caption can read "Table chapnum.tabnum:". -->
<xsl:template match="table">
<xsl:param name="chapnum"/>
<xsl:variable name="tabnum">
<xsl:number level="any" from="chapter" count="table"/>
</xsl:variable>
<div class="doc-table-wrapper">
<table class="doc-table">
<!-- tbody-->
<xsl:apply-templates select="row">
<xsl:with-param name="chapnum" select="$chapnum"/>
<xsl:with-param name="tabnum" select="$tabnum"/>
</xsl:apply-templates>
<!-- /tbody-->
</table>
<xsl:apply-templates select="tcaption">
<xsl:with-param name="chapnum" select="$chapnum"/>
<xsl:with-param name="tabnum" select="$tabnum"/>
</xsl:apply-templates>
</div>
</xsl:template>
<xsl:template match="row">
<tr>
<xsl:apply-templates/>
</tr>
</xsl:template>
<xsl:template match="cell">
<td align="left" valign="middle">
<xsl:apply-templates/>
</td>
</xsl:template>
<!-- Numbered caption below the table. -->
<xsl:template match="tcaption">
<xsl:param name="chapnum"/>
<xsl:param name="tabnum"/>
<p class="doc-table-caption">Table
<xsl:value-of select="$chapnum"/>.<xsl:value-of select="$tabnum"/>:
 
<xsl:apply-templates/>
</p>
</xsl:template>
<!-- Image -->
<!-- Image rendering: SVG files are embedded via <object>, raster images via
     <img> (honouring an optional width attribute). $fignum counts images
     from the enclosing chapter for the caption. -->
<xsl:template match="image">
<xsl:param name="chapnum"/>
<xsl:variable name="fignum">
<xsl:number level="any" from="chapter" count="image"/>
</xsl:variable>
<div class="doc-image-wrapper">
<xsl:choose>
<!-- Suffix test: true when @file ends in ".svg". -->
<xsl:when test="substring(@file, (string-length(@file) - string-length('.svg')) + 1) = '.svg'">
<object alt="IMAGE MISSING" data="{@file}" class="doc-svg doc-image">
</object>
</xsl:when>
<xsl:when test="@width">
<img alt="IMAGE MISSING" width="{@width}" src="{@file}" class="doc-image"/>
</xsl:when>
<xsl:otherwise>
<img alt="IMAGE MISSING" src="{@file}" class="doc-image"/>
</xsl:otherwise>
</xsl:choose>
<xsl:apply-templates>
<xsl:with-param name="chapnum" select="$chapnum"/>
<xsl:with-param name="fignum" select="$fignum"/>
</xsl:apply-templates>
</div>
</xsl:template>
<!-- Numbered caption below the image. -->
<xsl:template match="icaption">
<xsl:param name="chapnum"/>
<xsl:param name="fignum"/>
<p class="doc-image-caption">Figure
<xsl:value-of select="$chapnum"/>.<xsl:value-of select="$fignum"/>:
 
<xsl:apply-templates/>
</p>
</xsl:template>
<!-- Internal Docs -->
<!-- Part -->
<!-- An <internal> document creates internal_docs.html via the shared page
     layout; internal.content below supplies the front-page body. -->
<xsl:template match="internal">
<xsl:document href="{$outdir}/internal_docs.html" method="html" encoding="UTF-8" indent="yes" doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN">
<xsl:call-template name="pagelayout"/>
</xsl:document>
</xsl:template>
<!-- Part content-->
<!-- Front page for the internal docs: title/version/date banner, then the
     contained chapters and erlref pages (each creating its own file). -->
<xsl:template name="internal.content">
<div class="frontpage"/>
<center><h1><xsl:value-of select="/book/header/title"/> Internal Docs</h1></center>
<center><h4>Version <xsl:value-of select="$appver"/></h4></center>
<center><h4><xsl:value-of select="$gendate"/></h4></center>
<div class="extrafrontpageinfo">
<center><xsl:value-of select="$extra_front_page_info"/></center>
</div>
<xsl:apply-templates select="chapter|erlref"/>
</xsl:template>
<!-- Menu.internal.chapter -->
<!-- Left-hand menu for internal-documentation chapter pages: chapter list
     built from /book/internals/internal/chapter. -->
<xsl:template name="menu.internal.ug">
<xsl:param name="chapnum"/>
<div id="leftnav">
<div class="leftnav-tube">
<xsl:call-template name="erlang_logo"/>
<p class="section-title"><xsl:value-of select="/book/header/title"/></p>
<p class="section-subtitle">Internal Documentation</p>
<p class="section-version">Version <xsl:value-of select="$appver"/></p>
<xsl:call-template name="menu_top"/>
<xsl:call-template name="menu_middle"/>
<h3>Chapters</h3>
<ul class="flipMenu" imagepath="{$topdocdir}/js/flipmenu">
<xsl:call-template name="menu.chapter">
<xsl:with-param name="entries" select="/book/internals/internal/chapter[header/title]"/>
<xsl:with-param name="chapnum" select="$chapnum"/>
</xsl:call-template>
</ul>
</div>
</div>
</xsl:template>
<!-- Menu.internal.ref -->
<!-- Left-hand menu for internal reference pages: entries collected from the
     internal erlref/cref/comref/fileref/appref documents. -->
<xsl:template name="menu.internal.ref">
<xsl:param name="curModule"/>
<div id="leftnav">
<div class="leftnav-tube">
<xsl:call-template name="erlang_logo"/>
<p class="section-title"><xsl:value-of select="/book/header/title"/></p>
<p class="section-subtitle">Reference Manual</p>
<p class="section-version">Version <xsl:value-of select="$appver"/></p>
<xsl:call-template name="menu_top"/>
<xsl:call-template name="menu_middle"/>
<h3>Table of Contents</h3>
<ul class="flipMenu">
<xsl:call-template name="menu.ref2">
<xsl:with-param name="entries" select="/book/internals/internal/erlref[module]|/book/internals/internal/cref[lib]|/book/internals/internal/comref[com]|/book/internals/internal/fileref[file]|/book/internals/internal/appref[app]"/>
<!--xsl:with-param name="genFuncMenu" select="true"/-->
<xsl:with-param name="curModule" select="$curModule"/>
</xsl:call-template>
</ul>
</div>
</div>
</xsl:template>
<!--Users Guide -->
<!-- Part -->
<!-- A <part> document creates users_guide.html via the shared page layout;
     part.content below supplies the front-page body. Glossary/bibliography
     generation is intentionally disabled (commented out). -->
<xsl:template match="part">
<!-- Generate Glossary for Users Guide -->
<!--xsl:call-template name="glossary">
<xsl:with-param name="type">users_guide</xsl:with-param>
</xsl:call-template-->
<!-- Generate Bibliography for Users Guide -->
<!--xsl:call-template name="bibliography">
<xsl:with-param name="type">users_guide</xsl:with-param>
</xsl:call-template-->
<xsl:document href="{$outdir}/users_guide.html" method="html" encoding="UTF-8" indent="yes" doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN">
<xsl:call-template name="pagelayout"/>
</xsl:document>
</xsl:template>
<!-- Part content-->
<!-- Front page for the user's guide: title/version/date banner, then the
     chapters (each creating its own file). -->
<xsl:template name="part.content">
<div class="frontpage"/>
<center><h1><xsl:value-of select="/book/header/title"/> User's Guide</h1></center>
<center><h4>Version <xsl:value-of select="$appver"/></h4></center>
<center><h4><xsl:value-of select="$gendate"/></h4></center>
<div class="extrafrontpageinfo">
<center><xsl:value-of select="$extra_front_page_info"/></center>
</div>
<xsl:apply-templates select="chapter"/>
</xsl:template>
<!-- Menu.ug -->
<!-- Left-hand menu for user's-guide chapter pages: chapter list built from
     /book/parts/part/chapter. -->
<xsl:template name="menu.ug">
<xsl:param name="chapnum"/>
<div id="leftnav">
<div class="leftnav-tube">
<xsl:call-template name="erlang_logo"/>
<p class="section-title"><xsl:value-of select="/book/header/title"/></p>
<p class="section-subtitle">User's Guide</p>
<p class="section-version">Version <xsl:value-of select="$appver"/></p>
<xsl:call-template name="menu_top"/>
<xsl:call-template name="menu_middle"/>
<h3>Chapters</h3>
<ul class="flipMenu" imagepath="{$topdocdir}/js/flipmenu">
<xsl:call-template name="menu.chapter">
<xsl:with-param name="entries" select="/book/parts/part/chapter[header/title]"/>
<xsl:with-param name="chapnum" select="$chapnum"/>
</xsl:call-template>
</ul>
</div>
</div>
</xsl:template>
<!-- Builds the flip-menu entry for each chapter: the entry matching the
     current chapter ($chapnum equal to its position) is marked expanded and
     given id "loadscrollpos" so the page-load JS scrolls it into view.
     Each entry links "Top of chapter" plus its titled sections. -->
<xsl:template name="menu.chapter">
<xsl:param name="entries"/>
<xsl:param name="chapnum"/>
<xsl:for-each select="$entries">
<!-- Output file name: the chapter's source file minus its ".xml" suffix. -->
<xsl:variable name="chapter_file">
<xsl:value-of select='substring-before(header/file, ".xml")'/>
</xsl:variable>
<xsl:variable name="curchapnum"><xsl:number/></xsl:variable>
<xsl:variable name="expanded">
<xsl:choose>
<xsl:when test="$chapnum = $curchapnum">true</xsl:when>
<xsl:otherwise>false</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<xsl:variable name="loadscrollpos">
<xsl:choose>
<xsl:when test="$chapnum = $curchapnum">loadscrollpos</xsl:when>
<xsl:otherwise>no</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<li id="{$loadscrollpos}" title="{header/title}" expanded="{$expanded}">
<xsl:value-of select="header/title"/>
<ul>
<li>
<a href="{$chapter_file}.html">
Top of chapter
</a>
</li>
<xsl:call-template name="menu.section">
<xsl:with-param name="entries"
select="section[title]"/>
<xsl:with-param name="chapter_file"><xsl:value-of select="$chapter_file"/></xsl:with-param>
</xsl:call-template>
</ul>
</li>
</xsl:for-each>
</xsl:template>
<!-- Menu entries for a chapter's sections: each links to the section's
     anchor (erl:to-link(title)) in the chapter's HTML file. -->
<xsl:template name="menu.section">
<xsl:param name="entries"/>
<xsl:param name="chapter_file"/>
<xsl:for-each select="$entries">
<li title="{title}">
<a href="{$chapter_file}.html#{erl:to-link(title)}">
<xsl:value-of select="title"/>
</a>
</li>
</xsl:for-each>
</xsl:template>
<!-- Chapter (if top tag)-->
<!-- Each chapter becomes its own HTML file named after header/file (minus
     ".xml"). The /chapter variant handles a chapter that is the document
     root; the bodies are otherwise identical. The chapter number passed to
     pagelayout is the chapter's position among its siblings. -->
<xsl:template match="/chapter">
<xsl:document href="{substring-before(header/file, '.xml')}.html" method="html" encoding="UTF-8" indent="yes"
doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN">
<xsl:call-template name="pagelayout">
<xsl:with-param name="chapnum"><xsl:number/></xsl:with-param>
</xsl:call-template>
</xsl:document>
</xsl:template>
<!-- Chapter -->
<xsl:template match="chapter">
<xsl:document href="{substring-before(header/file, '.xml')}.html" method="html" encoding="UTF-8" indent="yes"
doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN">
<xsl:call-template name="pagelayout">
<xsl:with-param name="chapnum"><xsl:number/></xsl:with-param>
</xsl:call-template>
</xsl:document>
</xsl:template>
<!-- Chapter content-->
<!-- Body of a chapter page: numbered <h1> title, then all child content
     with $chapnum threaded through for section/table/figure numbering. -->
<xsl:template name="chapter.content">
<xsl:param name="chapnum"/>
<!-- center-->
<h1>
<xsl:value-of select="$chapnum"/> <xsl:value-of select="header/title"/>
</h1>
<!-- /center-->
<xsl:apply-templates>
<xsl:with-param name="chapnum" select="$chapnum"/>
</xsl:apply-templates>
</xsl:template>
<!-- Reference Manual -->
<!-- Application -->
<!-- An <application> document creates index.html via the shared page
     layout; app.content below supplies the front-page body. Glossary/
     bibliography generation is intentionally disabled (commented out). -->
<xsl:template match="application">
<!-- Generate Glossary for Ref. Manual -->
<!--xsl:call-template name="glossary">
<xsl:with-param name="type">ref_man</xsl:with-param>
</xsl:call-template-->
<!-- Generate Bibliography for Ref. Manual -->
<!--xsl:call-template name="bibliography">
<xsl:with-param name="type">ref_man</xsl:with-param>
</xsl:call-template-->
<xsl:document href="{$outdir}/index.html" method="html" encoding="UTF-8" indent="yes" doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN">
<xsl:call-template name="pagelayout"/>
</xsl:document>
</xsl:template>
<!-- Application content-->
<!-- Front page for the reference manual: title/version/date banner, then
     the reference pages (each creating its own file). -->
<xsl:template name="app.content">
<div class="frontpage"/>
<center><h1><xsl:value-of select="/book/header/title"/> Reference Manual</h1></center>
<center><h4>Version <xsl:value-of select="$appver"/></h4></center>
<center><h4><xsl:value-of select="$gendate"/></h4></center>
<div class="extrafrontpageinfo">
<center><xsl:value-of select="$extra_front_page_info"/></center>
</div>
<xsl:apply-templates select="erlref|cref|comref|fileref|appref"/>
</xsl:template>
<!-- Menu.ref -->
<!-- Left-hand menu for reference-manual pages: entries collected from the
     application's erlref/cref/comref/fileref/appref documents. -->
<xsl:template name="menu.ref">
<xsl:param name="curModule"/>
<div id="leftnav">
<div class="leftnav-tube">
<xsl:call-template name="erlang_logo"/>
<p class="section-title"><xsl:value-of select="/book/header/title"/></p>
<p class="section-subtitle">Reference Manual</p>
<p class="section-version">Version <xsl:value-of select="$appver"/></p>
<xsl:call-template name="menu_top"/>
<xsl:call-template name="menu_middle"/>
<h3>Table of Contents</h3>
<ul class="flipMenu">
<xsl:call-template name="menu.ref2">
<xsl:with-param name="entries" select="/book/applications/application/erlref[module]|/book/applications/application/cref[lib]|/book/applications/application/comref[com]|/book/applications/application/fileref[file]|/book/applications/application/appref[app]"/>
<!--xsl:with-param name="genFuncMenu" select="true"/-->
<xsl:with-param name="curModule" select="$curModule"/>
</xsl:call-template>
</ul>
</div>
</div>
</xsl:template>
<!-- Builds the flip-menu entry for each reference page. $cval is the page's
     identifying name (module/lib/com/file/app depending on document type).
     Pages with a funcs section (except comref/appref/fileref) get an
     expandable function index; the entry matching $curModule is expanded
     and marked with id "loadscrollpos" for the page-load scroll script. -->
<xsl:template name="menu.ref2">
<xsl:param name="entries"/>
<!--xsl:param name="genFuncMenu"/-->
<xsl:param name="curModule"/>
<xsl:for-each select="$entries">
<xsl:variable name="cval">
<xsl:choose>
<xsl:when test="local-name() = 'erlref'">
<xsl:value-of select="module"/>
</xsl:when>
<xsl:when test="local-name() = 'cref'">
<xsl:value-of select="lib"/>
</xsl:when>
<xsl:when test="local-name() = 'comref'">
<xsl:value-of select="com"/>
</xsl:when>
<xsl:when test="local-name() = 'fileref'">
<xsl:value-of select="file"/>
</xsl:when>
<xsl:when test="local-name() = 'appref'">
<xsl:value-of select="app"/>
</xsl:when>
</xsl:choose>
</xsl:variable>
<xsl:variable name="genFuncMenu">
<xsl:choose>
<xsl:when test="local-name() = 'comref'">false</xsl:when>
<xsl:when test="local-name() = 'appref'">false</xsl:when>
<xsl:when test="local-name() = 'fileref'">false</xsl:when>
<xsl:when test="descendant::funcs">true</xsl:when>
<xsl:otherwise>false</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<xsl:variable name="expanded">
<xsl:choose>
<xsl:when test="$curModule = $cval">true</xsl:when>
<xsl:otherwise>false</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<xsl:variable name="loadscrollpos">
<xsl:choose>
<xsl:when test="$curModule = $cval">loadscrollpos</xsl:when>
<xsl:otherwise>no</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<!-- Link target: $cval with soft hyphens stripped (matches erlref's file naming). -->
<xsl:variable name="link_cval"><xsl:value-of select="translate($cval, '­', '')"/></xsl:variable>
<xsl:choose>
<xsl:when test="$genFuncMenu = 'true'">
<li id="{$loadscrollpos}" title="{$cval} " expanded="{$expanded}">
<xsl:value-of select="$cval"/>
<ul>
<li>
<a href="{$link_cval}.html">
Top of manual page
</a>
</li>
<xsl:call-template name="nl"/>
<xsl:choose>
<xsl:when test="local-name() = 'erlref'">
<!-- Use the cached value in order to save time.
value-of a string node is _much_ faster than
copy-of a rtf -->
<xsl:value-of
disable-output-escaping="yes"
select="$erlref.nav/module[@name = $cval]"/>
</xsl:when>
<xsl:otherwise>
<xsl:call-template name="menu.funcs">
<xsl:with-param name="entries"
select="funcs/func/name"/>
<xsl:with-param name="basename"><xsl:value-of select="$link_cval"/></xsl:with-param>
<xsl:with-param name="cval" select="$cval"/>
</xsl:call-template>
</xsl:otherwise>
</xsl:choose>
</ul>
</li>
</xsl:when>
<xsl:otherwise>
<!-- No function index: a plain link; appref pages live in NAME_app.html. -->
<xsl:choose>
<xsl:when test="local-name() = 'appref'">
<li title="{$cval} (App)">
<a href="{$link_cval}_app.html">
<xsl:value-of select="$cval"/> (App)
</a>
</li>
</xsl:when>
<xsl:otherwise>
<li title="{$cval}">
<a href="{$link_cval}.html">
<xsl:value-of select="$cval"/>
</a>
</li>
</xsl:otherwise>
</xsl:choose>
</xsl:otherwise>
</xsl:choose>
</xsl:for-each>
</xsl:template>
<!-- Builds the per-function index entries of a reference page's menu.
     $entries is a node-set of <name> elements (funcs/func/name), $basename
     the page's HTML file name without extension, $cval the page identifier
     used for duplicate suppression via the 'mfa' key.
     FIX: in the cref fallback branch the context node is a <name> element
     (see the $fname computation from "nametext" above it), so the previous
     attribute value templates "{name/nametext}" addressed a nonexistent
     child and produced an empty title and a bare "#" href; they now use
     "{nametext}" to match the xsl:value-of on the following line. -->
<xsl:template name="menu.funcs">
<xsl:param name="entries"/>
<xsl:param name="basename"/>
<xsl:param name="cval"/>
<xsl:for-each select="$entries">
<!-- Sort on function name, so the index list in lefthand frame is ordered. -->
<xsl:sort select="erl:get_sort_field(.)" data-type="text" case-order="upper-first"/>
<xsl:choose>
<xsl:when test="ancestor::cref">
<!-- C function: link on the part before "(", falling back to the full nametext. -->
<xsl:variable name="fname"><xsl:value-of select="substring-before(nametext, '(')"/></xsl:variable>
<xsl:choose>
<xsl:when test="string-length($fname) > 0">
<li title="{$fname}">
<a href="{$basename}.html#{$fname}">
<xsl:value-of select="$fname"/>()
</a>
</li>
</xsl:when>
<xsl:otherwise>
<li title="{nametext}">
<a href="{$basename}.html#{nametext}">
<xsl:value-of select="nametext"/>()
</a>
</li>
</xsl:otherwise>
</xsl:choose>
</xsl:when>
<xsl:when test="ancestor::erlref">
<!-- Erlang function: derive name and arity either from Dialyzer-spec
     attributes (@name/@arity) or by parsing the "f(Args) -> Ret" text. -->
<xsl:variable name="tmpstring">
<xsl:value-of select="substring-before(substring-after(., '('), '->')"/>
</xsl:variable>
<xsl:variable name="ustring">
<xsl:choose>
<xsl:when test="string-length($tmpstring) > 0">
<xsl:call-template name="remove-paren">
<xsl:with-param name="string" select="$tmpstring"/>
</xsl:call-template>
</xsl:when>
<xsl:otherwise>
<xsl:call-template name="remove-paren">
<xsl:with-param name="string" select="substring-after(., '(')"/>
</xsl:call-template>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<xsl:variable name="arity">
<xsl:choose>
<xsl:when test="string-length(@arity) > 0">
<!-- Dialyzer spec -->
<xsl:value-of select="@arity"/>
</xsl:when>
<xsl:otherwise>
<xsl:call-template name="calc-arity">
<xsl:with-param name="string" select="substring-before($ustring, ')')"/>
<xsl:with-param name="no-of-pars" select="0"/>
</xsl:call-template>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<xsl:variable name="fname">
<xsl:choose>
<xsl:when test="string-length(@name) > 0">
<!-- Dialyzer spec -->
<xsl:value-of select="@name"/>
</xsl:when>
<xsl:otherwise>
<xsl:variable name="fname1">
<xsl:value-of select="substring-before(., '(')"/>
</xsl:variable>
<!-- Strip an "erlang:" module qualifier if present. -->
<xsl:variable name="fname2">
<xsl:value-of select="substring-after($fname1, 'erlang:')"/>
</xsl:variable>
<xsl:choose>
<xsl:when test="string-length($fname2) > 0">
<xsl:value-of select="$fname2"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="$fname1"/>
</xsl:otherwise>
</xsl:choose>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<!-- Avoid duplicated entries. See also template "spec_name" -->
<!-- Do not to use preceding since it is very slow! -->
<xsl:variable name="mfas"
select="key('mfa',
concat($cval,':',$fname,'/',$arity))"/>
<xsl:choose>
<xsl:when test="string-length(@name) > 0 and
generate-id($mfas[1]) != generate-id(.)">
<!-- Skip. Only works for Dialyzer specs. -->
</xsl:when>
<xsl:otherwise>
<!--
<li title="{$fname}-{$arity}">
<a href="{$basename}.html#{$fname}-{$arity}">
<xsl:value-of select="$fname"/>/<xsl:value-of select="$arity"/>
</a>
</li>
-->
<!-- Generate a text node -->
<xsl:text><li title="</xsl:text>
<xsl:value-of select="$fname"/>
<xsl:text>-</xsl:text>
<xsl:value-of select="$arity"/>
<xsl:text>"><a href="</xsl:text>
<xsl:value-of select="$basename"/>
<xsl:text>.html#</xsl:text>
<xsl:value-of select="$fname"/>
<xsl:text>-</xsl:text>
<xsl:value-of select="$arity"/>
<xsl:text>"></xsl:text>
<xsl:value-of select="$fname"/>
<xsl:text>/</xsl:text>
<xsl:value-of select="$arity"/>
<xsl:text></a></li></xsl:text>
<xsl:call-template name="nl"/>
</xsl:otherwise>
</xsl:choose>
</xsl:when>
</xsl:choose>
</xsl:for-each>
</xsl:template>
<!-- Erlref -->
<!-- Document emitters: each reference-page type creates its own HTML file
     through the shared page layout, passing its identifier as curModule so
     the menu can highlight it. erlref strips soft hyphens from the module
     name when forming the file name. -->
<xsl:template match="erlref">
<xsl:variable name="filename"><xsl:value-of select="translate(module, '­', '')"/></xsl:variable>
<xsl:document href="{$filename}.html" method="html" encoding="UTF-8" indent="yes" doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN">
<xsl:call-template name="pagelayout">
<xsl:with-param name="curModule" select="module"/>
</xsl:call-template>
</xsl:document>
</xsl:template>
<!-- Cref -->
<xsl:template match="cref">
<xsl:document href="{lib}.html" method="html" encoding="UTF-8" indent="yes" doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN">
<xsl:call-template name="pagelayout">
<xsl:with-param name="curModule" select="lib"/>
</xsl:call-template>
</xsl:document>
</xsl:template>
<!-- Comref -->
<xsl:template match="comref">
<xsl:document href="{com}.html" method="html" encoding="UTF-8" indent="yes" doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN">
<xsl:call-template name="pagelayout">
<xsl:with-param name="curModule" select="com"/>
</xsl:call-template>
</xsl:document>
</xsl:template>
<!-- Fileref -->
<xsl:template match="fileref">
<xsl:document href="{file}.html" method="html" encoding="UTF-8" indent="yes" doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN">
<xsl:call-template name="pagelayout">
<xsl:with-param name="curModule" select="file"/>
</xsl:call-template>
</xsl:document>
</xsl:template>
<!-- Appref -->
<!-- Note the "_app" suffix, matching the links built in menu.ref2. -->
<xsl:template match="appref">
<xsl:document href="{app}_app.html" method="html" encoding="UTF-8" indent="yes" doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN">
<xsl:call-template name="pagelayout">
<xsl:with-param name="curModule" select="app"/>
</xsl:call-template>
</xsl:document>
</xsl:template>
<!-- *ref content-->
<!-- Body of a reference page: centered <h1> with the page identifier
     (module/lib/com/file/app per document type), then all child content. -->
<xsl:template name="ref.content">
<xsl:param name="partnum"/>
<div class="innertube">
<center>
<h1>
<xsl:choose>
<xsl:when test="local-name() = 'erlref'">
<xsl:value-of select="module"/>
</xsl:when>
<xsl:when test="local-name() = 'cref'">
<xsl:value-of select="lib"/>
</xsl:when>
<xsl:when test="local-name() = 'comref'">
<xsl:value-of select="com"/>
</xsl:when>
<xsl:when test="local-name() = 'fileref'">
<xsl:value-of select="file"/>
</xsl:when>
<xsl:when test="local-name() = 'appref'">
<xsl:value-of select="app"/>
</xsl:when>
</xsl:choose>
</h1>
</center>
</div>
<xsl:apply-templates>
<xsl:with-param name="partnum" select="$partnum"/>
</xsl:apply-templates>
</xsl:template>
<!-- Module -->
<!-- "Module" heading section of an erlref page. -->
<xsl:template match="module">
<xsl:param name="partnum"/>
<div class="innertube">
<xsl:call-template name="h3_title_link">
<xsl:with-param name="title">Module</xsl:with-param>
</xsl:call-template>
<div class="REFBODY module-body">
<xsl:apply-templates>
<xsl:with-param name="partnum" select="$partnum"/>
</xsl:apply-templates>
</div>
</div>
</xsl:template>
<!-- Modulesummary -->
<!-- "Module Summary" section; also renders a "Since" section when the
     sibling module element carries a @since attribute. -->
<xsl:template match="modulesummary">
<xsl:param name="partnum"/>
<div class="innertube">
<xsl:call-template name="h3_title_link">
<xsl:with-param name="title">Module Summary</xsl:with-param>
</xsl:call-template>
<div class="REFBODY module-summary-body">
<xsl:apply-templates>
<xsl:with-param name="partnum" select="$partnum"/>
</xsl:apply-templates>
</div>
<!-- Since -->
<xsl:if test="string-length(../module/@since) > 0">
<xsl:call-template name="h3_title_link">
<xsl:with-param name="title">Since</xsl:with-param>
</xsl:call-template>
<div class="REFBODY module-since">
Module <xsl:value-of select="../module"/> was introduced in
<xsl:value-of select="../module/@since"/>.
</div>
</xsl:if>
</div>
</xsl:template>
<!-- Lib -->
<xsl:template match="lib">
<xsl:param name="partnum"/>
<div class="innertube">
<xsl:call-template name="h3_title_link">
<xsl:with-param name="title">C Library</xsl:with-param>
</xsl:call-template>
<div class="REFBODY c-library-body">
<xsl:apply-templates>
<xsl:with-param name="partnum" select="$partnum"/>
</xsl:apply-templates>
</div>
</div>
</xsl:template>
<!-- Libsummary -->
<xsl:template match="libsummary">
<xsl:param name="partnum"/>
<div class="innertube">
<xsl:call-template name="h3_title_link">
<xsl:with-param name="title">Library Summary</xsl:with-param>
</xsl:call-template>
<div class="REFBODY library-summary-body">
<xsl:apply-templates>
<xsl:with-param name="partnum" select="$partnum"/>
</xsl:apply-templates>
</div>
</div>
</xsl:template>
<!-- Com -->
<xsl:template match="com">
<xsl:param name="partnum"/>
<div class="innertube">
<xsl:call-template name="h3_title_link">
<xsl:with-param name="title">Command</xsl:with-param>
</xsl:call-template>
<div class="REFBODY command-body">
<xsl:apply-templates>
<xsl:with-param name="partnum" select="$partnum"/>
</xsl:apply-templates>
</div>
</div>
</xsl:template>
<!-- Comsummary -->
<xsl:template match="comsummary">
<xsl:param name="partnum"/>
<div class="innertube">
<xsl:call-template name="h3_title_link">
<xsl:with-param name="title">Command Summary</xsl:with-param>
</xsl:call-template>
<div class="REFBODY command-summary-body">
<xsl:apply-templates>
<xsl:with-param name="partnum" select="$partnum"/>
</xsl:apply-templates>
</div>
</div>
</xsl:template>
<!-- File -->
<xsl:template match="file">
<xsl:param name="partnum"/>
<div class="innertube">
<xsl:call-template name="h3_title_link">
<xsl:with-param name="title">File</xsl:with-param>
</xsl:call-template>
<div class="REFBODY file-body">
<xsl:apply-templates>
<xsl:with-param name="partnum" select="$partnum"/>
</xsl:apply-templates>
</div>
</div>
</xsl:template>
<!-- Filesummary -->
<xsl:template match="filesummary">
<xsl:param name="partnum"/>
<div class="innertube">
<xsl:call-template name="h3_title_link">
<xsl:with-param name="title">File Summary</xsl:with-param>
</xsl:call-template>
<div class="REFBODY file-summary-body">
<xsl:apply-templates>
<xsl:with-param name="partnum" select="$partnum"/>
</xsl:apply-templates>
</div>
</div>
</xsl:template>
<!-- App -->
<xsl:template match="app">
<xsl:param name="partnum"/>
<div class="innertube">
<xsl:call-template name="h3_title_link">
<xsl:with-param name="title">Application</xsl:with-param>
</xsl:call-template>
<div class="REFBODY application-body">
<xsl:apply-templates>
<xsl:with-param name="partnum" select="$partnum"/>
</xsl:apply-templates>
</div>
</div>
</xsl:template>
<!-- Appsummary -->
<xsl:template match="appsummary">
<xsl:param name="partnum"/>
<div class="innertube">
<xsl:call-template name="h3_title_link">
<xsl:with-param name="title">Application Summary</xsl:with-param>
</xsl:call-template>
<div class="REFBODY application-summary-body">
<xsl:apply-templates>
<xsl:with-param name="partnum" select="$partnum"/>
</xsl:apply-templates>
</div>
</div>
</xsl:template>
<!-- Description -->
<!-- Renders a reference page's <description> section: an anchored
     "Description" heading (via h3_title_link) followed by the section
     body wrapped in REFBODY markup. -->
<xsl:template match="description">
<!-- $partnum is threaded through to child templates for consistent
     section numbering. -->
<xsl:param name="partnum"/>
<div class="innertube">
<xsl:call-template name="h3_title_link">
<xsl:with-param name="title">Description</xsl:with-param>
</xsl:call-template>
<div class="REFBODY description-body">
<p>
<xsl:apply-templates>
<xsl:with-param name="partnum" select="$partnum"/>
</xsl:apply-templates>
</p>
</div>
</div>
</xsl:template>
<!-- Funcs -->
<!-- Renders the function-export section of a reference page: first
     any free-form <fsdescription>, then an anchored "Exports" heading,
     then each <func> entry. -->
<xsl:template match="funcs">
<xsl:param name="partnum"/>
<!-- Optional prose that precedes the export list. -->
<xsl:apply-templates select="fsdescription">
<xsl:with-param name="partnum" select="$partnum"/>
</xsl:apply-templates>
<div class="innertube">
<xsl:call-template name="h3_title_link">
<xsl:with-param name="title">Exports</xsl:with-param>
</xsl:call-template>
</div>
<div class="exports-body">
<!-- One entry per exported function. -->
<xsl:apply-templates select="func">
<xsl:with-param name="partnum" select="$partnum"/>
</xsl:apply-templates>
</div>
</xsl:template>
<!-- Func -->
<!-- Renders one function entry: every <name> heading first, then (for
     spec-based entries, i.e. names carrying @arity) the type
     information in mode="types" once, keyed off the last such name,
     and finally the summary/types/description body. -->
<xsl:template match="func">
<xsl:param name="partnum"/>
<xsl:apply-templates select="name"/>
<!-- Only the last @arity-bearing name triggers the "types" rendering,
     so shared type sections are emitted once per entry. -->
<xsl:apply-templates
select="name[string-length(@arity) > 0 and position()=last()]"
mode="types"/>
<div class="exports-tube">
<xsl:apply-templates select="fsummary|type|desc">
<xsl:with-param name="partnum" select="$partnum"/>
</xsl:apply-templates>
</div>
</xsl:template>
<!-- Dispatches rendering of a <name> element depending on context:
     spec reference (@arity present), datatype name, or plain text
     function heading. An empty text node inside <erlref> without
     @arity is a hard documentation error. -->
<xsl:template match="name">
<xsl:choose>
<!-- @arity is mandatory when referring to a specification -->
<xsl:when test="string-length(@arity) > 0">
<xsl:call-template name="spec_name"/>
</xsl:when>
<!-- Names inside <datatype> get datatype-style rendering. -->
<xsl:when test="ancestor::datatype">
<xsl:call-template name="type_name"/>
</xsl:when>
<!-- No inline text and inside an erlref: must have been meant as a
     spec reference, so abort the build with a message. -->
<xsl:when test="string-length(text()) = 0 and ancestor::erlref">
<xsl:message terminate="yes">
Error <xsl:value-of select="@name"/>: arity is mandatory when referring to specifications!
</xsl:message>
</xsl:when>
<!-- Fall back to parsing the literal "name(Args) -> ..." text. -->
<xsl:otherwise>
<xsl:call-template name="name"/>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- Used both in <datatype> and in <func>! -->
<xsl:template name="name">
<xsl:variable name="tmpstring">
<xsl:value-of select="substring-before(substring-after(., '('), '->')"/>
</xsl:variable>
<xsl:variable name="ustring">
<xsl:choose>
<xsl:when test="string-length($tmpstring) > 0">
<xsl:call-template name="remove-paren">
<xsl:with-param name="string" select="$tmpstring"/>
</xsl:call-template>
</xsl:when>
<xsl:otherwise>
<xsl:call-template name="remove-paren">
<xsl:with-param name="string" select="substring-after(., '(')"/>
</xsl:call-template>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<xsl:variable name="arity">
<xsl:call-template name="calc-arity">
<xsl:with-param name="string" select="substring-before($ustring, ')')"/>
<xsl:with-param name="no-of-pars" select="0"/>
</xsl:call-template>
</xsl:variable>
<xsl:choose>
<xsl:when test="ancestor::cref">
<table class="func-table">
<tr class="func-tr">
<td class="cfunc-td">
<span class="bold_code bc-7">
<xsl:call-template name="title_link">
<xsl:with-param name="link" select="substring-before(nametext, '(')"/>
</xsl:call-template>
</span>
</td>
<td class="func-since-td">
<xsl:if test="string-length(@since) > 0">
<span class="since"><xsl:value-of select="@since"/></span>
</xsl:if>
</td>
</tr>
</table>
</xsl:when>
<xsl:when test="ancestor::erlref">
<xsl:variable name="fname">
<xsl:variable name="fname1">
<xsl:value-of select="substring-before(., '(')"/>
</xsl:variable>
<xsl:variable name="fname2">
<xsl:value-of select="substring-after($fname1, 'erlang:')"/>
</xsl:variable>
<xsl:choose>
<xsl:when test="string-length($fname2) > 0">
<xsl:value-of select="normalize-space($fname2)"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="normalize-space($fname1)"/>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<xsl:choose>
<xsl:when test="ancestor::datatype">
<div class="bold_code bc-8">
<xsl:call-template name="title_link">
<xsl:with-param name="link" select="concat('type-',$fname)"/>
<xsl:with-param name="title">
<xsl:apply-templates/>
</xsl:with-param>
</xsl:call-template>
</div>
</xsl:when>
<xsl:otherwise>
<table class="func-table">
<tr class="func-tr">
<td class="func-td">
<div class="bold_code fun-type">
<xsl:call-template name="title_link">
<xsl:with-param name="link" select="concat(concat($fname,'-'),$arity)"/>
<xsl:with-param name="title">
<xsl:apply-templates/>
</xsl:with-param>
</xsl:call-template>
</div>
</td>
<td class="func-since-td">
<xsl:if test="string-length(@since) > 0">
<span class="since"><xsl:value-of select="@since"/></span>
</xsl:if>
</td>
</tr>
</table>
</xsl:otherwise>
</xsl:choose>
</xsl:when>
<xsl:otherwise>
<div class="bold_code bc-10"><xsl:value-of select="."/></div>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- Type -->
<!-- Renders a "Types" box for a <type> element that carries neither a
     @name nor a @variable attribute; named types are handled by the
     "type_name" template instead. -->
<xsl:template match="type">
<xsl:param name="partnum"/>
<!-- The case where @name != 0 is taken care of in "type_name" -->
<xsl:if test="string-length(@name) = 0 and string-length(@variable) = 0">
<div class="REFBODY rb-5">
<h3 class="func-types-title">Types</h3>
<xsl:apply-templates>
<xsl:with-param name="partnum" select="$partnum"/>
</xsl:apply-templates>
</div>
</xsl:if>
</xsl:template>
<!-- V -->
<!-- Renders one type/variable line (<v>) inside a Types box. -->
<xsl:template match="v">
<xsl:param name="partnum"/>
<div class="REFTYPES rt-4">
<span class="bold_code fun-param-type">
<xsl:apply-templates>
<xsl:with-param name="partnum" select="$partnum"/>
</xsl:apply-templates>
</span><br/>
</div>
</xsl:template>
<!-- D -->
<!-- Renders the explanatory text (<d>) that follows a <v> line. -->
<xsl:template match="d">
<xsl:param name="partnum"/>
<div class="REFBODY rb-6">
<xsl:apply-templates>
<xsl:with-param name="partnum" select="$partnum"/>
</xsl:apply-templates>
</div>
</xsl:template>
<!-- Convenience wrapper: emits an <h3> heading whose text is $title
     and whose anchor name is derived from the title via
     erl:to-link(). -->
<xsl:template name="h3_title_link">
<xsl:param name="title"/>
<h3>
<xsl:call-template name="title_link">
<xsl:with-param name="title" select="$title"/>
<xsl:with-param name="link" select="erl:to-link($title)"/>
</xsl:call-template>
</h3>
</xsl:template>
<!-- Emits a self-linking anchored span for a heading or function name.
     $title: text to display; the sentinel value 'APPLY' means "render
     this element's children instead of a literal title" (used for
     <ret> and <nametext>). $link: the anchor name, defaulting to
     erl:to-link(title). $ghlink: nearest ancestor @ghlink, passed on
     to the "ghlink" template. Hovering the span toggles visibility of
     the companion GitHub edit icon, looked up by the generated id. -->
<xsl:template name="title_link">
<xsl:param name="title" select="'APPLY'"/>
<xsl:param name="link" select="erl:to-link(title)"/>
<xsl:param name="ghlink" select="ancestor-or-self::*[@ghlink][position() = 1]/@ghlink"/>
<!-- generate-id() makes the hover-target id unique even when the same
     $link text occurs more than once on a page. -->
<xsl:variable name="id" select="concat(concat($link,'-'), generate-id(.))"/>
<span onMouseOver="document.getElementById('ghlink-{$id}').style.visibility = 'visible';"
onMouseOut="document.getElementById('ghlink-{$id}').style.visibility = 'hidden';">
<xsl:call-template name="ghlink">
<xsl:with-param name="id" select="$id"/>
<xsl:with-param name="ghlink" select="$ghlink"/>
</xsl:call-template>
<a class="title_link" name="{$link}" href="#{$link}">
<xsl:choose>
<xsl:when test="$title = 'APPLY'">
<xsl:apply-templates/> <!-- like <ret> and <nametext> -->
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="$title"/>
</xsl:otherwise>
</xsl:choose>
</a>
</span>
</xsl:template>
<!-- Emits the GitHub "edit this page" pencil icon for the element
     identified by $id. When no @ghlink is available an empty span
     with the same id is emitted, so the hover script installed by
     "title_link" always finds its target element. -->
<xsl:template name="ghlink">
<xsl:param name="id"/>
<xsl:param name="ghlink" select="ancestor-or-self::*[@ghlink][position() = 1]/@ghlink"/>
<xsl:choose>
<xsl:when test="string-length($ghlink) > 0">
<span id="ghlink-{$id}" class="ghlink">
<a href="https://github.com/erlang/otp/edit/{$ghlink}"
title="Found an issue with the documentation? Fix it by clicking here!">
<span class="pencil"/>
</a>
</span>
</xsl:when>
<xsl:otherwise>
<!-- Placeholder so the hover JS never dereferences a missing node. -->
<span id="ghlink-{$id}"/>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- Desc -->
<!-- Renders a function's <desc> body inside REFBODY markup. -->
<xsl:template match="desc">
<xsl:param name="partnum"/>
<div class="REFBODY rb-7">
<p>
<xsl:apply-templates>
<xsl:with-param name="partnum" select="$partnum"/>
</xsl:apply-templates>
</p>
</div>
</xsl:template>
<!-- Fsummary -->
<!-- Intentionally renders nothing: summaries are not shown inline. -->
<xsl:template match="fsummary">
<!-- This tag is skipped for now. -->
</xsl:template>
<!-- Renders <input> (literal user input) as bold code. -->
<xsl:template match="input">
<span class="bold_code bc-12"><xsl:apply-templates/></span>
</xsl:template>
<!-- Dispatch for every cross-reference element whose name starts with
     'see' (seealso, seemfa, seetype, seeguide, ...): all are handled
     by the shared "seealso" named template. -->
<xsl:template match="node()[starts-with(name(), 'see')]">
<xsl:call-template name="seealso"/>
</xsl:template>
<xsl:template name="seealso">
<xsl:variable name="app_part">
<xsl:variable name="base">
<xsl:value-of select="substring-before(substring-before(concat(@marker,'#'), '#'),':')"/>
</xsl:variable>
<xsl:choose>
<xsl:when test="starts-with($base,'system/')">
<xsl:text>doc/</xsl:text>
<xsl:value-of select="substring-after($base,'/')"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="$base"/>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<xsl:variable name="mod_part">
<xsl:variable name="filepart">
<!-- Get everything before the first #. We concat a # so that if there
is no # we will get the entire string -->
<xsl:value-of select="substring-before(concat(@marker,'#'), '#')"/>
</xsl:variable>
<xsl:variable name="base">
<!-- Remove the app part if there is any -->
<xsl:choose>
<xsl:when test="string-length($app_part) > 0">
<xsl:value-of select="substring-after($filepart, ':')"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="$filepart"/>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<xsl:choose>
<!-- If this is a <seeguide> and name is index then we change it to users_guide -->
<xsl:when test="node()[starts-with(name(parent::*), 'seeguide')] and $base = 'index'">
<xsl:text>users_guide</xsl:text>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="$base"/>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<xsl:variable name="linkpart">
<xsl:variable name="base">
<xsl:value-of select="substring-after(@marker, '#')"/>
</xsl:variable>
<xsl:choose>
<!-- If this is a <seetype> we prepend type- to the anchor -->
<xsl:when test="node()[starts-with(name(parent::*), 'seetype')]">
<xsl:text>type-</xsl:text><xsl:value-of select="$base"/>
</xsl:when>
<xsl:when test="node()[starts-with(name(parent::*), 'seemfa')]">
<xsl:value-of select="translate($base, '/', '-')"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="$base"/>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<xsl:variable name="extension">
<xsl:choose>
<xsl:when test="substring($mod_part, (string-length($mod_part) - string-length('.svg')) + 1) = '.svg'">
<xsl:text></xsl:text>
</xsl:when>
<xsl:otherwise>
<xsl:text>.html</xsl:text>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<xsl:choose>
<xsl:when test="starts-with(@marker,'#')">
<!-- "#Linkpart" -->
<span class="bold_code bc-17"><a href="#{$linkpart}"><xsl:apply-templates/></a></span>
</xsl:when>
<xsl:when test="contains(@marker,'#')">
<!-- "Filepart#Linkpart" (or "Filepart#") -->
<xsl:choose>
<xsl:when test="string-length($app_part) > 0">
<!-- "AppPart:ModPart#Linkpart" -->
<span class="bold_code bc-13"><a href="javascript:erlhref('{$topdocdir}/../','{$app_part}','{$mod_part}{$extension}#{$linkpart}');"><xsl:apply-templates/></a></span>
</xsl:when>
<xsl:otherwise>
<!-- "Filepart#Linkpart" (there is no ':' in Filepart) -->
<xsl:variable name="minus_prefix"
select="substring-before($linkpart, '-')"/>
<xsl:choose>
<xsl:when test="$minus_prefix = 'type'
and string-length($specs_file) > 0
and count($i/specs/module[@name=$mod_part]) = 0">
<!-- Dialyzer seealso (the application is unknown) -->
<!-- Following code deemed too slow; use key() instead
<xsl:variable name="app"
select="$m2a/mod2app/module[@name=$filepart]"/>
-->
<xsl:variable name="this" select="."/>
<xsl:for-each select="$m2a">
<xsl:variable name="app" select="key('mod2app', $mod_part)"/>
<xsl:choose>
<xsl:when test="string-length($app) > 0">
<span class="bold_code bc-14"><a href="javascript:erlhref('{$topdocdir}/../','{$app}','{$mod_part}{$extension}#{$linkpart}');"><xsl:value-of select="$this"/></a></span>
</xsl:when>
<xsl:otherwise>
<!-- Unknown application -->
<xsl:message terminate="yes">
Error <xsl:value-of select="$mod_part"/>: cannot find module exporting type
<xsl:value-of select="$app_part"/> -
<xsl:value-of select="$linkpart"/>
</xsl:message>
</xsl:otherwise>
</xsl:choose>
</xsl:for-each>
</xsl:when>
<xsl:when test="string-length($linkpart) > 0">
<!-- Still Filepart#Linkpart (there is no ':' in Filepart) -->
<span class="bold_code bc-15"><a href="{$mod_part}{$extension}#{$linkpart}"><xsl:apply-templates/></a></span>
</xsl:when>
<xsl:otherwise>
<!-- "Filepart#" (there is no ':' in Filepart) -->
<span class="bold_code bc-16"><a href="{$mod_part}{$extension}"><xsl:apply-templates/></a></span>
</xsl:otherwise>
</xsl:choose>
</xsl:otherwise>
</xsl:choose>
</xsl:when>
<xsl:otherwise>
<!-- "AppPart:Mod" or "Mod" (there is no '#') -->
<xsl:choose>
<xsl:when test="string-length($app_part) > 0">
<!-- "App:Mod" -->
<span class="bold_code bc-18"><a href="javascript:erlhref('{$topdocdir}/../','{$app_part}','{$mod_part}{$extension}');"><xsl:apply-templates/></a></span>
</xsl:when>
<xsl:otherwise>
<!-- "Mod" -->
<span class="bold_code bc-19"><a href="{$mod_part}{$extension}"><xsl:apply-templates/></a></span>
</xsl:otherwise>
</xsl:choose>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="url">
<span class="bold_code bc-20"><a href="{@href}"><xsl:apply-templates/></a></span>
</xsl:template>
<xsl:template match="marker">
<xsl:choose>
<xsl:when test="not(parent::section and following-sibling::title)">
<a name="{@id}"><xsl:apply-templates/></a>
</xsl:when>
</xsl:choose>
</xsl:template>
<xsl:template name="marker-before-title">
<xsl:choose>
<xsl:when test="self::marker and parent::section and following-sibling::title">
<a name="{@id}"><xsl:apply-templates/></a>
</xsl:when>
</xsl:choose>
</xsl:template>
<!-- Release Notes -->
<xsl:template match="releasenotes">
<xsl:document href="{$outdir}/release_notes.html" method="html" encoding="UTF-8" indent="yes" doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN">
<xsl:call-template name="pagelayout"/>
</xsl:document>
</xsl:template>
<!-- Rel notes content-->
<xsl:template name="releasenotes.content">
<div class="frontpage"/>
<center><h1><xsl:value-of select="/book/header/title"/> Release Notes</h1></center>
<center><h4>Version <xsl:value-of select="$appver"/></h4></center>
<center><h4><xsl:value-of select="$gendate"/></h4></center>
<div class="extrafrontpageinfo">
<center><xsl:value-of select="$extra_front_page_info"/></center>
</div>
<xsl:apply-templates select="chapter"/>
</xsl:template>
<!-- Menu.rn -->
<xsl:template name="menu.rn">
<xsl:param name="chapnum"/>
<div id="leftnav">
<div class="leftnav-tube">
<xsl:call-template name="erlang_logo"/>
<p class="section-title"><xsl:value-of select="/book/header/title"/></p>
<p class="section-subtitle">Release Notes</p>
<p class="section-version">Version <xsl:value-of select="$appver"/></p>
<xsl:call-template name="menu_top"/>
<xsl:call-template name="menu_middle"/>
<h3>Chapters</h3>
<ul class="flipMenu" imagepath="{$topdocdir}/js/flipmenu">
<xsl:call-template name="menu.chapter">
<xsl:with-param name="entries" select="/book/releasenotes/chapter[header/title]"/>
<xsl:with-param name="chapnum" select="$chapnum"/>
</xsl:call-template>
</ul>
</div>
</div>
</xsl:template>
<!-- Special templates to calculate the arity of functions -->
<!-- Counts the comma-separated parameters in $string by recursively
     consuming everything up to the next comma and incrementing
     $no-of-pars once per step. An empty $string yields $no-of-pars
     unchanged, so callers pass 0 as the initial count. Commas nested
     inside (), {} or [] must already have been collapsed by
     "remove-paren", or they would inflate the count. -->
<xsl:template name="calc-arity">
<xsl:param name="string"/>
<xsl:param name="no-of-pars"/>
<xsl:variable name="length">
<xsl:value-of select="string-length($string)"/>
</xsl:variable>
<xsl:choose>
<xsl:when test="$length > 0">
<!-- One parameter consumed; recurse on the tail after the comma
     (the last parameter has no comma, so its tail is empty). -->
<xsl:call-template name="calc-arity">
<xsl:with-param name="string" select="substring-after($string, ',')"/>
<xsl:with-param name="no-of-pars" select="$no-of-pars+1"/>
</xsl:call-template>
</xsl:when>
<xsl:otherwise>
<!-- Nothing left to scan: emit the accumulated arity. -->
<xsl:value-of select="$no-of-pars"/>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- Replaces every (), {} and [] group in $string with the single
     character 'x', so that commas inside such groups do not count as
     parameter separators in "calc-arity". Implemented as three passes
     of "remove-paren-1", one per delimiter pair. -->
<xsl:template name="remove-paren">
<xsl:param name="string"/>
<!-- Pass 1: collapse (...) groups. -->
<xsl:variable name="str1">
<xsl:call-template name="remove-paren-1">
<xsl:with-param name="string" select="$string"/>
<xsl:with-param name="start">(</xsl:with-param>
<xsl:with-param name="end">)</xsl:with-param>
</xsl:call-template>
</xsl:variable>
<!-- Pass 2: collapse {...} groups. -->
<xsl:variable name="str2">
<xsl:call-template name="remove-paren-1">
<xsl:with-param name="string" select="$str1"/>
<xsl:with-param name="start">{</xsl:with-param>
<xsl:with-param name="end">}</xsl:with-param>
</xsl:call-template>
</xsl:variable>
<!-- Pass 3: collapse [...] groups. -->
<xsl:variable name="str3">
<xsl:call-template name="remove-paren-1">
<xsl:with-param name="string" select="$str2"/>
<xsl:with-param name="start">[</xsl:with-param>
<xsl:with-param name="end">]</xsl:with-param>
</xsl:call-template>
</xsl:variable>
<xsl:value-of select="$str3"/>
</xsl:template>
<!-- Helper for "remove-paren": replaces the first $start...$end group
     in $string with 'x' and recurses (via "remove-paren", which also
     re-collapses the other delimiter kinds) on the remainder. If no
     $start occurs, the string is returned unchanged. Note: the cut is
     made at the FIRST $end after the first $start, not at a balanced
     match, so nested groups of the same delimiter are handled by the
     subsequent recursive passes rather than in one step. -->
<xsl:template name="remove-paren-1">
<xsl:param name="string"/>
<xsl:param name="start"/>
<xsl:param name="end"/>
<!-- Text preceding the first $start (empty if $string begins with it). -->
<xsl:variable name="tmp1">
<xsl:value-of select="substring-before($string, $start)"/>
</xsl:variable>
<xsl:choose>
<xsl:when test="string-length($tmp1) > 0 or starts-with($string, $start)">
<!-- Everything after the first $end that follows the first $start. -->
<xsl:variable name="tmp2">
<xsl:value-of select="substring-after(substring-after($string, $start), $end)"/>
</xsl:variable>
<xsl:variable name="retstring">
<xsl:call-template name="remove-paren">
<xsl:with-param name="string" select="$tmp2"/>
</xsl:call-template>
</xsl:variable>
<!-- Splice: prefix + placeholder 'x' + processed remainder. -->
<xsl:value-of select="concat(concat($tmp1, 'x'), $retstring)"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="$string"/>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- Emits a single literal newline character. -->
<xsl:template name="nl">
<xsl:text>
</xsl:text>
</xsl:template>
<!-- Collapses whitespace in the link text of any see* cross-reference
     element, so multi-line source markup renders as one clean line. -->
<xsl:template match="node()[starts-with(name(), 'see')]//text()">
<xsl:value-of select="normalize-space(.)"/>
</xsl:template>
<!-- Renders a C function's return type (<ret>). A separating space is
     appended unless the text already ends in '*' — presumably so
     pointer return types abut the function name; confirm against the
     cref output. -->
<xsl:template match="ret">
<xsl:value-of select="."/>
<xsl:variable name="last_char" select="substring(., string-length(.), 1)"/>
<xsl:if test="$last_char != '*'">
<xsl:text> </xsl:text>
</xsl:if>
</xsl:template>
<!-- Renders a C function prototype (<nametext>): the name up to '(',
     then either a trivial argument list (')' or 'void)') inline, or a
     multi-line list with one argument per line. -->
<xsl:template match="nametext">
<xsl:value-of select="substring-before(.,'(')"/>
<xsl:text>(</xsl:text>
<xsl:variable name="arglist" select="substring-after(.,'(')"/>
<xsl:choose>
<xsl:when test="$arglist = ')' or $arglist = 'void)'">
<xsl:value-of select="$arglist"/>
</xsl:when>
<xsl:otherwise>
<br/>
<xsl:call-template name="cfunc-arglist">
<xsl:with-param name="text" select="$arglist"/>
</xsl:call-template>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- Format C function argument list with <br> after comma -->
<!-- Recursively prints the comma-separated arguments, inserting a
     line break after each comma and normalizing whitespace. -->
<xsl:template name="cfunc-arglist">
<xsl:param name="text"/>
<xsl:variable name="line" select="normalize-space($text)"/>
<xsl:choose>
<xsl:when test="contains($line,',')">
<xsl:value-of select="substring-before($line,',')"/>,<br/>
<xsl:call-template name="cfunc-arglist">
<xsl:with-param name="text" select="substring-after($line,',')"/>
</xsl:call-template>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="$line"/>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
</xsl:stylesheet>
|
{
"pile_set_name": "Github"
}
|
#
# Copyright (c) 2010-2020. Axon Framework
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
name=AxonTestConfiguration
appenders = console
appender.console.type = Console
appender.console.name = STDOUT
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = %d [%t] %-5p %-30.30c{1} %x - %m%n
rootLogger.level = info
rootLogger.appenderRefs = stdout
rootLogger.appenderRef.stdout.ref = STDOUT
logger.axon.name = org.axonframework
logger.axon.level = info
logger.axon.additivity = false
logger.axon.appenderRefs = stdout
logger.axon.appenderRef.stdout.ref = STDOUT
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0" encoding="UTF-8"?>
<!--
The contents of this file are subject to the terms of the Common Development and
Distribution License (the License). You may not use this file except in compliance with the
License.
You can obtain a copy of the License at legal/CDDLv1.0.txt. See the License for the
specific language governing permission and limitations under the License.
When distributing Covered Software, include this CDDL Header Notice in each file and include
the License file at legal/CDDLv1.0.txt. If applicable, add the following below the CDDL
Header, with the fields enclosed by brackets [] replaced by your own identifying
information: "Portions copyright [year] [name of copyright owner]".
Copyright 2014 ForgeRock AS.
-->
<!DOCTYPE ModuleProperties PUBLIC "=//iPlanet//Authentication Module Properties XML Interface 1.0 DTD//EN"
"jar://com/sun/identity/authentication/Auth_Module_Properties.dtd">
<ModuleProperties moduleName="Scripted" version="1.0" >
<Callbacks length="0" order="1" timeout="600" header="#WILL NOT BE SHOWN#" />
<Callbacks length="2" order="2" timeout="120" header="Sign in to OpenAM" >
<HiddenValueCallback>
<Id>clientScriptOutputData</Id>
</HiddenValueCallback>
<TextOutputCallback messageType="script">PLACEHOLDER</TextOutputCallback>
</Callbacks>
</ModuleProperties>
|
{
"pile_set_name": "Github"
}
|
//
// Generated by class-dump 3.5 (64 bit) (Debug version compiled Oct 15 2018 10:31:50).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2015 by Steve Nygard.
//
@class NSData;
// Protocol for a raw request/response exchange with a secure element:
// sends the given bytes and returns the response data, with *arg2 set
// on failure. NOTE(review): this is a class-dump'd header — parameter
// semantics are inferred from the signature only; confirm against the
// original framework before relying on them.
@protocol SETransceiver
- (NSData *)transceive:(NSData *)arg1 error:(id *)arg2;
@end
|
{
"pile_set_name": "Github"
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This code is a modified version of the original Spark 1.0.2 implementation.
*
*/
package com.massivedatascience.clusterer
import com.massivedatascience.clusterer.MultiKMeansClusterer.ClusteringWithDistortion
import com.massivedatascience.linalg.{ MutableWeightedVector, WeightedVector }
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import scala.collection.mutable.ArrayBuffer
/**
* A K-Means clustering implementation that performs multiple K-means clusterings simultaneously,
* returning the one with the lowest cost.
*
*/
//scalastyle:off
@deprecated("use ColumnTrackingKMeans", "1.2.0")
class MultiKMeans extends MultiKMeansClusterer {
// Runs Lloyd's algorithm simultaneously for all `c.length` runs of
// initial centers and returns one (distortion, centers) result per
// run. `centers` is mutated in place across iterations.
def cluster(
maxIterations: Int,
pointOps: BregmanPointOps,
data: RDD[BregmanPoint],
c: Seq[IndexedSeq[BregmanCenter]]): Seq[ClusteringWithDistortion] = {
// Mutable per-run copies of the initial centers.
val centers = c.map(_.toArray).toArray
def cluster(): Seq[ClusteringWithDistortion] = {
val runs = centers.length
// active(run) == true while that run's centers are still moving.
val active = Array.fill(runs)(true)
val costs = Array.fill(runs)(0.0)
var activeRuns = new ArrayBuffer[Int] ++ (0 until runs)
var iteration = 0
/*
* Execute iterations of Lloyd's algorithm until all runs have converged.
*/
while (iteration < maxIterations && activeRuns.nonEmpty) {
// remove the empty clusters
logInfo(s"iteration $iteration")
val activeCenters = activeRuns.map(r => centers(r)).toArray
if (log.isInfoEnabled) {
for (r <- 0 until activeCenters.length)
logInfo(s"run ${activeRuns(r)} has ${activeCenters(r).length} centers")
}
// Find the sum and count of points mapping to each center
// NOTE(review): getCentroids is typed to return WeightedVector
// values, but the loop below pattern-matches them as
// MutableWeightedVector — this relies on a successful runtime
// cast; confirm pointOps.make() produces MutableWeightedVector.
val (centroids: Array[((Int, Int), WeightedVector)], runDistortion) = getCentroids(data, activeCenters)
if (log.isInfoEnabled) {
for (run <- activeRuns) logInfo(s"run $run distortion ${runDistortion(run)}")
}
// Assume converged; any moved or emptied cluster re-activates.
for (run <- activeRuns) active(run) = false
for (((runIndex: Int, clusterIndex: Int), cn: MutableWeightedVector) <- centroids) {
val run = activeRuns(runIndex)
if (cn.weight == 0.0) {
// Empty cluster: drop its center (nulled here, filtered
// below) and keep the run active for another iteration.
active(run) = true
centers(run)(clusterIndex) = null.asInstanceOf[BregmanCenter]
} else {
val centroid = cn.asImmutable
active(run) = active(run) || pointOps.centerMoved(pointOps.toPoint(centroid), centers(run)(clusterIndex))
centers(run)(clusterIndex) = pointOps.toCenter(centroid)
}
}
// filter out null centers
for (r <- activeRuns) centers(r) = centers(r).filter(_ != null)
// update distortions and print log message if run completed during this iteration
for ((run, runIndex) <- activeRuns.zipWithIndex) {
costs(run) = runDistortion(runIndex)
if (!active(run)) logInfo(s"run $run finished in ${iteration + 1} iterations")
}
activeRuns = activeRuns.filter(active(_))
iteration += 1
}
costs.zip(centers).map { case (x, y) => ClusteringWithDistortion(x, y.toIndexedSeq) }
}
// One distributed pass over `data`: returns, per (run, cluster), the
// weighted sum of the points assigned to that cluster, plus the total
// distortion per run (gathered through Spark accumulators).
def getCentroids(
data: RDD[BregmanPoint],
activeCenters: Array[Array[BregmanCenter]]): (Array[((Int, Int), WeightedVector)], Array[Double]) = {
val sc = data.sparkContext
// One accumulator per active run; incremented on the executors.
val runDistortion = Array.fill(activeCenters.length)(sc.accumulator(0.0))
val bcActiveCenters = sc.broadcast(activeCenters)
val result = data.mapPartitions[((Int, Int), WeightedVector)] { points =>
val bcCenters = bcActiveCenters.value
// Per-partition mutable accumulators, one per (run, cluster).
val centers = bcCenters.map(c => Array.fill(c.length)(pointOps.make()))
for (point <- points; (clusters, run) <- bcCenters.zipWithIndex) {
val (cluster, cost) = pointOps.findClosest(clusters, point)
runDistortion(run) += cost
centers(run)(cluster).add(point)
}
// Emit each partition's partial sums keyed by (run, cluster).
val contribution = for (
(clusters, run) <- bcCenters.zipWithIndex;
(contrib, cluster) <- clusters.zipWithIndex
) yield {
((run, cluster), centers(run)(cluster).asImmutable)
}
contribution.iterator
}.aggregateByKey(pointOps.make())(
(x, y) => x.add(y),
(x, y) => x.add(y)
).map(x => (x._1, x._2.asImmutable)).collect()
bcActiveCenters.unpersist()
(result, runDistortion.map(x => x.localValue))
}
cluster()
}
}
//scalastyle:on
|
{
"pile_set_name": "Github"
}
|
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v2_3
import (
"reflect"
"github.com/coreos/ignition/config/v2_3/types"
)
// Append appends newConfig to oldConfig and returns the result. Appending one
// config to another is accomplished by iterating over every field in the
// config structure, appending slices, recursively appending structs, and
// overwriting old values with new values for all other types.
func Append(oldConfig, newConfig types.Config) types.Config {
	// Merge field-by-field via reflection, then unwrap back to a Config.
	merged := appendStruct(reflect.ValueOf(oldConfig), reflect.ValueOf(newConfig))
	return merged.Interface().(types.Config)
}
// appendStruct is an internal helper function to AppendConfig. Given two values
// of structures (assumed to be the same type), recursively iterate over every
// field in the struct, appending slices, recursively appending structs, and
// overwriting old values with the new for all other types. Some individual
// struct fields have alternate merge strategies, determined by the field name.
// Currently these fields are "ignition.version", which uses the old value, and
// "ignition.config" which uses the new value.
func appendStruct(vOld, vNew reflect.Value) reflect.Value {
tOld := vOld.Type()
// Build the result as a fresh zero value of the same struct type;
// vRes is a pointer, so fields are reached through vRes.Elem().
vRes := reflect.New(tOld)
for i := 0; i < tOld.NumField(); i++ {
vfOld := vOld.Field(i)
vfNew := vNew.Field(i)
vfRes := vRes.Elem().Field(i)
// Name-based overrides (see the function comment): the Version
// field always keeps the old value, the Config field always
// takes the new one.
switch tOld.Field(i).Name {
case "Version":
vfRes.Set(vfOld)
continue
case "Config":
vfRes.Set(vfNew)
continue
}
// Default merge strategy, keyed on the field's kind.
switch vfOld.Type().Kind() {
case reflect.Struct:
// Nested struct: merge recursively.
vfRes.Set(appendStruct(vfOld, vfNew))
case reflect.Slice:
// Slice: concatenate old elements followed by new ones.
vfRes.Set(reflect.AppendSlice(vfOld, vfNew))
default:
// Everything else: the new value wins, except that a nil
// pointer in the new config preserves the old value.
if vfNew.Kind() == reflect.Ptr && vfNew.IsNil() {
vfRes.Set(vfOld)
} else {
vfRes.Set(vfNew)
}
}
}
return vRes.Elem()
}
|
{
"pile_set_name": "Github"
}
|
/******************************************************************************
* Copyright 2017 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#pragma once
#include "modules/drivers/canbus/can_comm/protocol_data.h"
#include "modules/drivers/proto/conti_radar.pb.h"
namespace apollo {
namespace drivers {
namespace conti_radar {
using apollo::drivers::ContiRadar;
// Decoder for the Continental radar "cluster quality info" protocol frame
// (message 702, per the class name). Parse() fills the corresponding fields
// of the ContiRadar proto from the raw CAN payload.
class ClusterQualityInfo702
    : public apollo::drivers::canbus::ProtocolData<ContiRadar> {
 public:
  // CAN message ID of this protocol frame (value defined in the .cc file).
  static const uint32_t ID;
  ClusterQualityInfo702();
  // Parses |length| bytes of raw CAN payload |bytes| into |conti_radar|.
  void Parse(const std::uint8_t* bytes, int32_t length,
             ContiRadar* conti_radar) const override;

 private:
  // Signal extractors; each decodes one field from the raw payload.
  // NOTE(review): exact bit layouts live in the .cc implementation.
  int target_id(const std::uint8_t* bytes, int32_t length) const;
  int longitude_dist_rms(const std::uint8_t* bytes, int32_t length) const;
  int lateral_dist_rms(const std::uint8_t* bytes, int32_t length) const;
  int longitude_vel_rms(const std::uint8_t* bytes, int32_t length) const;
  int pdh0(const std::uint8_t* bytes, int32_t length) const;
  int ambig_state(const std::uint8_t* bytes, int32_t length) const;
  int invalid_state(const std::uint8_t* bytes, int32_t length) const;
  int lateral_vel_rms(const std::uint8_t* bytes, int32_t length) const;
};
} // namespace conti_radar
} // namespace drivers
} // namespace apollo
|
{
"pile_set_name": "Github"
}
|
/**
* @file wizwiki_w7500.c
* @brief board ID for the WIZnet WIZwiki-W7500 board
*
* DAPLink Interface Firmware
* Copyright (c) 2009-2016, ARM Limited, All Rights Reserved
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* DAPLink board ID string for the WIZnet WIZwiki-W7500 (see file header). */
const char *board_id = "2201";
|
{
"pile_set_name": "Github"
}
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.ads.googleads.v4.resources;
import "google/ads/googleads/v4/common/matching_function.proto";
import "google/ads/googleads/v4/enums/feed_link_status.proto";
import "google/ads/googleads/v4/enums/placeholder_type.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/protobuf/wrappers.proto";
import "google/api/annotations.proto";
option csharp_namespace = "Google.Ads.GoogleAds.V4.Resources";
option go_package = "google.golang.org/genproto/googleapis/ads/googleads/v4/resources;resources";
option java_multiple_files = true;
option java_outer_classname = "AdGroupFeedProto";
option java_package = "com.google.ads.googleads.v4.resources";
option objc_class_prefix = "GAA";
option php_namespace = "Google\\Ads\\GoogleAds\\V4\\Resources";
option ruby_package = "Google::Ads::GoogleAds::V4::Resources";
// Proto file describing the AdGroupFeed resource.
// An ad group feed.
// An ad group feed: links a Feed to an AdGroup with a matching function and
// the placeholder types the feed may populate.
message AdGroupFeed {
  option (google.api.resource) = {
    type: "googleads.googleapis.com/AdGroupFeed"
    pattern: "customers/{customer}/adGroupFeeds/{ad_group_feed}"
  };

  // Immutable. The resource name of the ad group feed.
  // Ad group feed resource names have the form:
  //
  // `customers/{customer_id}/adGroupFeeds/{ad_group_id}~{feed_id}`
  string resource_name = 1 [
    (google.api.field_behavior) = IMMUTABLE,
    (google.api.resource_reference) = {
      type: "googleads.googleapis.com/AdGroupFeed"
    }
  ];

  // Immutable. The feed being linked to the ad group.
  google.protobuf.StringValue feed = 2 [
    (google.api.field_behavior) = IMMUTABLE,
    (google.api.resource_reference) = {
      type: "googleads.googleapis.com/Feed"
    }
  ];

  // Immutable. The ad group being linked to the feed.
  google.protobuf.StringValue ad_group = 3 [
    (google.api.field_behavior) = IMMUTABLE,
    (google.api.resource_reference) = {
      type: "googleads.googleapis.com/AdGroup"
    }
  ];

  // Indicates which placeholder types the feed may populate under the connected
  // ad group. Required.
  repeated google.ads.googleads.v4.enums.PlaceholderTypeEnum.PlaceholderType placeholder_types = 4;

  // Matching function associated with the AdGroupFeed.
  // The matching function is used to filter the set of feed items selected.
  // Required.
  google.ads.googleads.v4.common.MatchingFunction matching_function = 5;

  // Output only. Status of the ad group feed.
  // This field is read-only.
  google.ads.googleads.v4.enums.FeedLinkStatusEnum.FeedLinkStatus status = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
}
|
{
"pile_set_name": "Github"
}
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
org.apache.ws.security.crypto.provider=org.apache.ws.security.components.crypto.Merlin
org.apache.ws.security.crypto.merlin.keystore.type=jks
org.apache.ws.security.crypto.merlin.keystore.password=password
org.apache.ws.security.crypto.merlin.keystore.alias=bob
org.apache.ws.security.crypto.merlin.keystore.file=keys/bob.jks
|
{
"pile_set_name": "Github"
}
|
-- Copyright 2020 Stanford University
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
import "regent"
local c = regentlib.c
-- Pairs a task's integer result with the unique ID of the Legion context the
-- task body executed in, so the checks below can tell whether a task was
-- inlined into its caller (same context) or launched separately (new context).
struct ret
{
  v : int,
  id : uint64,
}

-- Must be inlined into the caller.
__demand(__inline)
task inc1(x : int) : int
  return x + 1
end

-- Must be inlined; records the context it executes in.
__demand(__inline)
task dec1(x : int) : ret
  return ret { v = x - 1, id = c.legion_context_get_unique_id(__context()) }
end

-- Inlined composition of the two tasks above; net value is x + 5.
__demand(__inline)
task f(x : int) : ret
  return dec1(inc1(x + 5))
end

-- Must NOT be inlined: computes the same value as f, but in its own context.
__forbid(__inline)
task g(x : int) : ret
  return ret { v = x + 5, id = c.legion_context_get_unique_id(__context()) }
end

-- Inlined task with a side effect, to check it runs in the caller's context.
__demand(__inline)
task h()
  regentlib.c.printf("called h\n")
  return c.legion_context_get_unique_id(__context())
end

-- Asserts that __demand(__inline) tasks run in main's context and the
-- __forbid(__inline) task runs in a different one, while both compute the
-- same values.
task main()
  var id_main = c.legion_context_get_unique_id(__context())
  var id_h = h()
  regentlib.assert(id_h == id_main, "test failed")
  for i = 0, 10 do
    var ret_f, ret_g = f(i), g(i)
    regentlib.assert(ret_f.v == ret_g.v, "test failed")
    regentlib.assert(id_main == ret_f.id, "test failed")
    regentlib.assert(id_main ~= ret_g.id, "test failed")
  end
end
regentlib.start(main)
|
{
"pile_set_name": "Github"
}
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
authorizationapi "k8s.io/api/authorization/v1beta1"
)
// LocalSubjectAccessReviewExpansion declares the hand-written (non-generated)
// methods added to the localSubjectAccessReviews client.
type LocalSubjectAccessReviewExpansion interface {
	Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error)
}
// Create POSTs the given LocalSubjectAccessReview to the
// "localsubjectaccessreviews" resource in the client's namespace and decodes
// the server's response into the returned object.
func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) {
	result = &authorizationapi.LocalSubjectAccessReview{}
	err = c.client.Post().
		Namespace(c.ns).
		Resource("localsubjectaccessreviews").
		Body(sar).
		Do().
		Into(result)
	return
}
|
{
"pile_set_name": "Github"
}
|
/*
[auto_generated]
boost/numeric/odeint/integrate/detail/integrate_n_steps.hpp
[begin_description]
integrate steps implementation
[end_description]
Copyright 2012-2015 Mario Mulansky
Copyright 2012 Christoph Koke
Copyright 2012 Karsten Ahnert
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or
copy at http://www.boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_NUMERIC_ODEINT_INTEGRATE_DETAIL_INTEGRATE_N_STEPS_HPP_INCLUDED
#define BOOST_NUMERIC_ODEINT_INTEGRATE_DETAIL_INTEGRATE_N_STEPS_HPP_INCLUDED
#include <boost/numeric/odeint/util/unwrap_reference.hpp>
#include <boost/numeric/odeint/stepper/stepper_categories.hpp>
#include <boost/numeric/odeint/integrate/detail/integrate_adaptive.hpp>
#include <boost/numeric/odeint/util/unit_helper.hpp>
#include <boost/numeric/odeint/util/detail/less_with_sign.hpp>
namespace boost {
namespace numeric {
namespace odeint {
namespace detail {
// forward declaration
template< class Stepper , class System , class State , class Time , class Observer >
size_t integrate_adaptive_checked(
Stepper stepper , System system , State &start_state ,
Time &start_time , Time end_time , Time &dt ,
Observer observer, controlled_stepper_tag
);
/* basic version: fixed-step stepper. Performs exactly num_of_steps steps of
 * size dt, observing the state before each step and once after the last.
 * Returns the final time start_time + num_of_steps * dt. */
template< class Stepper , class System , class State , class Time , class Observer>
Time integrate_n_steps(
        Stepper stepper , System system , State &start_state ,
        Time start_time , Time dt , size_t num_of_steps ,
        Observer observer , stepper_tag )
{
    // unwrap possible boost::reference_wrapper around observer/stepper
    typename odeint::unwrap_reference< Observer >::type &obs = observer;
    typename odeint::unwrap_reference< Stepper >::type &st = stepper;

    Time time = start_time;

    for( size_t step = 0; step < num_of_steps ; ++step )
    {
        obs( start_state , time );
        st.do_step( system , start_state , time , dt );
        // direct computation of the time avoids error propagation happening when using time += dt
        // we need clumsy type analysis to get boost units working here
        time = start_time + static_cast< typename unit_value_type<Time>::type >( step+1 ) * dt;
    }
    // final observation at the end time
    obs( start_state , time );

    return time;
}
/* controlled version: each observer interval of length dt is covered by an
 * adaptive integration, so the observer still fires at exact multiples of dt
 * while the controlled stepper chooses its own internal step sizes. */
template< class Stepper , class System , class State , class Time , class Observer >
Time integrate_n_steps(
        Stepper stepper , System system , State &start_state ,
        Time start_time , Time dt , size_t num_of_steps ,
        Observer observer , controlled_stepper_tag )
{
    typename odeint::unwrap_reference< Observer >::type &obs = observer;

    Time time = start_time;
    Time time_step = dt;

    for( size_t step = 0; step < num_of_steps ; ++step )
    {
        obs( start_state , time );
        // advance from time to time + time_step with adaptive (controlled)
        // steps; dt carries the suggested internal step size between calls
        detail::integrate_adaptive(stepper, system, start_state, time, static_cast<Time>(time + time_step), dt,
                                   null_observer(), controlled_stepper_tag());
        // direct computation of the time avoids error propagation happening when using time += dt
        // we need clumsy type analysis to get boost units working here
        time = start_time + static_cast< typename unit_value_type<Time>::type >(step+1) * time_step;
    }
    // final observation at the end time
    obs( start_state , time );

    return time;
}
/* dense output version: the stepper takes real steps of its own choosing and
 * the observation points at multiples of dt are produced by interpolation
 * (calc_state) inside each covered interval. */
template< class Stepper , class System , class State , class Time , class Observer >
Time integrate_n_steps(
        Stepper stepper , System system , State &start_state ,
        Time start_time , Time dt , size_t num_of_steps ,
        Observer observer , dense_output_stepper_tag )
{
    typename odeint::unwrap_reference< Observer >::type &obs = observer;
    typename odeint::unwrap_reference< Stepper >::type &st = stepper;

    Time time = start_time;
    const Time end_time = start_time + static_cast< typename unit_value_type<Time>::type >(num_of_steps) * dt;

    st.initialize( start_state , time , dt );

    size_t step = 0;

    while( step < num_of_steps )
    {
        // interpolate and observe at every dt-multiple already covered by the
        // stepper's current interval
        while( less_with_sign( time , st.current_time() , st.current_time_step() ) )
        {
            st.calc_state( time , start_state );
            obs( start_state , time );
            ++step;
            // direct computation of the time avoids error propagation happening when using time += dt
            // we need clumsy type analysis to get boost units working here
            time = start_time + static_cast< typename unit_value_type<Time>::type >(step) * dt;
        }

        // we have not reached the end, do another real step
        if( less_with_sign( static_cast<Time>(st.current_time()+st.current_time_step()) ,
                            end_time ,
                            st.current_time_step() ) )
        {
            st.do_step( system );
        }
        else if( less_with_sign( st.current_time() , end_time , st.current_time_step() ) )
        { // do the last step ending exactly on the end point
            st.initialize( st.current_state() , st.current_time() , static_cast<Time>(end_time - st.current_time()) );
            st.do_step( system );
        }
    }

    // make sure we really end exactly where we should end
    // NOTE(review): this loop compares with '<' rather than less_with_sign,
    // which looks forward-integration-specific — confirm behavior for dt < 0.
    while( st.current_time() < end_time )
    {
        if( less_with_sign( end_time ,
                            static_cast<Time>(st.current_time()+st.current_time_step()) ,
                            st.current_time_step() ) )
            st.initialize( st.current_state() , st.current_time() , static_cast<Time>(end_time - st.current_time()) );
        st.do_step( system );
    }

    // observation at final point
    obs( st.current_state() , end_time );

    return time;
}
}
}
}
}
#endif /* BOOST_NUMERIC_ODEINT_INTEGRATE_DETAIL_INTEGRATE_N_STEPS_HPP_INCLUDED */
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright 2017 Google
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#import <Foundation/Foundation.h>
@class FIRInstallations;
// A class for wrapping the interactions for retrieving client side info to be used in
// request parameter for interacting with Firebase iam servers.
NS_ASSUME_NONNULL_BEGIN
@interface FIRIAMClientInfoFetcher : NSObject

// Designated initializer. |installations| is used for fetching Firebase
// Installation data; it may be nil.
- (instancetype)initWithFirebaseInstallations:(nullable FIRInstallations *)installations;

// Use -initWithFirebaseInstallations: instead.
- (instancetype)init NS_UNAVAILABLE;

// Fetch the up-to-date Firebase Installation ID (FID) and Firebase Installation Service (FIS) token
// data. Since it involves a server interaction, completion callback is provided for receiving the
// result. FID and FISToken are nullable in the callback; check |error| first.
- (void)fetchFirebaseInstallationDataWithProjectNumber:(NSString *)projectNumber
                                        withCompletion:
                                            (void (^)(NSString *_Nullable FID,
                                                      NSString *_Nullable FISToken,
                                                      NSError *_Nullable error))completion;

// Following are synchronous methods for fetching client-side data; each may
// return nil when the information is unavailable (except the SDK version).
- (nullable NSString *)getDeviceLanguageCode;
- (nullable NSString *)getAppVersion;
- (nullable NSString *)getOSVersion;
- (nullable NSString *)getOSMajorVersion;
- (nullable NSString *)getTimezone;
- (NSString *)getIAMSDKVersion;
@end
NS_ASSUME_NONNULL_END
|
{
"pile_set_name": "Github"
}
|
# ------------------------------------------------------------------------------
# NOTE: THIS DOCKERFILE IS GENERATED VIA "build_latest.sh" or "update_multiarch.sh"
#
# PLEASE DO NOT EDIT IT DIRECTLY.
# ------------------------------------------------------------------------------
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
FROM mcr.microsoft.com/windows/servercore:1909
# $ProgressPreference: https://github.com/PowerShell/PowerShell/issues/2138#issuecomment-251261324
SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]
ENV JAVA_VERSION jdk8u
RUN Write-Host ('Downloading https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u-2020-09-26-11-07/OpenJDK8U-jre_x64_windows_openj9_2020-09-26-11-07.msi ...'); \
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; \
wget https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u-2020-09-26-11-07/OpenJDK8U-jre_x64_windows_openj9_2020-09-26-11-07.msi -O 'openjdk.msi'; \
Write-Host ('Verifying sha256 (6da5207d7a068baf849bd7af591a915425bf5010e63652f9be45bdea3158a81f) ...'); \
if ((Get-FileHash openjdk.msi -Algorithm sha256).Hash -ne '6da5207d7a068baf849bd7af591a915425bf5010e63652f9be45bdea3158a81f') { \
Write-Host 'FAILED!'; \
exit 1; \
}; \
\
New-Item -ItemType Directory -Path C:\temp | Out-Null; \
\
Write-Host 'Installing using MSI ...'; \
Start-Process -FilePath "msiexec.exe" -ArgumentList '/i', 'openjdk.msi', '/L*V', 'C:\temp\OpenJDK.log', \
'/quiet', 'ADDLOCAL=FeatureEnvironment,FeatureJarFileRunWith,FeatureJavaHome' -Wait -Passthru; \
Remove-Item -Path C:\temp -Recurse | Out-Null; \
Write-Host 'Removing openjdk.msi ...'; \
Remove-Item openjdk.msi -Force
ENV JAVA_TOOL_OPTIONS="-XX:+IgnoreUnrecognizedVMOptions -XX:+UseContainerSupport -XX:+IdleTuningCompactOnIdle -XX:+IdleTuningGcOnIdle"
|
{
"pile_set_name": "Github"
}
|
//
// HomeRowReminderTests.swift
// DuckDuckGo
//
// Copyright © 2018 DuckDuckGo. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
import XCTest
@testable import Core
@testable import DuckDuckGo
/// Tests for HomeRowReminder's showNow/setShown logic against a mock storage.
class HomeRowReminderTests: XCTestCase {

    var storage: MockHomeRowReminderStorage!

    override func setUp() {
        storage = MockHomeRowReminderStorage()
    }

    /// The first call to showNow(...) records the first-access date.
    func testWhenFeatureFirstAccessedThenDateIsStored() {
        let feature = HomeRowReminder(storage: storage)
        _ = feature.showNow(isDefaultBrowserSupported: false)
        XCTAssertNotNil(storage.firstAccessDate)
    }

    /// Once marked shown, the reminder stays hidden even after the time window.
    func testWhenTimeHasElapseAndAlreadyShownThenDontShow() {
        setReminderTimeElapsed()
        let feature = HomeRowReminder(storage: storage)
        feature.setShown()
        XCTAssertFalse(feature.showNow(isDefaultBrowserSupported: false))
    }

    /// Not yet shown + window elapsed => reminder shows.
    func testWhenIsNewAndTimeHasElapsedThenShow() {
        setReminderTimeElapsed()
        let feature = HomeRowReminder(storage: storage)
        XCTAssertTrue(feature.showNow(isDefaultBrowserSupported: false))
    }

    /// Not yet shown but window not elapsed => reminder stays hidden.
    func testWhenIsNewAndTimeNotElapsedThenDontShow() {
        let feature = HomeRowReminder(storage: storage)
        XCTAssertFalse(feature.showNow(isDefaultBrowserSupported: false))
    }

    /// Backdates the first-access date past the reminder window
    /// (reminderTimeInDays plus a 10% margin).
    private func setReminderTimeElapsed() {
        let threeAndABitDaysAgo = -(60 * 60 * 24 * HomeRowReminder.Constants.reminderTimeInDays * 1.1)
        storage.firstAccessDate = Date(timeIntervalSinceNow: threeAndABitDaysAgo)
    }
}
/// In-memory stand-in for HomeRowReminderStorage; holds values directly.
class MockHomeRowReminderStorage: HomeRowReminderStorage {
    var firstAccessDate: Date?
    var shown: Bool = false
}
|
{
"pile_set_name": "Github"
}
|
@REM ----------------------------------------------------------------------------
@REM Licensed to the Apache Software Foundation (ASF) under one
@REM or more contributor license agreements. See the NOTICE file
@REM distributed with this work for additional information
@REM regarding copyright ownership. The ASF licenses this file
@REM to you under the Apache License, Version 2.0 (the
@REM "License"); you may not use this file except in compliance
@REM with the License. You may obtain a copy of the License at
@REM
@REM https://www.apache.org/licenses/LICENSE-2.0
@REM
@REM Unless required by applicable law or agreed to in writing,
@REM software distributed under the License is distributed on an
@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@REM KIND, either express or implied. See the License for the
@REM specific language governing permissions and limitations
@REM under the License.
@REM ----------------------------------------------------------------------------
@REM ----------------------------------------------------------------------------
@REM Maven2 Start Up Batch script
@REM
@REM Required ENV vars:
@REM JAVA_HOME - location of a JDK home dir
@REM
@REM Optional ENV vars
@REM M2_HOME - location of maven2's installed home dir
@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending
@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
@REM e.g. to debug Maven itself, use
@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files
@REM ----------------------------------------------------------------------------
@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
@echo off
@REM set title of command window
title %0
@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on'
@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO%
@REM set %HOME% to equivalent of $HOME
if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%")
@REM Execute a user defined script before this one
if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre
@REM check for pre script, once with legacy .bat ending and once with .cmd ending
if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat"
if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd"
:skipRcPre
@setlocal
set ERROR_CODE=0
@REM To isolate internal variables from possible post scripts, we use another setlocal
@setlocal
@REM ==== START VALIDATION ====
if not "%JAVA_HOME%" == "" goto OkJHome
echo.
echo Error: JAVA_HOME not found in your environment. >&2
echo Please set the JAVA_HOME variable in your environment to match the >&2
echo location of your Java installation. >&2
echo.
goto error
:OkJHome
if exist "%JAVA_HOME%\bin\java.exe" goto init
echo.
echo Error: JAVA_HOME is set to an invalid directory. >&2
echo JAVA_HOME = "%JAVA_HOME%" >&2
echo Please set the JAVA_HOME variable in your environment to match the >&2
echo location of your Java installation. >&2
echo.
goto error
@REM ==== END VALIDATION ====
:init
@REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
@REM Fallback to current working directory if not found.
set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR%
IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir
set EXEC_DIR=%CD%
set WDIR=%EXEC_DIR%
:findBaseDir
IF EXIST "%WDIR%"\.mvn goto baseDirFound
cd ..
IF "%WDIR%"=="%CD%" goto baseDirNotFound
set WDIR=%CD%
goto findBaseDir
:baseDirFound
set MAVEN_PROJECTBASEDIR=%WDIR%
cd "%EXEC_DIR%"
goto endDetectBaseDir
:baseDirNotFound
set MAVEN_PROJECTBASEDIR=%EXEC_DIR%
cd "%EXEC_DIR%"
:endDetectBaseDir
IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig
@setlocal EnableExtensions EnableDelayedExpansion
for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a
@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS%
:endReadAdditionalConfig
SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"
set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar"
set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar"
FOR /F "tokens=1,2 delims==" %%A IN (%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties) DO (
IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B
)
@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
@REM This allows using the maven wrapper in projects that prohibit checking in binary data.
if exist %WRAPPER_JAR% (
echo Found %WRAPPER_JAR%
) else (
echo Couldn't find %WRAPPER_JAR%, downloading it ...
echo Downloading from: %DOWNLOAD_URL%
powershell -Command "(New-Object Net.WebClient).DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"
echo Finished downloading %WRAPPER_JAR%
)
@REM End of extension
%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
if ERRORLEVEL 1 goto error
goto end
:error
set ERROR_CODE=1
:end
@endlocal & set ERROR_CODE=%ERROR_CODE%
if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
@REM check for post script, once with legacy .bat ending and once with .cmd ending
if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
:skipRcPost
@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
if "%MAVEN_BATCH_PAUSE%" == "on" pause
if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%
exit /B %ERROR_CODE%
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.setupwizardlib.view;
import android.annotation.TargetApi;
import android.content.Context;
import android.os.Build.VERSION_CODES;
import android.util.AttributeSet;
import android.widget.Checkable;
import android.widget.LinearLayout;
import androidx.annotation.Nullable;
/**
 * A {@link LinearLayout} that implements {@link Checkable}. The checked state
 * is merged into the drawable state in {@link #onCreateDrawableState(int)},
 * so child views can mirror it via {@code android:duplicateParentState}.
 */
public class CheckableLinearLayout extends LinearLayout implements Checkable {

    /** Current checked state; drives {@link #onCreateDrawableState(int)}. */
    private boolean mChecked = false;

    public CheckableLinearLayout(Context context) {
        super(context);
    }

    public CheckableLinearLayout(Context context, @Nullable AttributeSet attrs) {
        super(context, attrs);
    }

    @TargetApi(VERSION_CODES.HONEYCOMB)
    public CheckableLinearLayout(
            Context context,
            @Nullable AttributeSet attrs,
            int defStyleAttr) {
        super(context, attrs, defStyleAttr);
    }

    @TargetApi(VERSION_CODES.LOLLIPOP)
    public CheckableLinearLayout(
            Context context,
            AttributeSet attrs,
            int defStyleAttr,
            int defStyleRes) {
        super(context, attrs, defStyleAttr, defStyleRes);
    }

    // Instance initializer: runs after super() for every constructor above.
    {
        setFocusable(true);
    }

    @Override
    protected int[] onCreateDrawableState(int extraSpace) {
        if (!mChecked) {
            return super.onCreateDrawableState(extraSpace);
        }
        // Reserve one extra slot in the drawable state and merge in
        // state_checked so child drawables can react to it.
        final int[] drawableState = super.onCreateDrawableState(extraSpace + 1);
        final int[] checkedState = new int[] { android.R.attr.state_checked };
        return mergeDrawableStates(drawableState, checkedState);
    }

    @Override
    public void setChecked(boolean checked) {
        mChecked = checked;
        refreshDrawableState();
    }

    @Override
    public boolean isChecked() {
        return mChecked;
    }

    @Override
    public void toggle() {
        setChecked(!isChecked());
    }
}
|
{
"pile_set_name": "Github"
}
|
/*
Copyright 2017, 2018 Ankyra
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
. "gopkg.in/check.v1"
)
// An environment with no deployments yields an empty DAG.
func (s *suite) Test_GetDeploymentStateDAG_empty_env(c *C) {
	prj, _ := NewProjectState("my-project")
	env, err := prj.GetEnvironmentStateOrMakeNew("my-env")
	c.Assert(err, IsNil)
	dag, err := env.GetDeploymentStateDAG(BuildStage)
	c.Assert(err, IsNil)
	c.Assert(dag, HasLen, 0)
}
// A single deployment with no providers becomes the DAG's only root node,
// with no successors.
func (s *suite) Test_GetDeploymentStateDAG_one_deployment(c *C) {
	stage := DeployStage
	prj, _ := NewProjectState("my-project")
	env, err := prj.GetEnvironmentStateOrMakeNew("my-env")
	c.Assert(err, IsNil)
	depl1, err := env.GetOrCreateDeploymentState("depl1")
	c.Assert(err, IsNil)
	depl1.GetStageOrCreateNew(stage)
	dag, err := env.GetDeploymentStateDAG(stage)
	c.Assert(err, IsNil)
	c.Assert(dag, HasLen, 1)
	c.Assert(dag[0].Node, DeepEquals, depl1)
	c.Assert(dag[0].AndThen, HasLen, 0)
}
// depl1 consumes a provider implemented by depl2, so depl2 must be the single
// DAG root with depl1 as its successor, and the topological sort must place
// depl2 before depl1.
func (s *suite) Test_GetDeploymentStateDAG_two_deployments_one_provider(c *C) {
	stage := DeployStage
	prj, _ := NewProjectState("my-project")
	env, err := prj.GetEnvironmentStateOrMakeNew("my-env")
	c.Assert(err, IsNil)
	depl1, err := env.GetOrCreateDeploymentState("depl1")
	c.Assert(err, IsNil)
	depl2, err := env.GetOrCreateDeploymentState("depl2")
	c.Assert(err, IsNil)
	st := depl1.GetStageOrCreateNew(stage)
	// depl1's "whatever" provider is implemented by depl2.
	st.Providers["whatever"] = "depl2"
	depl2.GetStageOrCreateNew(stage)
	dag, err := env.GetDeploymentStateDAG(stage)
	c.Assert(err, IsNil)
	c.Assert(dag, HasLen, 1)
	c.Assert(dag[0].Node, DeepEquals, depl2)
	c.Assert(dag[0].AndThen, HasLen, 1)
	c.Assert(dag[0].AndThen[0].Node, DeepEquals, depl1)
	c.Assert(dag[0].AndThen[0].AndThen, HasLen, 0)
	tsort, err := env.GetDeploymentStateTopologicalSort(stage)
	c.Assert(err, IsNil)
	c.Assert(tsort, HasLen, 2)
	c.Assert(tsort[0], DeepEquals, depl2)
	c.Assert(tsort[1], DeepEquals, depl1)
}
// Test_GetDeploymentStateDAG builds the five-node provider graph below,
// checks the resulting DAG shape, and then verifies repeatedly that every
// topological sort places each provider before its consumer.
func (s *suite) Test_GetDeploymentStateDAG(c *C) {
	// For deployment graph:
	//
	// A -> B, E
	// B -> C, D
	// C -> D
	// D
	// E
	stage := DeployStage
	prj, _ := NewProjectState("my-project")
	env, err := prj.GetEnvironmentStateOrMakeNew("my-env")
	c.Assert(err, IsNil)
	deplA, err := env.GetOrCreateDeploymentState("deplA")
	c.Assert(err, IsNil)
	deplB, err := env.GetOrCreateDeploymentState("deplB")
	c.Assert(err, IsNil)
	deplC, err := env.GetOrCreateDeploymentState("deplC")
	c.Assert(err, IsNil)
	deplD, err := env.GetOrCreateDeploymentState("deplD")
	c.Assert(err, IsNil)
	deplE, err := env.GetOrCreateDeploymentState("deplE")
	c.Assert(err, IsNil)
	stA := deplA.GetStageOrCreateNew(stage)
	stA.Providers["b"] = "deplB"
	stA.Providers["e"] = "deplE"
	stB := deplB.GetStageOrCreateNew(stage)
	stB.Providers["c"] = "deplC"
	stB.Providers["d"] = "deplD"
	stC := deplC.GetStageOrCreateNew(stage)
	stC.Providers["d"] = "deplD"
	deplD.GetStageOrCreateNew(stage)
	deplE.GetStageOrCreateNew(stage)
	dag, err := env.GetDeploymentStateDAG(stage)
	c.Assert(err, IsNil)
	// The roots are the deployments nobody provides for: D and E. Their order
	// is nondeterministic (map iteration), so branch on the observed order.
	c.Assert(dag, HasLen, 2)
	var bDag, cDag, dDag, eDag *DAGNode
	if dag[0].Node.Name == "deplD" {
		dDag = dag[0]
		eDag = dag[1]
	} else {
		dDag = dag[1]
		eDag = dag[0]
	}
	c.Assert(dDag.Node, DeepEquals, deplD)
	c.Assert(dDag.AndThen, HasLen, 2)
	if dDag.AndThen[0].Node.Name == "deplB" {
		bDag = dDag.AndThen[0]
		cDag = dDag.AndThen[1]
	} else {
		bDag = dDag.AndThen[1]
		cDag = dDag.AndThen[0]
	}
	c.Assert(bDag.Node, DeepEquals, deplB)
	c.Assert(bDag.AndThen, HasLen, 1)
	c.Assert(bDag.AndThen[0].Node, DeepEquals, deplA)
	c.Assert(cDag.Node, DeepEquals, deplC)
	c.Assert(cDag.AndThen, HasLen, 1)
	c.Assert(cDag.AndThen[0].Node, DeepEquals, deplB)
	c.Assert(eDag.Node, DeepEquals, deplE)
	c.Assert(eDag.AndThen, HasLen, 1)
	c.Assert(eDag.AndThen[0].Node, DeepEquals, deplA)
	// Sort many times: map iteration order varies between runs, so repeating
	// catches ordering bugs that only appear for some iteration orders.
	i := 0
	for i < 1000 {
		tsort, err := env.GetDeploymentStateTopologicalSort(stage)
		c.Assert(err, IsNil)
		for ix, depl := range tsort {
			st := depl.GetStageOrCreateNew(stage)
			for _, deplName := range st.Providers {
				found := false
				for depIx, depDepl := range tsort {
					if depDepl.Name == deplName {
						found = true
						c.Assert(depIx < ix, Equals, true, Commentf("Deployment '%s' should happen before '%s'", deplName, depl.Name))
					}
				}
				// Fix: the deployment that is missing is the provider
				// (deplName), not the consumer (depl.Name) — the old message
				// reported the wrong name on failure.
				c.Assert(found, Equals, true, Commentf("Missing deployment '%s' in topological sort", deplName))
			}
		}
		i += 1
	}
}
// hasItemChecker is a custom gocheck checker that reports whether a slice
// contains a given item; it is used through the package-level HasItem value.
type hasItemChecker struct{}
// HasItem is the checker instance, used as: c.Assert(slice, HasItem, item).
var HasItem = &hasItemChecker{}
// Info describes the checker's name and parameter labels to the gocheck
// framework.
func (*hasItemChecker) Info() *CheckerInfo {
return &CheckerInfo{Name: "HasItem", Params: []string{"obtained", "expected to have item"}}
}
// Check implements the gocheck Checker interface for HasItem: params[0] is the
// obtained value (supported kinds: []interface{} and []string) and params[1]
// is the item expected to be present. It returns (true, "") when the item is
// found, and (false, <reason>) otherwise; names is unused.
func (*hasItemChecker) Check(params []interface{}, names []string) (bool, string) {
	expectedItem := params[1]
	// A type-switch binding gives each case a correctly-typed value directly,
	// avoiding the repeated type assertions of a value-less switch.
	switch obtained := params[0].(type) {
	case []interface{}:
		for _, v := range obtained {
			if v == expectedItem {
				return true, ""
			}
		}
	case []string:
		for _, v := range obtained {
			if v == expectedItem {
				return true, ""
			}
		}
	default:
		return false, "Unexpected type."
	}
	return false, "Item not found"
}
|
{
"pile_set_name": "Github"
}
|
package jetbrains.mps.lang.plugin.standalone.editor;
/*Generated by MPS */
import jetbrains.mps.editor.runtime.descriptor.AbstractEditorBuilder;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.mps.openapi.model.SNode;
import jetbrains.mps.openapi.editor.EditorContext;
import jetbrains.mps.openapi.editor.cells.EditorCell;
import jetbrains.mps.nodeEditor.cells.EditorCell_Collection;
import jetbrains.mps.nodeEditor.cellLayout.CellLayout_Vertical;
import jetbrains.mps.openapi.editor.style.Style;
import jetbrains.mps.editor.runtime.style.StyleImpl;
import jetbrains.mps.editor.runtime.style.StyleAttributes;
import jetbrains.mps.nodeEditor.BlockCells;
import jetbrains.mps.nodeEditor.cellLayout.CellLayout_Horizontal;
import jetbrains.mps.nodeEditor.cells.EditorCell_Constant;
import org.jetbrains.mps.openapi.language.SProperty;
import jetbrains.mps.openapi.editor.menus.transformation.SPropertyInfo;
import jetbrains.mps.nodeEditor.cells.EditorCell_Property;
import jetbrains.mps.nodeEditor.cells.SPropertyAccessor;
import jetbrains.mps.nodeEditor.cellMenu.SPropertySubstituteInfo;
import jetbrains.mps.lang.smodel.generator.smodelAdapter.SNodeOperations;
import jetbrains.mps.lang.smodel.generator.smodelAdapter.IAttributeDescriptor;
import jetbrains.mps.internal.collections.runtime.Sequence;
import jetbrains.mps.internal.collections.runtime.IWhereFilter;
import java.util.Objects;
import jetbrains.mps.lang.core.behavior.PropertyAttribute__BehaviorDescriptor;
import jetbrains.mps.nodeEditor.EditorManager;
import jetbrains.mps.openapi.editor.update.AttributeKind;
import jetbrains.mps.nodeEditor.MPSFonts;
import jetbrains.mps.nodeEditor.cells.EditorCell_Indent;
import jetbrains.mps.nodeEditor.cellProviders.AbstractCellListHandler;
import jetbrains.mps.lang.editor.cellProviders.RefNodeListHandler;
import org.jetbrains.mps.openapi.language.SContainmentLink;
import org.jetbrains.mps.openapi.language.SAbstractConcept;
import jetbrains.mps.openapi.editor.menus.transformation.SNodeLocation;
import jetbrains.mps.openapi.editor.cells.DefaultSubstituteInfo;
import jetbrains.mps.nodeEditor.cellMenu.SEmptyContainmentSubstituteInfo;
import jetbrains.mps.nodeEditor.cellMenu.SChildSubstituteInfo;
import jetbrains.mps.openapi.editor.cells.CellActionType;
import jetbrains.mps.nodeEditor.cellActions.CellAction_DeleteNode;
import jetbrains.mps.lang.editor.cellProviders.SingleRoleCellProvider;
import jetbrains.mps.editor.runtime.impl.cellActions.CellAction_DeleteSmart;
import jetbrains.mps.smodel.adapter.structure.MetaAdapterFactory;
import org.jetbrains.mps.openapi.language.SConcept;
// Editor builder for the ApplicationPluginDeclaration concept (MPS-generated
// code; the cell ids and method names encode generator positions — do not
// hand-edit without regenerating). It assembles the editor cell tree for a
// node: a header row ("application plugin" plus the editable name property),
// an indented body containing the fieldDeclaration list, the initBlock and
// the disposeBlock, and optional matching braces when BlockCells.useBraces()
// is true.
/*package*/ class ApplicationPluginDeclaration_EditorBuilder_a extends AbstractEditorBuilder {
@NotNull
private SNode myNode;
public ApplicationPluginDeclaration_EditorBuilder_a(@NotNull EditorContext context, @NotNull SNode node) {
super(context);
myNode = node;
}
@NotNull
@Override
public SNode getNode() {
return myNode;
}
// Entry point: builds and returns the root editor cell for the node.
/*package*/ EditorCell createCell() {
return createCollection_0();
}
// Root vertical collection: header row, body row and, when braces are
// enabled, the closing "}" cell.
private EditorCell createCollection_0() {
EditorCell_Collection editorCell = new EditorCell_Collection(getEditorContext(), myNode, new CellLayout_Vertical());
editorCell.setCellId("Collection_n7kiqy_a");
editorCell.setBig(true);
setCellContext(editorCell);
Style style = new StyleImpl();
style.set(StyleAttributes.SELECTABLE, false);
editorCell.getStyle().putAll(style);
editorCell.addEditorCell(createCollection_1());
editorCell.addEditorCell(createCollection_3());
if (nodeCondition_n7kiqy_a2a()) {
editorCell.addEditorCell(createConstant_4());
}
return editorCell;
}
// The closing brace is shown only when brace-style blocks are enabled.
private boolean nodeCondition_n7kiqy_a2a() {
return BlockCells.useBraces();
}
// Header row: keyword + name cells, followed by an optional "{" cell.
private EditorCell createCollection_1() {
EditorCell_Collection editorCell = new EditorCell_Collection(getEditorContext(), myNode, new CellLayout_Horizontal());
editorCell.setCellId("Collection_n7kiqy_a0");
Style style = new StyleImpl();
style.set(StyleAttributes.SELECTABLE, false);
editorCell.getStyle().putAll(style);
editorCell.addEditorCell(createCollection_2());
if (nodeCondition_n7kiqy_a1a0()) {
editorCell.addEditorCell(createConstant_1());
}
return editorCell;
}
// The opening brace is shown only when brace-style blocks are enabled.
private boolean nodeCondition_n7kiqy_a1a0() {
return BlockCells.useBraces();
}
// "application plugin" keyword followed by the editable name property cell.
private EditorCell createCollection_2() {
EditorCell_Collection editorCell = new EditorCell_Collection(getEditorContext(), myNode, new CellLayout_Horizontal());
editorCell.setCellId("Collection_n7kiqy_a0a");
Style style = new StyleImpl();
style.set(StyleAttributes.SELECTABLE, false);
editorCell.getStyle().putAll(style);
editorCell.addEditorCell(createConstant_0());
editorCell.addEditorCell(createProperty_0());
return editorCell;
}
// Constant keyword label cell.
private EditorCell createConstant_0() {
EditorCell_Constant editorCell = new EditorCell_Constant(getEditorContext(), myNode, "application plugin");
editorCell.setCellId("Constant_n7kiqy_a0a0");
editorCell.setDefaultText("");
return editorCell;
}
// Editable cell for the node's "name" property, with substitute (completion)
// info. If any property-attribute nodes are attached to this property, the
// cell is wrapped into an attribute cell via EditorManager.
private EditorCell createProperty_0() {
getCellFactory().pushCellContext();
try {
final SProperty property = PROPS.name$MnvL;
getCellFactory().setPropertyInfo(new SPropertyInfo(myNode, property));
EditorCell_Property editorCell = EditorCell_Property.create(getEditorContext(), new SPropertyAccessor(myNode, property, false, false), myNode);
editorCell.setDefaultText("<name>");
editorCell.setCellId("property_name");
editorCell.setSubstituteInfo(new SPropertySubstituteInfo(editorCell, property));
setCellContext(editorCell);
Iterable<SNode> propertyAttributes = SNodeOperations.ofConcept(new IAttributeDescriptor.AllAttributes().list(myNode), CONCEPTS.PropertyAttribute$Gb);
Iterable<SNode> currentPropertyAttributes = Sequence.fromIterable(propertyAttributes).where(new IWhereFilter<SNode>() {
public boolean accept(SNode it) {
return Objects.equals(PropertyAttribute__BehaviorDescriptor.getProperty_id1avfQ4BBzOo.invoke(it), property);
}
});
if (Sequence.fromIterable(currentPropertyAttributes).isNotEmpty()) {
EditorManager manager = EditorManager.getInstanceFromContext(getEditorContext());
return manager.createNodeRoleAttributeCell(Sequence.fromIterable(currentPropertyAttributes).first(), AttributeKind.PROPERTY, editorCell);
} else
return editorCell;
} finally {
getCellFactory().popCellContext();
}
}
// Opening brace cell; the "brace" matching label pairs it with the closing
// brace cell for highlight/navigation.
private EditorCell createConstant_1() {
EditorCell_Constant editorCell = new EditorCell_Constant(getEditorContext(), myNode, "{");
editorCell.setCellId("Constant_n7kiqy_b0a");
Style style = new StyleImpl();
style.set(StyleAttributes.MATCHING_LABEL, "brace");
style.set(StyleAttributes.INDENT_LAYOUT_NO_WRAP, true);
style.set(StyleAttributes.FONT_STYLE, MPSFonts.PLAIN);
editorCell.getStyle().putAll(style);
editorCell.setDefaultText("");
return editorCell;
}
// Body row: an indent cell followed by the vertical body content.
private EditorCell createCollection_3() {
EditorCell_Collection editorCell = new EditorCell_Collection(getEditorContext(), myNode, new CellLayout_Horizontal());
editorCell.setCellId("Collection_n7kiqy_b0");
Style style = new StyleImpl();
style.set(StyleAttributes.SELECTABLE, false);
editorCell.getStyle().putAll(style);
editorCell.addEditorCell(createIndentCell_0());
editorCell.addEditorCell(createCollection_4());
return editorCell;
}
// Indentation cell preceding the body.
private EditorCell createIndentCell_0() {
EditorCell_Indent editorCell = new EditorCell_Indent(getEditorContext(), myNode);
return editorCell;
}
// Vertical body: field-declaration list, init block and dispose block,
// separated by empty constant cells.
private EditorCell createCollection_4() {
EditorCell_Collection editorCell = new EditorCell_Collection(getEditorContext(), myNode, new CellLayout_Vertical());
editorCell.setCellId("Collection_n7kiqy_b1a");
editorCell.addEditorCell(createRefNodeList_0());
editorCell.addEditorCell(createConstant_2());
editorCell.addEditorCell(createRefNode_0());
editorCell.addEditorCell(createConstant_3());
editorCell.addEditorCell(createRefNode_1());
return editorCell;
}
// Cell list for the fieldDeclaration children, managed by the handler below.
private EditorCell createRefNodeList_0() {
AbstractCellListHandler handler = new fieldDeclarationListHandler_n7kiqy_a1b0(myNode, getEditorContext());
EditorCell_Collection editorCell = handler.createCells(new CellLayout_Vertical(), false);
editorCell.setCellId("refNodeList_fieldDeclaration");
editorCell.setSRole(handler.getElementSRole());
return editorCell;
}
// List handler for the fieldDeclaration child collection: creates per-element
// cells and the empty-list placeholder, and installs completion and
// delete/backspace actions on them.
private static class fieldDeclarationListHandler_n7kiqy_a1b0 extends RefNodeListHandler {
@NotNull
private SNode myNode;
public fieldDeclarationListHandler_n7kiqy_a1b0(SNode ownerNode, EditorContext context) {
super(context, false);
myNode = ownerNode;
}
@NotNull
public SNode getNode() {
return myNode;
}
public SContainmentLink getSLink() {
return LINKS.fieldDeclaration$HYka;
}
public SAbstractConcept getChildSConcept() {
return CONCEPTS.DefaultClassifierFieldDeclaration$Hv;
}
public EditorCell createNodeCell(SNode elementNode) {
EditorCell elementCell = getUpdateSession().updateChildNodeCell(elementNode);
installElementCellActions(elementNode, elementCell, false);
return elementCell;
}
public EditorCell createEmptyCell() {
getCellFactory().pushCellContext();
getCellFactory().setNodeLocation(new SNodeLocation.FromParentAndLink(fieldDeclarationListHandler_n7kiqy_a1b0.this.getNode(), LINKS.fieldDeclaration$HYka));
try {
EditorCell emptyCell = null;
emptyCell = super.createEmptyCell();
installElementCellActions(null, emptyCell, true);
setCellContext(emptyCell);
return emptyCell;
} finally {
getCellFactory().popCellContext();
}
}
// Marker object stored as the user-object value for the "already installed"
// flags checked below.
private static final Object OBJ = new Object();
// Installs substitute info and DELETE/BACKSPACE actions on an element cell
// at most once each, guarded by per-feature user-object markers.
public void installElementCellActions(SNode elementNode, EditorCell elementCell, boolean isEmptyCell) {
if (elementCell.getUserObject(AbstractCellListHandler.ELEMENT_CELL_COMPLETE_SET) == null) {
if (elementCell.getSubstituteInfo() == null || elementCell.getSubstituteInfo() instanceof DefaultSubstituteInfo) {
elementCell.putUserObject(AbstractCellListHandler.ELEMENT_CELL_COMPLETE_SET, OBJ);
elementCell.setSubstituteInfo((isEmptyCell ? new SEmptyContainmentSubstituteInfo(elementCell) : new SChildSubstituteInfo(elementCell)));
}
}
if (elementCell.getUserObject(AbstractCellListHandler.ELEMENT_CELL_DELETE_SET) == null) {
if (elementNode != null) {
elementCell.putUserObject(AbstractCellListHandler.ELEMENT_CELL_DELETE_SET, OBJ);
elementCell.setAction(CellActionType.DELETE, new CellAction_DeleteNode(elementNode, CellAction_DeleteNode.DeleteDirection.FORWARD));
}
}
if (elementCell.getUserObject(ELEMENT_CELL_BACKSPACE_SET) == null) {
if (elementNode != null) {
elementCell.putUserObject(ELEMENT_CELL_BACKSPACE_SET, OBJ);
elementCell.setAction(CellActionType.BACKSPACE, new CellAction_DeleteNode(elementNode, CellAction_DeleteNode.DeleteDirection.BACKWARD));
}
}
if (elementCell.getUserObject(AbstractCellListHandler.ELEMENT_CELL_ACTIONS_SET) == null) {
if (elementNode != null) {
elementCell.putUserObject(AbstractCellListHandler.ELEMENT_CELL_ACTIONS_SET, OBJ);
}
}
}
}
// Empty separator cell between the field list and the init block.
private EditorCell createConstant_2() {
EditorCell_Constant editorCell = new EditorCell_Constant(getEditorContext(), myNode, "");
editorCell.setCellId("Constant_n7kiqy_b1b0");
Style style = new StyleImpl();
style.set(StyleAttributes.SELECTABLE, false);
editorCell.getStyle().putAll(style);
editorCell.setDefaultText("");
return editorCell;
}
// Single-child cell for the initBlock containment link.
private EditorCell createRefNode_0() {
SingleRoleCellProvider provider = new initBlockSingleRoleHandler_n7kiqy_c1b0(myNode, LINKS.initBlock$HXQ8, getEditorContext());
return provider.createCell();
}
// Single-role handler for the initBlock child: creates the child cell with
// smart-delete actions, or a "<init block>" placeholder when absent.
private static class initBlockSingleRoleHandler_n7kiqy_c1b0 extends SingleRoleCellProvider {
@NotNull
private SNode myNode;
public initBlockSingleRoleHandler_n7kiqy_c1b0(SNode ownerNode, SContainmentLink containmentLink, EditorContext context) {
super(containmentLink, context);
myNode = ownerNode;
}
@Override
@NotNull
public SNode getNode() {
return myNode;
}
protected EditorCell createChildCell(SNode child) {
EditorCell editorCell = getUpdateSession().updateChildNodeCell(child);
editorCell.setAction(CellActionType.DELETE, new CellAction_DeleteSmart(getNode(), LINKS.initBlock$HXQ8, child));
editorCell.setAction(CellActionType.BACKSPACE, new CellAction_DeleteSmart(getNode(), LINKS.initBlock$HXQ8, child));
installCellInfo(child, editorCell, false);
return editorCell;
}
// Attaches substitute info and the role to a (possibly empty) child cell.
private void installCellInfo(SNode child, EditorCell editorCell, boolean isEmpty) {
if (editorCell.getSubstituteInfo() == null || editorCell.getSubstituteInfo() instanceof DefaultSubstituteInfo) {
editorCell.setSubstituteInfo((isEmpty ? new SEmptyContainmentSubstituteInfo(editorCell) : new SChildSubstituteInfo(editorCell)));
}
if (editorCell.getSRole() == null) {
editorCell.setSRole(LINKS.initBlock$HXQ8);
}
}
@Override
protected EditorCell createEmptyCell() {
getCellFactory().pushCellContext();
getCellFactory().setNodeLocation(new SNodeLocation.FromParentAndLink(getNode(), LINKS.initBlock$HXQ8));
try {
EditorCell editorCell = super.createEmptyCell();
editorCell.setCellId("empty_initBlock");
installCellInfo(null, editorCell, true);
setCellContext(editorCell);
return editorCell;
} finally {
getCellFactory().popCellContext();
}
}
protected String getNoTargetText() {
return "<init block>";
}
}
// Empty separator cell between the init block and the dispose block.
private EditorCell createConstant_3() {
EditorCell_Constant editorCell = new EditorCell_Constant(getEditorContext(), myNode, "");
editorCell.setCellId("Constant_n7kiqy_d1b0");
Style style = new StyleImpl();
style.set(StyleAttributes.SELECTABLE, false);
editorCell.getStyle().putAll(style);
editorCell.setDefaultText("");
return editorCell;
}
// Single-child cell for the disposeBlock containment link.
private EditorCell createRefNode_1() {
SingleRoleCellProvider provider = new disposeBlockSingleRoleHandler_n7kiqy_e1b0(myNode, LINKS.disposeBlock$HY59, getEditorContext());
return provider.createCell();
}
// Single-role handler for the disposeBlock child: creates the child cell with
// smart-delete actions, or a "<dispose block>" placeholder when absent.
private static class disposeBlockSingleRoleHandler_n7kiqy_e1b0 extends SingleRoleCellProvider {
@NotNull
private SNode myNode;
public disposeBlockSingleRoleHandler_n7kiqy_e1b0(SNode ownerNode, SContainmentLink containmentLink, EditorContext context) {
super(containmentLink, context);
myNode = ownerNode;
}
@Override
@NotNull
public SNode getNode() {
return myNode;
}
protected EditorCell createChildCell(SNode child) {
EditorCell editorCell = getUpdateSession().updateChildNodeCell(child);
editorCell.setAction(CellActionType.DELETE, new CellAction_DeleteSmart(getNode(), LINKS.disposeBlock$HY59, child));
editorCell.setAction(CellActionType.BACKSPACE, new CellAction_DeleteSmart(getNode(), LINKS.disposeBlock$HY59, child));
installCellInfo(child, editorCell, false);
return editorCell;
}
// Attaches substitute info and the role to a (possibly empty) child cell.
private void installCellInfo(SNode child, EditorCell editorCell, boolean isEmpty) {
if (editorCell.getSubstituteInfo() == null || editorCell.getSubstituteInfo() instanceof DefaultSubstituteInfo) {
editorCell.setSubstituteInfo((isEmpty ? new SEmptyContainmentSubstituteInfo(editorCell) : new SChildSubstituteInfo(editorCell)));
}
if (editorCell.getSRole() == null) {
editorCell.setSRole(LINKS.disposeBlock$HY59);
}
}
@Override
protected EditorCell createEmptyCell() {
getCellFactory().pushCellContext();
getCellFactory().setNodeLocation(new SNodeLocation.FromParentAndLink(getNode(), LINKS.disposeBlock$HY59));
try {
EditorCell editorCell = super.createEmptyCell();
editorCell.setCellId("empty_disposeBlock");
installCellInfo(null, editorCell, true);
setCellContext(editorCell);
return editorCell;
} finally {
getCellFactory().popCellContext();
}
}
protected String getNoTargetText() {
return "<dispose block>";
}
}
// Closing brace cell, paired with the opening brace via the "brace" label.
private EditorCell createConstant_4() {
EditorCell_Constant editorCell = new EditorCell_Constant(getEditorContext(), myNode, "}");
editorCell.setCellId("Constant_n7kiqy_c0");
Style style = new StyleImpl();
style.set(StyleAttributes.MATCHING_LABEL, "brace");
style.set(StyleAttributes.INDENT_LAYOUT_NO_WRAP, true);
style.set(StyleAttributes.FONT_STYLE, MPSFonts.PLAIN);
editorCell.getStyle().putAll(style);
editorCell.setDefaultText("");
return editorCell;
}
// Meta-model identifiers (property/concept/containment-link handles) resolved
// once via MetaAdapterFactory; the hex values are the MPS module/concept ids.
private static final class PROPS {
/*package*/ static final SProperty name$MnvL = MetaAdapterFactory.getProperty(0xceab519525ea4f22L, 0x9b92103b95ca8c0cL, 0x110396eaaa4L, 0x110396ec041L, "name");
}
private static final class CONCEPTS {
/*package*/ static final SConcept PropertyAttribute$Gb = MetaAdapterFactory.getConcept(0xceab519525ea4f22L, 0x9b92103b95ca8c0cL, 0x2eb1ad060897da56L, "jetbrains.mps.lang.core.structure.PropertyAttribute");
/*package*/ static final SConcept DefaultClassifierFieldDeclaration$Hv = MetaAdapterFactory.getConcept(0x443f4c36fcf54eb6L, 0x95008d06ed259e3eL, 0x11aa7fc0293L, "jetbrains.mps.baseLanguage.classifiers.structure.DefaultClassifierFieldDeclaration");
}
private static final class LINKS {
/*package*/ static final SContainmentLink fieldDeclaration$HYka = MetaAdapterFactory.getContainmentLink(0xef7bf5acd06c4342L, 0xb11de42104eb9343L, 0x6b059b0986f2058L, 0x6b059b0986f205cL, "fieldDeclaration");
/*package*/ static final SContainmentLink initBlock$HXQ8 = MetaAdapterFactory.getContainmentLink(0xef7bf5acd06c4342L, 0xb11de42104eb9343L, 0x6b059b0986f2058L, 0x6b059b0986f205aL, "initBlock");
/*package*/ static final SContainmentLink disposeBlock$HY59 = MetaAdapterFactory.getContainmentLink(0xef7bf5acd06c4342L, 0xb11de42104eb9343L, 0x6b059b0986f2058L, 0x6b059b0986f205bL, "disposeBlock");
}
}
|
{
"pile_set_name": "Github"
}
|
//
// Copyright 2016 Pixar
//
// Licensed under the Apache License, Version 2.0 (the "Apache License")
// with the following modification; you may not use this file except in
// compliance with the Apache License and the following modification to it:
// Section 6. Trademarks. is deleted and replaced with:
//
// 6. Trademarks. This License does not grant permission to use the trade
// names, trademarks, service marks, or product names of the Licensor
// and its affiliates, except as required to comply with Section 4(c) of
// the License and to reproduce the content of the NOTICE file.
//
// You may obtain a copy of the Apache License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the Apache License with the above modification is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the Apache License for the specific
// language governing permissions and limitations under the Apache License.
//
#include "pxr/pxr.h"
#include "pxr/usd/pcp/layerPrefetchRequest.h"
#include "pxr/usd/pcp/layerStackRegistry.h"
#include "pxr/usd/sdf/layerUtils.h"
#include "pxr/base/work/arenaDispatcher.h"
#include "pxr/base/work/threadLimits.h"
#include <tbb/spin_mutex.h>
PXR_NAMESPACE_OPEN_SCOPE
namespace {
// Recursively opens a layer's sublayers in parallel via a WorkArenaDispatcher.
// Every successfully opened sublayer is retained in *retainedLayers (guarded
// by a spin mutex, since tasks run concurrently); nested sublayers are only
// descended into the first time a layer is seen, so shared sublayers are not
// reopened. The destructor blocks until all dispatched tasks finish.
struct _Opener
{
explicit _Opener(const Pcp_MutedLayers& mutedLayers,
std::set<SdfLayerRefPtr> *retainedLayers)
: _mutedLayers(mutedLayers)
, _retainedLayers(retainedLayers) {}
// Wait for any in-flight _OpenSublayer tasks before destruction.
~_Opener() { _dispatcher.Wait(); }
// Schedule one task per sublayer path of 'layer'.
void OpenSublayers(const SdfLayerRefPtr &layer,
const SdfLayer::FileFormatArguments &layerArgs) {
TF_FOR_ALL(path, layer->GetSubLayerPaths()) {
_dispatcher.Run(
&_Opener::_OpenSublayer, this, *path, layer, layerArgs);
}
}
private:
// Task body: open a single sublayer (skipping muted ones), retain it, and
// recurse into its sublayers if it was newly seen.
void _OpenSublayer(std::string path,
const SdfLayerRefPtr &anchorLayer,
const SdfLayer::FileFormatArguments &layerArgs) {
if (_mutedLayers.IsLayerMuted(anchorLayer, path)) {
return;
}
// Open this specific sublayer path.
// The call to SdfFindOrOpenRelativeToLayer() may take some time,
// potentially multiple seconds.
if (SdfLayerRefPtr sublayer =
SdfFindOrOpenRelativeToLayer(anchorLayer, &path, layerArgs)) {
// Retain this sublayer.
bool didInsert;
{
tbb::spin_mutex::scoped_lock lock(_retainedLayersMutex);
didInsert = _retainedLayers->insert(sublayer).second;
}
// Open the nested sublayers. Only do this if we haven't seen this
// layer before, i.e. didInsert is true.
if (didInsert)
OpenSublayers(sublayer, layerArgs);
}
}
WorkArenaDispatcher _dispatcher;
const Pcp_MutedLayers& _mutedLayers;
std::set<SdfLayerRefPtr> *_retainedLayers;
// Protects _retainedLayers against concurrent inserts from dispatcher tasks.
mutable tbb::spin_mutex _retainedLayersMutex;
};
} // anon
// Queue a request to prefetch the sublayer stack of 'layer', opened with the
// given file-format arguments. The request is only recorded here; the actual
// opening happens in Run().
void
PcpLayerPrefetchRequest::RequestSublayerStack(
    const SdfLayerRefPtr &layer,
    const SdfLayer::FileFormatArguments &args)
{
    // emplace constructs the (layer, args) element in place, avoiding the
    // temporary pair built by insert(std::make_pair(...)).
    _sublayerRequests.emplace(layer, args);
}
// Execute all queued sublayer-stack requests, opening sublayers in parallel
// via _Opener and retaining them in _retainedLayers. Does nothing when only
// one worker thread is available, since prefetching would buy nothing then.
void
PcpLayerPrefetchRequest::Run(const Pcp_MutedLayers& mutedLayers)
{
if (WorkGetConcurrencyLimit() <= 1) {
// Do not bother pre-fetching if we do not have extra threads
// available.
return;
}
// Release the GIL so we don't deadlock when Sd tries to get a path
// resolver (which does ref-counting on the resolver, which requires
// the GIL to manage TfRefBase identity-uniqueness).
TF_PY_ALLOW_THREADS_IN_SCOPE();
// Take ownership of the pending requests, leaving _sublayerRequests empty.
std::set<_Request> requests;
requests.swap(_sublayerRequests);
// Open all the sublayers in the request.
// _Opener's destructor waits for all dispatched tasks, so every open has
// completed by the time this function returns.
_Opener opener(mutedLayers, &_retainedLayers);
TF_FOR_ALL(req, requests)
opener.OpenSublayers(req->first, req->second);
}
PXR_NAMESPACE_CLOSE_SCOPE
|
{
"pile_set_name": "Github"
}
|
// mkerrors.sh -Wall -Werror -static -I/tmp/include -m64
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build amd64,linux
// Created by cgo -godefs - DO NOT EDIT
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 _const.go
package unix
import "syscall"
const (
AF_ALG = 0x26
AF_APPLETALK = 0x5
AF_ASH = 0x12
AF_ATMPVC = 0x8
AF_ATMSVC = 0x14
AF_AX25 = 0x3
AF_BLUETOOTH = 0x1f
AF_BRIDGE = 0x7
AF_CAIF = 0x25
AF_CAN = 0x1d
AF_DECnet = 0xc
AF_ECONET = 0x13
AF_FILE = 0x1
AF_IB = 0x1b
AF_IEEE802154 = 0x24
AF_INET = 0x2
AF_INET6 = 0xa
AF_IPX = 0x4
AF_IRDA = 0x17
AF_ISDN = 0x22
AF_IUCV = 0x20
AF_KCM = 0x29
AF_KEY = 0xf
AF_LLC = 0x1a
AF_LOCAL = 0x1
AF_MAX = 0x2c
AF_MPLS = 0x1c
AF_NETBEUI = 0xd
AF_NETLINK = 0x10
AF_NETROM = 0x6
AF_NFC = 0x27
AF_PACKET = 0x11
AF_PHONET = 0x23
AF_PPPOX = 0x18
AF_QIPCRTR = 0x2a
AF_RDS = 0x15
AF_ROSE = 0xb
AF_ROUTE = 0x10
AF_RXRPC = 0x21
AF_SECURITY = 0xe
AF_SMC = 0x2b
AF_SNA = 0x16
AF_TIPC = 0x1e
AF_UNIX = 0x1
AF_UNSPEC = 0x0
AF_VSOCK = 0x28
AF_WANPIPE = 0x19
AF_X25 = 0x9
ALG_OP_DECRYPT = 0x0
ALG_OP_ENCRYPT = 0x1
ALG_SET_AEAD_ASSOCLEN = 0x4
ALG_SET_AEAD_AUTHSIZE = 0x5
ALG_SET_IV = 0x2
ALG_SET_KEY = 0x1
ALG_SET_OP = 0x3
ARPHRD_6LOWPAN = 0x339
ARPHRD_ADAPT = 0x108
ARPHRD_APPLETLK = 0x8
ARPHRD_ARCNET = 0x7
ARPHRD_ASH = 0x30d
ARPHRD_ATM = 0x13
ARPHRD_AX25 = 0x3
ARPHRD_BIF = 0x307
ARPHRD_CAIF = 0x336
ARPHRD_CAN = 0x118
ARPHRD_CHAOS = 0x5
ARPHRD_CISCO = 0x201
ARPHRD_CSLIP = 0x101
ARPHRD_CSLIP6 = 0x103
ARPHRD_DDCMP = 0x205
ARPHRD_DLCI = 0xf
ARPHRD_ECONET = 0x30e
ARPHRD_EETHER = 0x2
ARPHRD_ETHER = 0x1
ARPHRD_EUI64 = 0x1b
ARPHRD_FCAL = 0x311
ARPHRD_FCFABRIC = 0x313
ARPHRD_FCPL = 0x312
ARPHRD_FCPP = 0x310
ARPHRD_FDDI = 0x306
ARPHRD_FRAD = 0x302
ARPHRD_HDLC = 0x201
ARPHRD_HIPPI = 0x30c
ARPHRD_HWX25 = 0x110
ARPHRD_IEEE1394 = 0x18
ARPHRD_IEEE802 = 0x6
ARPHRD_IEEE80211 = 0x321
ARPHRD_IEEE80211_PRISM = 0x322
ARPHRD_IEEE80211_RADIOTAP = 0x323
ARPHRD_IEEE802154 = 0x324
ARPHRD_IEEE802154_MONITOR = 0x325
ARPHRD_IEEE802_TR = 0x320
ARPHRD_INFINIBAND = 0x20
ARPHRD_IP6GRE = 0x337
ARPHRD_IPDDP = 0x309
ARPHRD_IPGRE = 0x30a
ARPHRD_IRDA = 0x30f
ARPHRD_LAPB = 0x204
ARPHRD_LOCALTLK = 0x305
ARPHRD_LOOPBACK = 0x304
ARPHRD_METRICOM = 0x17
ARPHRD_NETLINK = 0x338
ARPHRD_NETROM = 0x0
ARPHRD_NONE = 0xfffe
ARPHRD_PHONET = 0x334
ARPHRD_PHONET_PIPE = 0x335
ARPHRD_PIMREG = 0x30b
ARPHRD_PPP = 0x200
ARPHRD_PRONET = 0x4
ARPHRD_RAWHDLC = 0x206
ARPHRD_ROSE = 0x10e
ARPHRD_RSRVD = 0x104
ARPHRD_SIT = 0x308
ARPHRD_SKIP = 0x303
ARPHRD_SLIP = 0x100
ARPHRD_SLIP6 = 0x102
ARPHRD_TUNNEL = 0x300
ARPHRD_TUNNEL6 = 0x301
ARPHRD_VOID = 0xffff
ARPHRD_VSOCKMON = 0x33a
ARPHRD_X25 = 0x10f
B0 = 0x0
B1000000 = 0x1008
B110 = 0x3
B115200 = 0x1002
B1152000 = 0x1009
B1200 = 0x9
B134 = 0x4
B150 = 0x5
B1500000 = 0x100a
B1800 = 0xa
B19200 = 0xe
B200 = 0x6
B2000000 = 0x100b
B230400 = 0x1003
B2400 = 0xb
B2500000 = 0x100c
B300 = 0x7
B3000000 = 0x100d
B3500000 = 0x100e
B38400 = 0xf
B4000000 = 0x100f
B460800 = 0x1004
B4800 = 0xc
B50 = 0x1
B500000 = 0x1005
B57600 = 0x1001
B576000 = 0x1006
B600 = 0x8
B75 = 0x2
B921600 = 0x1007
B9600 = 0xd
BLKBSZGET = 0x80081270
BLKBSZSET = 0x40081271
BLKFLSBUF = 0x1261
BLKFRAGET = 0x1265
BLKFRASET = 0x1264
BLKGETSIZE = 0x1260
BLKGETSIZE64 = 0x80081272
BLKPBSZGET = 0x127b
BLKRAGET = 0x1263
BLKRASET = 0x1262
BLKROGET = 0x125e
BLKROSET = 0x125d
BLKRRPART = 0x125f
BLKSECTGET = 0x1267
BLKSECTSET = 0x1266
BLKSSZGET = 0x1268
BOTHER = 0x1000
BPF_A = 0x10
BPF_ABS = 0x20
BPF_ADD = 0x0
BPF_ALU = 0x4
BPF_AND = 0x50
BPF_B = 0x10
BPF_DIV = 0x30
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
BPF_JA = 0x0
BPF_JEQ = 0x10
BPF_JGE = 0x30
BPF_JGT = 0x20
BPF_JMP = 0x5
BPF_JSET = 0x40
BPF_K = 0x0
BPF_LD = 0x0
BPF_LDX = 0x1
BPF_LEN = 0x80
BPF_LL_OFF = -0x200000
BPF_LSH = 0x60
BPF_MAJOR_VERSION = 0x1
BPF_MAXINSNS = 0x1000
BPF_MEM = 0x60
BPF_MEMWORDS = 0x10
BPF_MINOR_VERSION = 0x1
BPF_MISC = 0x7
BPF_MOD = 0x90
BPF_MSH = 0xa0
BPF_MUL = 0x20
BPF_NEG = 0x80
BPF_NET_OFF = -0x100000
BPF_OR = 0x40
BPF_RET = 0x6
BPF_RSH = 0x70
BPF_ST = 0x2
BPF_STX = 0x3
BPF_SUB = 0x10
BPF_TAX = 0x0
BPF_TXA = 0x80
BPF_W = 0x0
BPF_X = 0x8
BPF_XOR = 0xa0
BRKINT = 0x2
BS0 = 0x0
BS1 = 0x2000
BSDLY = 0x2000
CAN_BCM = 0x2
CAN_EFF_FLAG = 0x80000000
CAN_EFF_ID_BITS = 0x1d
CAN_EFF_MASK = 0x1fffffff
CAN_ERR_FLAG = 0x20000000
CAN_ERR_MASK = 0x1fffffff
CAN_INV_FILTER = 0x20000000
CAN_ISOTP = 0x6
CAN_MAX_DLC = 0x8
CAN_MAX_DLEN = 0x8
CAN_MCNET = 0x5
CAN_MTU = 0x10
CAN_NPROTO = 0x7
CAN_RAW = 0x1
CAN_RAW_FILTER_MAX = 0x200
CAN_RTR_FLAG = 0x40000000
CAN_SFF_ID_BITS = 0xb
CAN_SFF_MASK = 0x7ff
CAN_TP16 = 0x3
CAN_TP20 = 0x4
CBAUD = 0x100f
CBAUDEX = 0x1000
CFLUSH = 0xf
CIBAUD = 0x100f0000
CLOCAL = 0x800
CLOCK_BOOTTIME = 0x7
CLOCK_BOOTTIME_ALARM = 0x9
CLOCK_DEFAULT = 0x0
CLOCK_EXT = 0x1
CLOCK_INT = 0x2
CLOCK_MONOTONIC = 0x1
CLOCK_MONOTONIC_COARSE = 0x6
CLOCK_MONOTONIC_RAW = 0x4
CLOCK_PROCESS_CPUTIME_ID = 0x2
CLOCK_REALTIME = 0x0
CLOCK_REALTIME_ALARM = 0x8
CLOCK_REALTIME_COARSE = 0x5
CLOCK_TAI = 0xb
CLOCK_THREAD_CPUTIME_ID = 0x3
CLOCK_TXFROMRX = 0x4
CLOCK_TXINT = 0x3
CLONE_CHILD_CLEARTID = 0x200000
CLONE_CHILD_SETTID = 0x1000000
CLONE_DETACHED = 0x400000
CLONE_FILES = 0x400
CLONE_FS = 0x200
CLONE_IO = 0x80000000
CLONE_NEWCGROUP = 0x2000000
CLONE_NEWIPC = 0x8000000
CLONE_NEWNET = 0x40000000
CLONE_NEWNS = 0x20000
CLONE_NEWPID = 0x20000000
CLONE_NEWUSER = 0x10000000
CLONE_NEWUTS = 0x4000000
CLONE_PARENT = 0x8000
CLONE_PARENT_SETTID = 0x100000
CLONE_PTRACE = 0x2000
CLONE_SETTLS = 0x80000
CLONE_SIGHAND = 0x800
CLONE_SYSVSEM = 0x40000
CLONE_THREAD = 0x10000
CLONE_UNTRACED = 0x800000
CLONE_VFORK = 0x4000
CLONE_VM = 0x100
CMSPAR = 0x40000000
CR0 = 0x0
CR1 = 0x200
CR2 = 0x400
CR3 = 0x600
CRDLY = 0x600
CREAD = 0x80
CRTSCTS = 0x80000000
CS5 = 0x0
CS6 = 0x10
CS7 = 0x20
CS8 = 0x30
CSIGNAL = 0xff
CSIZE = 0x30
CSTART = 0x11
CSTATUS = 0x0
CSTOP = 0x13
CSTOPB = 0x40
CSUSP = 0x1a
DT_BLK = 0x6
DT_CHR = 0x2
DT_DIR = 0x4
DT_FIFO = 0x1
DT_LNK = 0xa
DT_REG = 0x8
DT_SOCK = 0xc
DT_UNKNOWN = 0x0
DT_WHT = 0xe
ECHO = 0x8
ECHOCTL = 0x200
ECHOE = 0x10
ECHOK = 0x20
ECHOKE = 0x800
ECHONL = 0x40
ECHOPRT = 0x400
EFD_CLOEXEC = 0x80000
EFD_NONBLOCK = 0x800
EFD_SEMAPHORE = 0x1
ENCODING_DEFAULT = 0x0
ENCODING_FM_MARK = 0x3
ENCODING_FM_SPACE = 0x4
ENCODING_MANCHESTER = 0x5
ENCODING_NRZ = 0x1
ENCODING_NRZI = 0x2
EPOLLERR = 0x8
EPOLLET = 0x80000000
EPOLLEXCLUSIVE = 0x10000000
EPOLLHUP = 0x10
EPOLLIN = 0x1
EPOLLMSG = 0x400
EPOLLONESHOT = 0x40000000
EPOLLOUT = 0x4
EPOLLPRI = 0x2
EPOLLRDBAND = 0x80
EPOLLRDHUP = 0x2000
EPOLLRDNORM = 0x40
EPOLLWAKEUP = 0x20000000
EPOLLWRBAND = 0x200
EPOLLWRNORM = 0x100
EPOLL_CLOEXEC = 0x80000
EPOLL_CTL_ADD = 0x1
EPOLL_CTL_DEL = 0x2
EPOLL_CTL_MOD = 0x3
ETH_P_1588 = 0x88f7
ETH_P_8021AD = 0x88a8
ETH_P_8021AH = 0x88e7
ETH_P_8021Q = 0x8100
ETH_P_80221 = 0x8917
ETH_P_802_2 = 0x4
ETH_P_802_3 = 0x1
ETH_P_802_3_MIN = 0x600
ETH_P_802_EX1 = 0x88b5
ETH_P_AARP = 0x80f3
ETH_P_AF_IUCV = 0xfbfb
ETH_P_ALL = 0x3
ETH_P_AOE = 0x88a2
ETH_P_ARCNET = 0x1a
ETH_P_ARP = 0x806
ETH_P_ATALK = 0x809b
ETH_P_ATMFATE = 0x8884
ETH_P_ATMMPOA = 0x884c
ETH_P_AX25 = 0x2
ETH_P_BATMAN = 0x4305
ETH_P_BPQ = 0x8ff
ETH_P_CAIF = 0xf7
ETH_P_CAN = 0xc
ETH_P_CANFD = 0xd
ETH_P_CONTROL = 0x16
ETH_P_CUST = 0x6006
ETH_P_DDCMP = 0x6
ETH_P_DEC = 0x6000
ETH_P_DIAG = 0x6005
ETH_P_DNA_DL = 0x6001
ETH_P_DNA_RC = 0x6002
ETH_P_DNA_RT = 0x6003
ETH_P_DSA = 0x1b
ETH_P_ECONET = 0x18
ETH_P_EDSA = 0xdada
ETH_P_FCOE = 0x8906
ETH_P_FIP = 0x8914
ETH_P_HDLC = 0x19
ETH_P_HSR = 0x892f
ETH_P_IBOE = 0x8915
ETH_P_IEEE802154 = 0xf6
ETH_P_IEEEPUP = 0xa00
ETH_P_IEEEPUPAT = 0xa01
ETH_P_IP = 0x800
ETH_P_IPV6 = 0x86dd
ETH_P_IPX = 0x8137
ETH_P_IRDA = 0x17
ETH_P_LAT = 0x6004
ETH_P_LINK_CTL = 0x886c
ETH_P_LOCALTALK = 0x9
ETH_P_LOOP = 0x60
ETH_P_LOOPBACK = 0x9000
ETH_P_MACSEC = 0x88e5
ETH_P_MOBITEX = 0x15
ETH_P_MPLS_MC = 0x8848
ETH_P_MPLS_UC = 0x8847
ETH_P_MVRP = 0x88f5
ETH_P_NCSI = 0x88f8
ETH_P_PAE = 0x888e
ETH_P_PAUSE = 0x8808
ETH_P_PHONET = 0xf5
ETH_P_PPPTALK = 0x10
ETH_P_PPP_DISC = 0x8863
ETH_P_PPP_MP = 0x8
ETH_P_PPP_SES = 0x8864
ETH_P_PRP = 0x88fb
ETH_P_PUP = 0x200
ETH_P_PUPAT = 0x201
ETH_P_QINQ1 = 0x9100
ETH_P_QINQ2 = 0x9200
ETH_P_QINQ3 = 0x9300
ETH_P_RARP = 0x8035
ETH_P_SCA = 0x6007
ETH_P_SLOW = 0x8809
ETH_P_SNAP = 0x5
ETH_P_TDLS = 0x890d
ETH_P_TEB = 0x6558
ETH_P_TIPC = 0x88ca
ETH_P_TRAILER = 0x1c
ETH_P_TR_802_2 = 0x11
ETH_P_TSN = 0x22f0
ETH_P_WAN_PPP = 0x7
ETH_P_WCCP = 0x883e
ETH_P_X25 = 0x805
ETH_P_XDSA = 0xf8
EXTA = 0xe
EXTB = 0xf
EXTPROC = 0x10000
FALLOC_FL_COLLAPSE_RANGE = 0x8
FALLOC_FL_INSERT_RANGE = 0x20
FALLOC_FL_KEEP_SIZE = 0x1
FALLOC_FL_NO_HIDE_STALE = 0x4
FALLOC_FL_PUNCH_HOLE = 0x2
FALLOC_FL_UNSHARE_RANGE = 0x40
FALLOC_FL_ZERO_RANGE = 0x10
FD_CLOEXEC = 0x1
FD_SETSIZE = 0x400
FF0 = 0x0
FF1 = 0x8000
FFDLY = 0x8000
FLUSHO = 0x1000
FS_ENCRYPTION_MODE_AES_128_CBC = 0x5
FS_ENCRYPTION_MODE_AES_128_CTS = 0x6
FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
FS_ENCRYPTION_MODE_AES_256_CTS = 0x4
FS_ENCRYPTION_MODE_AES_256_GCM = 0x2
FS_ENCRYPTION_MODE_AES_256_XTS = 0x1
FS_ENCRYPTION_MODE_INVALID = 0x0
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
FS_KEY_DESCRIPTOR_SIZE = 0x8
FS_KEY_DESC_PREFIX = "fscrypt:"
FS_KEY_DESC_PREFIX_SIZE = 0x8
FS_MAX_KEY_SIZE = 0x40
FS_POLICY_FLAGS_PAD_16 = 0x2
FS_POLICY_FLAGS_PAD_32 = 0x3
FS_POLICY_FLAGS_PAD_4 = 0x0
FS_POLICY_FLAGS_PAD_8 = 0x1
FS_POLICY_FLAGS_PAD_MASK = 0x3
FS_POLICY_FLAGS_VALID = 0x3
F_DUPFD = 0x0
F_DUPFD_CLOEXEC = 0x406
F_EXLCK = 0x4
F_GETFD = 0x1
F_GETFL = 0x3
F_GETLEASE = 0x401
F_GETLK = 0x5
F_GETLK64 = 0x5
F_GETOWN = 0x9
F_GETOWN_EX = 0x10
F_GETPIPE_SZ = 0x408
F_GETSIG = 0xb
F_LOCK = 0x1
F_NOTIFY = 0x402
F_OFD_GETLK = 0x24
F_OFD_SETLK = 0x25
F_OFD_SETLKW = 0x26
F_OK = 0x0
F_RDLCK = 0x0
F_SETFD = 0x2
F_SETFL = 0x4
F_SETLEASE = 0x400
F_SETLK = 0x6
F_SETLK64 = 0x6
F_SETLKW = 0x7
F_SETLKW64 = 0x7
F_SETOWN = 0x8
F_SETOWN_EX = 0xf
F_SETPIPE_SZ = 0x407
F_SETSIG = 0xa
F_SHLCK = 0x8
F_TEST = 0x3
F_TLOCK = 0x2
F_ULOCK = 0x0
F_UNLCK = 0x2
F_WRLCK = 0x1
GENL_ADMIN_PERM = 0x1
GENL_CMD_CAP_DO = 0x2
GENL_CMD_CAP_DUMP = 0x4
GENL_CMD_CAP_HASPOL = 0x8
GENL_HDRLEN = 0x4
GENL_ID_CTRL = 0x10
GENL_ID_PMCRAID = 0x12
GENL_ID_VFS_DQUOT = 0x11
GENL_MAX_ID = 0x3ff
GENL_MIN_ID = 0x10
GENL_NAMSIZ = 0x10
GENL_START_ALLOC = 0x13
GENL_UNS_ADMIN_PERM = 0x10
GRND_NONBLOCK = 0x1
GRND_RANDOM = 0x2
HUPCL = 0x400
IBSHIFT = 0x10
ICANON = 0x2
ICMPV6_FILTER = 0x1
ICRNL = 0x100
IEXTEN = 0x8000
IFA_F_DADFAILED = 0x8
IFA_F_DEPRECATED = 0x20
IFA_F_HOMEADDRESS = 0x10
IFA_F_MANAGETEMPADDR = 0x100
IFA_F_MCAUTOJOIN = 0x400
IFA_F_NODAD = 0x2
IFA_F_NOPREFIXROUTE = 0x200
IFA_F_OPTIMISTIC = 0x4
IFA_F_PERMANENT = 0x80
IFA_F_SECONDARY = 0x1
IFA_F_STABLE_PRIVACY = 0x800
IFA_F_TEMPORARY = 0x1
IFA_F_TENTATIVE = 0x40
IFA_MAX = 0x8
IFF_ALLMULTI = 0x200
IFF_ATTACH_QUEUE = 0x200
IFF_AUTOMEDIA = 0x4000
IFF_BROADCAST = 0x2
IFF_DEBUG = 0x4
IFF_DETACH_QUEUE = 0x400
IFF_DORMANT = 0x20000
IFF_DYNAMIC = 0x8000
IFF_ECHO = 0x40000
IFF_LOOPBACK = 0x8
IFF_LOWER_UP = 0x10000
IFF_MASTER = 0x400
IFF_MULTICAST = 0x1000
IFF_MULTI_QUEUE = 0x100
IFF_NOARP = 0x80
IFF_NOFILTER = 0x1000
IFF_NOTRAILERS = 0x20
IFF_NO_PI = 0x1000
IFF_ONE_QUEUE = 0x2000
IFF_PERSIST = 0x800
IFF_POINTOPOINT = 0x10
IFF_PORTSEL = 0x2000
IFF_PROMISC = 0x100
IFF_RUNNING = 0x40
IFF_SLAVE = 0x800
IFF_TAP = 0x2
IFF_TUN = 0x1
IFF_TUN_EXCL = 0x8000
IFF_UP = 0x1
IFF_VNET_HDR = 0x4000
IFF_VOLATILE = 0x70c5a
IFNAMSIZ = 0x10
IGNBRK = 0x1
IGNCR = 0x80
IGNPAR = 0x4
IMAXBEL = 0x2000
INLCR = 0x40
INPCK = 0x10
IN_ACCESS = 0x1
IN_ALL_EVENTS = 0xfff
IN_ATTRIB = 0x4
IN_CLASSA_HOST = 0xffffff
IN_CLASSA_MAX = 0x80
IN_CLASSA_NET = 0xff000000
IN_CLASSA_NSHIFT = 0x18
IN_CLASSB_HOST = 0xffff
IN_CLASSB_MAX = 0x10000
IN_CLASSB_NET = 0xffff0000
IN_CLASSB_NSHIFT = 0x10
IN_CLASSC_HOST = 0xff
IN_CLASSC_NET = 0xffffff00
IN_CLASSC_NSHIFT = 0x8
IN_CLOEXEC = 0x80000
IN_CLOSE = 0x18
IN_CLOSE_NOWRITE = 0x10
IN_CLOSE_WRITE = 0x8
IN_CREATE = 0x100
IN_DELETE = 0x200
IN_DELETE_SELF = 0x400
IN_DONT_FOLLOW = 0x2000000
IN_EXCL_UNLINK = 0x4000000
IN_IGNORED = 0x8000
IN_ISDIR = 0x40000000
IN_LOOPBACKNET = 0x7f
IN_MASK_ADD = 0x20000000
IN_MODIFY = 0x2
IN_MOVE = 0xc0
IN_MOVED_FROM = 0x40
IN_MOVED_TO = 0x80
IN_MOVE_SELF = 0x800
IN_NONBLOCK = 0x800
IN_ONESHOT = 0x80000000
IN_ONLYDIR = 0x1000000
IN_OPEN = 0x20
IN_Q_OVERFLOW = 0x4000
IN_UNMOUNT = 0x2000
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPPROTO_AH = 0x33
IPPROTO_BEETPH = 0x5e
IPPROTO_COMP = 0x6c
IPPROTO_DCCP = 0x21
IPPROTO_DSTOPTS = 0x3c
IPPROTO_EGP = 0x8
IPPROTO_ENCAP = 0x62
IPPROTO_ESP = 0x32
IPPROTO_FRAGMENT = 0x2c
IPPROTO_GRE = 0x2f
IPPROTO_HOPOPTS = 0x0
IPPROTO_ICMP = 0x1
IPPROTO_ICMPV6 = 0x3a
IPPROTO_IDP = 0x16
IPPROTO_IGMP = 0x2
IPPROTO_IP = 0x0
IPPROTO_IPIP = 0x4
IPPROTO_IPV6 = 0x29
IPPROTO_MH = 0x87
IPPROTO_MPLS = 0x89
IPPROTO_MTP = 0x5c
IPPROTO_NONE = 0x3b
IPPROTO_PIM = 0x67
IPPROTO_PUP = 0xc
IPPROTO_RAW = 0xff
IPPROTO_ROUTING = 0x2b
IPPROTO_RSVP = 0x2e
IPPROTO_SCTP = 0x84
IPPROTO_TCP = 0x6
IPPROTO_TP = 0x1d
IPPROTO_UDP = 0x11
IPPROTO_UDPLITE = 0x88
IPV6_2292DSTOPTS = 0x4
IPV6_2292HOPLIMIT = 0x8
IPV6_2292HOPOPTS = 0x3
IPV6_2292PKTINFO = 0x2
IPV6_2292PKTOPTIONS = 0x6
IPV6_2292RTHDR = 0x5
IPV6_ADDRFORM = 0x1
IPV6_ADDR_PREFERENCES = 0x48
IPV6_ADD_MEMBERSHIP = 0x14
IPV6_AUTHHDR = 0xa
IPV6_AUTOFLOWLABEL = 0x46
IPV6_CHECKSUM = 0x7
IPV6_DONTFRAG = 0x3e
IPV6_DROP_MEMBERSHIP = 0x15
IPV6_DSTOPTS = 0x3b
IPV6_HDRINCL = 0x24
IPV6_HOPLIMIT = 0x34
IPV6_HOPOPTS = 0x36
IPV6_IPSEC_POLICY = 0x22
IPV6_JOIN_ANYCAST = 0x1b
IPV6_JOIN_GROUP = 0x14
IPV6_LEAVE_ANYCAST = 0x1c
IPV6_LEAVE_GROUP = 0x15
IPV6_MINHOPCOUNT = 0x49
IPV6_MTU = 0x18
IPV6_MTU_DISCOVER = 0x17
IPV6_MULTICAST_HOPS = 0x12
IPV6_MULTICAST_IF = 0x11
IPV6_MULTICAST_LOOP = 0x13
IPV6_NEXTHOP = 0x9
IPV6_ORIGDSTADDR = 0x4a
IPV6_PATHMTU = 0x3d
IPV6_PKTINFO = 0x32
IPV6_PMTUDISC_DO = 0x2
IPV6_PMTUDISC_DONT = 0x0
IPV6_PMTUDISC_INTERFACE = 0x4
IPV6_PMTUDISC_OMIT = 0x5
IPV6_PMTUDISC_PROBE = 0x3
IPV6_PMTUDISC_WANT = 0x1
IPV6_RECVDSTOPTS = 0x3a
IPV6_RECVERR = 0x19
IPV6_RECVFRAGSIZE = 0x4d
IPV6_RECVHOPLIMIT = 0x33
IPV6_RECVHOPOPTS = 0x35
IPV6_RECVORIGDSTADDR = 0x4a
IPV6_RECVPATHMTU = 0x3c
IPV6_RECVPKTINFO = 0x31
IPV6_RECVRTHDR = 0x38
IPV6_RECVTCLASS = 0x42
IPV6_ROUTER_ALERT = 0x16
IPV6_RTHDR = 0x39
IPV6_RTHDRDSTOPTS = 0x37
IPV6_RTHDR_LOOSE = 0x0
IPV6_RTHDR_STRICT = 0x1
IPV6_RTHDR_TYPE_0 = 0x0
IPV6_RXDSTOPTS = 0x3b
IPV6_RXHOPOPTS = 0x36
IPV6_TCLASS = 0x43
IPV6_TRANSPARENT = 0x4b
IPV6_UNICAST_HOPS = 0x10
IPV6_UNICAST_IF = 0x4c
IPV6_V6ONLY = 0x1a
IPV6_XFRM_POLICY = 0x23
IP_ADD_MEMBERSHIP = 0x23
IP_ADD_SOURCE_MEMBERSHIP = 0x27
IP_BIND_ADDRESS_NO_PORT = 0x18
IP_BLOCK_SOURCE = 0x26
IP_CHECKSUM = 0x17
IP_DEFAULT_MULTICAST_LOOP = 0x1
IP_DEFAULT_MULTICAST_TTL = 0x1
IP_DF = 0x4000
IP_DROP_MEMBERSHIP = 0x24
IP_DROP_SOURCE_MEMBERSHIP = 0x28
IP_FREEBIND = 0xf
IP_HDRINCL = 0x3
IP_IPSEC_POLICY = 0x10
IP_MAXPACKET = 0xffff
IP_MAX_MEMBERSHIPS = 0x14
IP_MF = 0x2000
IP_MINTTL = 0x15
IP_MSFILTER = 0x29
IP_MSS = 0x240
IP_MTU = 0xe
IP_MTU_DISCOVER = 0xa
IP_MULTICAST_ALL = 0x31
IP_MULTICAST_IF = 0x20
IP_MULTICAST_LOOP = 0x22
IP_MULTICAST_TTL = 0x21
IP_NODEFRAG = 0x16
IP_OFFMASK = 0x1fff
IP_OPTIONS = 0x4
IP_ORIGDSTADDR = 0x14
IP_PASSSEC = 0x12
IP_PKTINFO = 0x8
IP_PKTOPTIONS = 0x9
IP_PMTUDISC = 0xa
IP_PMTUDISC_DO = 0x2
IP_PMTUDISC_DONT = 0x0
IP_PMTUDISC_INTERFACE = 0x4
IP_PMTUDISC_OMIT = 0x5
IP_PMTUDISC_PROBE = 0x3
IP_PMTUDISC_WANT = 0x1
IP_RECVERR = 0xb
IP_RECVFRAGSIZE = 0x19
IP_RECVOPTS = 0x6
IP_RECVORIGDSTADDR = 0x14
IP_RECVRETOPTS = 0x7
IP_RECVTOS = 0xd
IP_RECVTTL = 0xc
IP_RETOPTS = 0x7
IP_RF = 0x8000
IP_ROUTER_ALERT = 0x5
IP_TOS = 0x1
IP_TRANSPARENT = 0x13
IP_TTL = 0x2
IP_UNBLOCK_SOURCE = 0x25
IP_UNICAST_IF = 0x32
IP_XFRM_POLICY = 0x11
ISIG = 0x1
ISTRIP = 0x20
IUCLC = 0x200
IUTF8 = 0x4000
IXANY = 0x800
IXOFF = 0x1000
IXON = 0x400
KEYCTL_ASSUME_AUTHORITY = 0x10
KEYCTL_CHOWN = 0x4
KEYCTL_CLEAR = 0x7
KEYCTL_DESCRIBE = 0x6
KEYCTL_DH_COMPUTE = 0x17
KEYCTL_GET_KEYRING_ID = 0x0
KEYCTL_GET_PERSISTENT = 0x16
KEYCTL_GET_SECURITY = 0x11
KEYCTL_INSTANTIATE = 0xc
KEYCTL_INSTANTIATE_IOV = 0x14
KEYCTL_INVALIDATE = 0x15
KEYCTL_JOIN_SESSION_KEYRING = 0x1
KEYCTL_LINK = 0x8
KEYCTL_NEGATE = 0xd
KEYCTL_READ = 0xb
KEYCTL_REJECT = 0x13
KEYCTL_RESTRICT_KEYRING = 0x1d
KEYCTL_REVOKE = 0x3
KEYCTL_SEARCH = 0xa
KEYCTL_SESSION_TO_PARENT = 0x12
KEYCTL_SETPERM = 0x5
KEYCTL_SET_REQKEY_KEYRING = 0xe
KEYCTL_SET_TIMEOUT = 0xf
KEYCTL_UNLINK = 0x9
KEYCTL_UPDATE = 0x2
KEY_REQKEY_DEFL_DEFAULT = 0x0
KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6
KEY_REQKEY_DEFL_NO_CHANGE = -0x1
KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2
KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7
KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3
KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1
KEY_REQKEY_DEFL_USER_KEYRING = 0x4
KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5
KEY_SPEC_GROUP_KEYRING = -0x6
KEY_SPEC_PROCESS_KEYRING = -0x2
KEY_SPEC_REQKEY_AUTH_KEY = -0x7
KEY_SPEC_REQUESTOR_KEYRING = -0x8
KEY_SPEC_SESSION_KEYRING = -0x3
KEY_SPEC_THREAD_KEYRING = -0x1
KEY_SPEC_USER_KEYRING = -0x4
KEY_SPEC_USER_SESSION_KEYRING = -0x5
LINUX_REBOOT_CMD_CAD_OFF = 0x0
LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef
LINUX_REBOOT_CMD_HALT = 0xcdef0123
LINUX_REBOOT_CMD_KEXEC = 0x45584543
LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc
LINUX_REBOOT_CMD_RESTART = 0x1234567
LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4
LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2
LINUX_REBOOT_MAGIC1 = 0xfee1dead
LINUX_REBOOT_MAGIC2 = 0x28121969
LOCK_EX = 0x2
LOCK_NB = 0x4
LOCK_SH = 0x1
LOCK_UN = 0x8
MADV_DODUMP = 0x11
MADV_DOFORK = 0xb
MADV_DONTDUMP = 0x10
MADV_DONTFORK = 0xa
MADV_DONTNEED = 0x4
MADV_FREE = 0x8
MADV_HUGEPAGE = 0xe
MADV_HWPOISON = 0x64
MADV_MERGEABLE = 0xc
MADV_NOHUGEPAGE = 0xf
MADV_NORMAL = 0x0
MADV_RANDOM = 0x1
MADV_REMOVE = 0x9
MADV_SEQUENTIAL = 0x2
MADV_UNMERGEABLE = 0xd
MADV_WILLNEED = 0x3
MAP_32BIT = 0x40
MAP_ANON = 0x20
MAP_ANONYMOUS = 0x20
MAP_DENYWRITE = 0x800
MAP_EXECUTABLE = 0x1000
MAP_FILE = 0x0
MAP_FIXED = 0x10
MAP_GROWSDOWN = 0x100
MAP_HUGETLB = 0x40000
MAP_HUGE_MASK = 0x3f
MAP_HUGE_SHIFT = 0x1a
MAP_LOCKED = 0x2000
MAP_NONBLOCK = 0x10000
MAP_NORESERVE = 0x4000
MAP_POPULATE = 0x8000
MAP_PRIVATE = 0x2
MAP_SHARED = 0x1
MAP_STACK = 0x20000
MAP_TYPE = 0xf
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MCL_ONFAULT = 0x4
MNT_DETACH = 0x2
MNT_EXPIRE = 0x4
MNT_FORCE = 0x1
MSG_BATCH = 0x40000
MSG_CMSG_CLOEXEC = 0x40000000
MSG_CONFIRM = 0x800
MSG_CTRUNC = 0x8
MSG_DONTROUTE = 0x4
MSG_DONTWAIT = 0x40
MSG_EOR = 0x80
MSG_ERRQUEUE = 0x2000
MSG_FASTOPEN = 0x20000000
MSG_FIN = 0x200
MSG_MORE = 0x8000
MSG_NOSIGNAL = 0x4000
MSG_OOB = 0x1
MSG_PEEK = 0x2
MSG_PROXY = 0x10
MSG_RST = 0x1000
MSG_SYN = 0x400
MSG_TRUNC = 0x20
MSG_TRYHARD = 0x4
MSG_WAITALL = 0x100
MSG_WAITFORONE = 0x10000
MS_ACTIVE = 0x40000000
MS_ASYNC = 0x1
MS_BIND = 0x1000
MS_BORN = 0x20000000
MS_DIRSYNC = 0x80
MS_INVALIDATE = 0x2
MS_I_VERSION = 0x800000
MS_KERNMOUNT = 0x400000
MS_LAZYTIME = 0x2000000
MS_MANDLOCK = 0x40
MS_MGC_MSK = 0xffff0000
MS_MGC_VAL = 0xc0ed0000
MS_MOVE = 0x2000
MS_NOATIME = 0x400
MS_NODEV = 0x4
MS_NODIRATIME = 0x800
MS_NOEXEC = 0x8
MS_NOREMOTELOCK = 0x8000000
MS_NOSEC = 0x10000000
MS_NOSUID = 0x2
MS_NOUSER = -0x80000000
MS_POSIXACL = 0x10000
MS_PRIVATE = 0x40000
MS_RDONLY = 0x1
MS_REC = 0x4000
MS_RELATIME = 0x200000
MS_REMOUNT = 0x20
MS_RMT_MASK = 0x2800051
MS_SHARED = 0x100000
MS_SILENT = 0x8000
MS_SLAVE = 0x80000
MS_STRICTATIME = 0x1000000
MS_SUBMOUNT = 0x4000000
MS_SYNC = 0x4
MS_SYNCHRONOUS = 0x10
MS_UNBINDABLE = 0x20000
MS_VERBOSE = 0x8000
NAME_MAX = 0xff
NETLINK_ADD_MEMBERSHIP = 0x1
NETLINK_AUDIT = 0x9
NETLINK_BROADCAST_ERROR = 0x4
NETLINK_CAP_ACK = 0xa
NETLINK_CONNECTOR = 0xb
NETLINK_CRYPTO = 0x15
NETLINK_DNRTMSG = 0xe
NETLINK_DROP_MEMBERSHIP = 0x2
NETLINK_ECRYPTFS = 0x13
NETLINK_EXT_ACK = 0xb
NETLINK_FIB_LOOKUP = 0xa
NETLINK_FIREWALL = 0x3
NETLINK_GENERIC = 0x10
NETLINK_INET_DIAG = 0x4
NETLINK_IP6_FW = 0xd
NETLINK_ISCSI = 0x8
NETLINK_KOBJECT_UEVENT = 0xf
NETLINK_LISTEN_ALL_NSID = 0x8
NETLINK_LIST_MEMBERSHIPS = 0x9
NETLINK_NETFILTER = 0xc
NETLINK_NFLOG = 0x5
NETLINK_NO_ENOBUFS = 0x5
NETLINK_PKTINFO = 0x3
NETLINK_RDMA = 0x14
NETLINK_ROUTE = 0x0
NETLINK_RX_RING = 0x6
NETLINK_SCSITRANSPORT = 0x12
NETLINK_SELINUX = 0x7
NETLINK_SMC = 0x16
NETLINK_SOCK_DIAG = 0x4
NETLINK_TX_RING = 0x7
NETLINK_UNUSED = 0x1
NETLINK_USERSOCK = 0x2
NETLINK_XFRM = 0x6
NL0 = 0x0
NL1 = 0x100
NLA_ALIGNTO = 0x4
NLA_F_NESTED = 0x8000
NLA_F_NET_BYTEORDER = 0x4000
NLA_HDRLEN = 0x4
NLDLY = 0x100
NLMSG_ALIGNTO = 0x4
NLMSG_DONE = 0x3
NLMSG_ERROR = 0x2
NLMSG_HDRLEN = 0x10
NLMSG_MIN_TYPE = 0x10
NLMSG_NOOP = 0x1
NLMSG_OVERRUN = 0x4
NLM_F_ACK = 0x4
NLM_F_ACK_TLVS = 0x200
NLM_F_APPEND = 0x800
NLM_F_ATOMIC = 0x400
NLM_F_CAPPED = 0x100
NLM_F_CREATE = 0x400
NLM_F_DUMP = 0x300
NLM_F_DUMP_FILTERED = 0x20
NLM_F_DUMP_INTR = 0x10
NLM_F_ECHO = 0x8
NLM_F_EXCL = 0x200
NLM_F_MATCH = 0x200
NLM_F_MULTI = 0x2
NLM_F_REPLACE = 0x100
NLM_F_REQUEST = 0x1
NLM_F_ROOT = 0x100
NOFLSH = 0x80
OCRNL = 0x8
OFDEL = 0x80
OFILL = 0x40
OLCUC = 0x2
ONLCR = 0x4
ONLRET = 0x20
ONOCR = 0x10
OPOST = 0x1
O_ACCMODE = 0x3
O_APPEND = 0x400
O_ASYNC = 0x2000
O_CLOEXEC = 0x80000
O_CREAT = 0x40
O_DIRECT = 0x4000
O_DIRECTORY = 0x10000
O_DSYNC = 0x1000
O_EXCL = 0x80
O_FSYNC = 0x101000
O_LARGEFILE = 0x0
O_NDELAY = 0x800
O_NOATIME = 0x40000
O_NOCTTY = 0x100
O_NOFOLLOW = 0x20000
O_NONBLOCK = 0x800
O_PATH = 0x200000
O_RDONLY = 0x0
O_RDWR = 0x2
O_RSYNC = 0x101000
O_SYNC = 0x101000
O_TMPFILE = 0x410000
O_TRUNC = 0x200
O_WRONLY = 0x1
PACKET_ADD_MEMBERSHIP = 0x1
PACKET_AUXDATA = 0x8
PACKET_BROADCAST = 0x1
PACKET_COPY_THRESH = 0x7
PACKET_DROP_MEMBERSHIP = 0x2
PACKET_FANOUT = 0x12
PACKET_FANOUT_CBPF = 0x6
PACKET_FANOUT_CPU = 0x2
PACKET_FANOUT_DATA = 0x16
PACKET_FANOUT_EBPF = 0x7
PACKET_FANOUT_FLAG_DEFRAG = 0x8000
PACKET_FANOUT_FLAG_ROLLOVER = 0x1000
PACKET_FANOUT_FLAG_UNIQUEID = 0x2000
PACKET_FANOUT_HASH = 0x0
PACKET_FANOUT_LB = 0x1
PACKET_FANOUT_QM = 0x5
PACKET_FANOUT_RND = 0x4
PACKET_FANOUT_ROLLOVER = 0x3
PACKET_FASTROUTE = 0x6
PACKET_HDRLEN = 0xb
PACKET_HOST = 0x0
PACKET_KERNEL = 0x7
PACKET_LOOPBACK = 0x5
PACKET_LOSS = 0xe
PACKET_MR_ALLMULTI = 0x2
PACKET_MR_MULTICAST = 0x0
PACKET_MR_PROMISC = 0x1
PACKET_MR_UNICAST = 0x3
PACKET_MULTICAST = 0x2
PACKET_ORIGDEV = 0x9
PACKET_OTHERHOST = 0x3
PACKET_OUTGOING = 0x4
PACKET_QDISC_BYPASS = 0x14
PACKET_RECV_OUTPUT = 0x3
PACKET_RESERVE = 0xc
PACKET_ROLLOVER_STATS = 0x15
PACKET_RX_RING = 0x5
PACKET_STATISTICS = 0x6
PACKET_TIMESTAMP = 0x11
PACKET_TX_HAS_OFF = 0x13
PACKET_TX_RING = 0xd
PACKET_TX_TIMESTAMP = 0x10
PACKET_USER = 0x6
PACKET_VERSION = 0xa
PACKET_VNET_HDR = 0xf
PARENB = 0x100
PARITY_CRC16_PR0 = 0x2
PARITY_CRC16_PR0_CCITT = 0x4
PARITY_CRC16_PR1 = 0x3
PARITY_CRC16_PR1_CCITT = 0x5
PARITY_CRC32_PR0_CCITT = 0x6
PARITY_CRC32_PR1_CCITT = 0x7
PARITY_DEFAULT = 0x0
PARITY_NONE = 0x1
PARMRK = 0x8
PARODD = 0x200
PENDIN = 0x4000
PERF_EVENT_IOC_DISABLE = 0x2401
PERF_EVENT_IOC_ENABLE = 0x2400
PERF_EVENT_IOC_ID = 0x80082407
PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409
PERF_EVENT_IOC_PERIOD = 0x40082404
PERF_EVENT_IOC_REFRESH = 0x2402
PERF_EVENT_IOC_RESET = 0x2403
PERF_EVENT_IOC_SET_BPF = 0x40042408
PERF_EVENT_IOC_SET_FILTER = 0x40082406
PERF_EVENT_IOC_SET_OUTPUT = 0x2405
PRIO_PGRP = 0x1
PRIO_PROCESS = 0x0
PRIO_USER = 0x2
PROT_EXEC = 0x4
PROT_GROWSDOWN = 0x1000000
PROT_GROWSUP = 0x2000000
PROT_NONE = 0x0
PROT_READ = 0x1
PROT_WRITE = 0x2
PR_CAPBSET_DROP = 0x18
PR_CAPBSET_READ = 0x17
PR_CAP_AMBIENT = 0x2f
PR_CAP_AMBIENT_CLEAR_ALL = 0x4
PR_CAP_AMBIENT_IS_SET = 0x1
PR_CAP_AMBIENT_LOWER = 0x3
PR_CAP_AMBIENT_RAISE = 0x2
PR_ENDIAN_BIG = 0x0
PR_ENDIAN_LITTLE = 0x1
PR_ENDIAN_PPC_LITTLE = 0x2
PR_FPEMU_NOPRINT = 0x1
PR_FPEMU_SIGFPE = 0x2
PR_FP_EXC_ASYNC = 0x2
PR_FP_EXC_DISABLED = 0x0
PR_FP_EXC_DIV = 0x10000
PR_FP_EXC_INV = 0x100000
PR_FP_EXC_NONRECOV = 0x1
PR_FP_EXC_OVF = 0x20000
PR_FP_EXC_PRECISE = 0x3
PR_FP_EXC_RES = 0x80000
PR_FP_EXC_SW_ENABLE = 0x80
PR_FP_EXC_UND = 0x40000
PR_FP_MODE_FR = 0x1
PR_FP_MODE_FRE = 0x2
PR_GET_CHILD_SUBREAPER = 0x25
PR_GET_DUMPABLE = 0x3
PR_GET_ENDIAN = 0x13
PR_GET_FPEMU = 0x9
PR_GET_FPEXC = 0xb
PR_GET_FP_MODE = 0x2e
PR_GET_KEEPCAPS = 0x7
PR_GET_NAME = 0x10
PR_GET_NO_NEW_PRIVS = 0x27
PR_GET_PDEATHSIG = 0x2
PR_GET_SECCOMP = 0x15
PR_GET_SECUREBITS = 0x1b
PR_GET_THP_DISABLE = 0x2a
PR_GET_TID_ADDRESS = 0x28
PR_GET_TIMERSLACK = 0x1e
PR_GET_TIMING = 0xd
PR_GET_TSC = 0x19
PR_GET_UNALIGN = 0x5
PR_MCE_KILL = 0x21
PR_MCE_KILL_CLEAR = 0x0
PR_MCE_KILL_DEFAULT = 0x2
PR_MCE_KILL_EARLY = 0x1
PR_MCE_KILL_GET = 0x22
PR_MCE_KILL_LATE = 0x0
PR_MCE_KILL_SET = 0x1
PR_MPX_DISABLE_MANAGEMENT = 0x2c
PR_MPX_ENABLE_MANAGEMENT = 0x2b
PR_SET_CHILD_SUBREAPER = 0x24
PR_SET_DUMPABLE = 0x4
PR_SET_ENDIAN = 0x14
PR_SET_FPEMU = 0xa
PR_SET_FPEXC = 0xc
PR_SET_FP_MODE = 0x2d
PR_SET_KEEPCAPS = 0x8
PR_SET_MM = 0x23
PR_SET_MM_ARG_END = 0x9
PR_SET_MM_ARG_START = 0x8
PR_SET_MM_AUXV = 0xc
PR_SET_MM_BRK = 0x7
PR_SET_MM_END_CODE = 0x2
PR_SET_MM_END_DATA = 0x4
PR_SET_MM_ENV_END = 0xb
PR_SET_MM_ENV_START = 0xa
PR_SET_MM_EXE_FILE = 0xd
PR_SET_MM_MAP = 0xe
PR_SET_MM_MAP_SIZE = 0xf
PR_SET_MM_START_BRK = 0x6
PR_SET_MM_START_CODE = 0x1
PR_SET_MM_START_DATA = 0x3
PR_SET_MM_START_STACK = 0x5
PR_SET_NAME = 0xf
PR_SET_NO_NEW_PRIVS = 0x26
PR_SET_PDEATHSIG = 0x1
PR_SET_PTRACER = 0x59616d61
PR_SET_PTRACER_ANY = 0xffffffffffffffff
PR_SET_SECCOMP = 0x16
PR_SET_SECUREBITS = 0x1c
PR_SET_THP_DISABLE = 0x29
PR_SET_TIMERSLACK = 0x1d
PR_SET_TIMING = 0xe
PR_SET_TSC = 0x1a
PR_SET_UNALIGN = 0x6
PR_TASK_PERF_EVENTS_DISABLE = 0x1f
PR_TASK_PERF_EVENTS_ENABLE = 0x20
PR_TIMING_STATISTICAL = 0x0
PR_TIMING_TIMESTAMP = 0x1
PR_TSC_ENABLE = 0x1
PR_TSC_SIGSEGV = 0x2
PR_UNALIGN_NOPRINT = 0x1
PR_UNALIGN_SIGBUS = 0x2
PTRACE_ARCH_PRCTL = 0x1e
PTRACE_ATTACH = 0x10
PTRACE_CONT = 0x7
PTRACE_DETACH = 0x11
PTRACE_EVENT_CLONE = 0x3
PTRACE_EVENT_EXEC = 0x4
PTRACE_EVENT_EXIT = 0x6
PTRACE_EVENT_FORK = 0x1
PTRACE_EVENT_SECCOMP = 0x7
PTRACE_EVENT_STOP = 0x80
PTRACE_EVENT_VFORK = 0x2
PTRACE_EVENT_VFORK_DONE = 0x5
PTRACE_GETEVENTMSG = 0x4201
PTRACE_GETFPREGS = 0xe
PTRACE_GETFPXREGS = 0x12
PTRACE_GETREGS = 0xc
PTRACE_GETREGSET = 0x4204
PTRACE_GETSIGINFO = 0x4202
PTRACE_GETSIGMASK = 0x420a
PTRACE_GET_THREAD_AREA = 0x19
PTRACE_INTERRUPT = 0x4207
PTRACE_KILL = 0x8
PTRACE_LISTEN = 0x4208
PTRACE_OLDSETOPTIONS = 0x15
PTRACE_O_EXITKILL = 0x100000
PTRACE_O_MASK = 0x3000ff
PTRACE_O_SUSPEND_SECCOMP = 0x200000
PTRACE_O_TRACECLONE = 0x8
PTRACE_O_TRACEEXEC = 0x10
PTRACE_O_TRACEEXIT = 0x40
PTRACE_O_TRACEFORK = 0x2
PTRACE_O_TRACESECCOMP = 0x80
PTRACE_O_TRACESYSGOOD = 0x1
PTRACE_O_TRACEVFORK = 0x4
PTRACE_O_TRACEVFORKDONE = 0x20
PTRACE_PEEKDATA = 0x2
PTRACE_PEEKSIGINFO = 0x4209
PTRACE_PEEKSIGINFO_SHARED = 0x1
PTRACE_PEEKTEXT = 0x1
PTRACE_PEEKUSR = 0x3
PTRACE_POKEDATA = 0x5
PTRACE_POKETEXT = 0x4
PTRACE_POKEUSR = 0x6
PTRACE_SECCOMP_GET_FILTER = 0x420c
PTRACE_SEIZE = 0x4206
PTRACE_SETFPREGS = 0xf
PTRACE_SETFPXREGS = 0x13
PTRACE_SETOPTIONS = 0x4200
PTRACE_SETREGS = 0xd
PTRACE_SETREGSET = 0x4205
PTRACE_SETSIGINFO = 0x4203
PTRACE_SETSIGMASK = 0x420b
PTRACE_SET_THREAD_AREA = 0x1a
PTRACE_SINGLEBLOCK = 0x21
PTRACE_SINGLESTEP = 0x9
PTRACE_SYSCALL = 0x18
PTRACE_SYSEMU = 0x1f
PTRACE_SYSEMU_SINGLESTEP = 0x20
PTRACE_TRACEME = 0x0
RLIMIT_AS = 0x9
RLIMIT_CORE = 0x4
RLIMIT_CPU = 0x0
RLIMIT_DATA = 0x2
RLIMIT_FSIZE = 0x1
RLIMIT_LOCKS = 0xa
RLIMIT_MEMLOCK = 0x8
RLIMIT_MSGQUEUE = 0xc
RLIMIT_NICE = 0xd
RLIMIT_NOFILE = 0x7
RLIMIT_NPROC = 0x6
RLIMIT_RSS = 0x5
RLIMIT_RTPRIO = 0xe
RLIMIT_RTTIME = 0xf
RLIMIT_SIGPENDING = 0xb
RLIMIT_STACK = 0x3
RLIM_INFINITY = 0xffffffffffffffff
RTAX_ADVMSS = 0x8
RTAX_CC_ALGO = 0x10
RTAX_CWND = 0x7
RTAX_FEATURES = 0xc
RTAX_FEATURE_ALLFRAG = 0x8
RTAX_FEATURE_ECN = 0x1
RTAX_FEATURE_MASK = 0xf
RTAX_FEATURE_SACK = 0x2
RTAX_FEATURE_TIMESTAMP = 0x4
RTAX_HOPLIMIT = 0xa
RTAX_INITCWND = 0xb
RTAX_INITRWND = 0xe
RTAX_LOCK = 0x1
RTAX_MAX = 0x10
RTAX_MTU = 0x2
RTAX_QUICKACK = 0xf
RTAX_REORDERING = 0x9
RTAX_RTO_MIN = 0xd
RTAX_RTT = 0x4
RTAX_RTTVAR = 0x5
RTAX_SSTHRESH = 0x6
RTAX_UNSPEC = 0x0
RTAX_WINDOW = 0x3
RTA_ALIGNTO = 0x4
RTA_MAX = 0x1a
RTCF_DIRECTSRC = 0x4000000
RTCF_DOREDIRECT = 0x1000000
RTCF_LOG = 0x2000000
RTCF_MASQ = 0x400000
RTCF_NAT = 0x800000
RTCF_VALVE = 0x200000
RTF_ADDRCLASSMASK = 0xf8000000
RTF_ADDRCONF = 0x40000
RTF_ALLONLINK = 0x20000
RTF_BROADCAST = 0x10000000
RTF_CACHE = 0x1000000
RTF_DEFAULT = 0x10000
RTF_DYNAMIC = 0x10
RTF_FLOW = 0x2000000
RTF_GATEWAY = 0x2
RTF_HOST = 0x4
RTF_INTERFACE = 0x40000000
RTF_IRTT = 0x100
RTF_LINKRT = 0x100000
RTF_LOCAL = 0x80000000
RTF_MODIFIED = 0x20
RTF_MSS = 0x40
RTF_MTU = 0x40
RTF_MULTICAST = 0x20000000
RTF_NAT = 0x8000000
RTF_NOFORWARD = 0x1000
RTF_NONEXTHOP = 0x200000
RTF_NOPMTUDISC = 0x4000
RTF_POLICY = 0x4000000
RTF_REINSTATE = 0x8
RTF_REJECT = 0x200
RTF_STATIC = 0x400
RTF_THROW = 0x2000
RTF_UP = 0x1
RTF_WINDOW = 0x80
RTF_XRESOLVE = 0x800
RTM_BASE = 0x10
RTM_DELACTION = 0x31
RTM_DELADDR = 0x15
RTM_DELADDRLABEL = 0x49
RTM_DELLINK = 0x11
RTM_DELMDB = 0x55
RTM_DELNEIGH = 0x1d
RTM_DELNETCONF = 0x51
RTM_DELNSID = 0x59
RTM_DELQDISC = 0x25
RTM_DELROUTE = 0x19
RTM_DELRULE = 0x21
RTM_DELTCLASS = 0x29
RTM_DELTFILTER = 0x2d
RTM_F_CLONED = 0x200
RTM_F_EQUALIZE = 0x400
RTM_F_FIB_MATCH = 0x2000
RTM_F_LOOKUP_TABLE = 0x1000
RTM_F_NOTIFY = 0x100
RTM_F_PREFIX = 0x800
RTM_GETACTION = 0x32
RTM_GETADDR = 0x16
RTM_GETADDRLABEL = 0x4a
RTM_GETANYCAST = 0x3e
RTM_GETDCB = 0x4e
RTM_GETLINK = 0x12
RTM_GETMDB = 0x56
RTM_GETMULTICAST = 0x3a
RTM_GETNEIGH = 0x1e
RTM_GETNEIGHTBL = 0x42
RTM_GETNETCONF = 0x52
RTM_GETNSID = 0x5a
RTM_GETQDISC = 0x26
RTM_GETROUTE = 0x1a
RTM_GETRULE = 0x22
RTM_GETSTATS = 0x5e
RTM_GETTCLASS = 0x2a
RTM_GETTFILTER = 0x2e
RTM_MAX = 0x63
RTM_NEWACTION = 0x30
RTM_NEWADDR = 0x14
RTM_NEWADDRLABEL = 0x48
RTM_NEWCACHEREPORT = 0x60
RTM_NEWLINK = 0x10
RTM_NEWMDB = 0x54
RTM_NEWNDUSEROPT = 0x44
RTM_NEWNEIGH = 0x1c
RTM_NEWNEIGHTBL = 0x40
RTM_NEWNETCONF = 0x50
RTM_NEWNSID = 0x58
RTM_NEWPREFIX = 0x34
RTM_NEWQDISC = 0x24
RTM_NEWROUTE = 0x18
RTM_NEWRULE = 0x20
RTM_NEWSTATS = 0x5c
RTM_NEWTCLASS = 0x28
RTM_NEWTFILTER = 0x2c
RTM_NR_FAMILIES = 0x15
RTM_NR_MSGTYPES = 0x54
RTM_SETDCB = 0x4f
RTM_SETLINK = 0x13
RTM_SETNEIGHTBL = 0x43
RTNH_ALIGNTO = 0x4
RTNH_COMPARE_MASK = 0x19
RTNH_F_DEAD = 0x1
RTNH_F_LINKDOWN = 0x10
RTNH_F_OFFLOAD = 0x8
RTNH_F_ONLINK = 0x4
RTNH_F_PERVASIVE = 0x2
RTNH_F_UNRESOLVED = 0x20
RTN_MAX = 0xb
RTPROT_BABEL = 0x2a
RTPROT_BIRD = 0xc
RTPROT_BOOT = 0x3
RTPROT_DHCP = 0x10
RTPROT_DNROUTED = 0xd
RTPROT_GATED = 0x8
RTPROT_KERNEL = 0x2
RTPROT_MROUTED = 0x11
RTPROT_MRT = 0xa
RTPROT_NTK = 0xf
RTPROT_RA = 0x9
RTPROT_REDIRECT = 0x1
RTPROT_STATIC = 0x4
RTPROT_UNSPEC = 0x0
RTPROT_XORP = 0xe
RTPROT_ZEBRA = 0xb
RT_CLASS_DEFAULT = 0xfd
RT_CLASS_LOCAL = 0xff
RT_CLASS_MAIN = 0xfe
RT_CLASS_MAX = 0xff
RT_CLASS_UNSPEC = 0x0
RUSAGE_CHILDREN = -0x1
RUSAGE_SELF = 0x0
RUSAGE_THREAD = 0x1
SCM_CREDENTIALS = 0x2
SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x1d
SCM_TIMESTAMPING = 0x25
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_WIFI_STATUS = 0x29
SECCOMP_MODE_DISABLED = 0x0
SECCOMP_MODE_FILTER = 0x2
SECCOMP_MODE_STRICT = 0x1
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
SIOCADDDLCI = 0x8980
SIOCADDMULTI = 0x8931
SIOCADDRT = 0x890b
SIOCATMARK = 0x8905
SIOCBONDCHANGEACTIVE = 0x8995
SIOCBONDENSLAVE = 0x8990
SIOCBONDINFOQUERY = 0x8994
SIOCBONDRELEASE = 0x8991
SIOCBONDSETHWADDR = 0x8992
SIOCBONDSLAVEINFOQUERY = 0x8993
SIOCBRADDBR = 0x89a0
SIOCBRADDIF = 0x89a2
SIOCBRDELBR = 0x89a1
SIOCBRDELIF = 0x89a3
SIOCDARP = 0x8953
SIOCDELDLCI = 0x8981
SIOCDELMULTI = 0x8932
SIOCDELRT = 0x890c
SIOCDEVPRIVATE = 0x89f0
SIOCDIFADDR = 0x8936
SIOCDRARP = 0x8960
SIOCETHTOOL = 0x8946
SIOCGARP = 0x8954
SIOCGHWTSTAMP = 0x89b1
SIOCGIFADDR = 0x8915
SIOCGIFBR = 0x8940
SIOCGIFBRDADDR = 0x8919
SIOCGIFCONF = 0x8912
SIOCGIFCOUNT = 0x8938
SIOCGIFDSTADDR = 0x8917
SIOCGIFENCAP = 0x8925
SIOCGIFFLAGS = 0x8913
SIOCGIFHWADDR = 0x8927
SIOCGIFINDEX = 0x8933
SIOCGIFMAP = 0x8970
SIOCGIFMEM = 0x891f
SIOCGIFMETRIC = 0x891d
SIOCGIFMTU = 0x8921
SIOCGIFNAME = 0x8910
SIOCGIFNETMASK = 0x891b
SIOCGIFPFLAGS = 0x8935
SIOCGIFSLAVE = 0x8929
SIOCGIFTXQLEN = 0x8942
SIOCGIFVLAN = 0x8982
SIOCGMIIPHY = 0x8947
SIOCGMIIREG = 0x8948
SIOCGPGRP = 0x8904
SIOCGRARP = 0x8961
SIOCGSKNS = 0x894c
SIOCGSTAMP = 0x8906
SIOCGSTAMPNS = 0x8907
SIOCINQ = 0x541b
SIOCOUTQ = 0x5411
SIOCOUTQNSD = 0x894b
SIOCPROTOPRIVATE = 0x89e0
SIOCRTMSG = 0x890d
SIOCSARP = 0x8955
SIOCSHWTSTAMP = 0x89b0
SIOCSIFADDR = 0x8916
SIOCSIFBR = 0x8941
SIOCSIFBRDADDR = 0x891a
SIOCSIFDSTADDR = 0x8918
SIOCSIFENCAP = 0x8926
SIOCSIFFLAGS = 0x8914
SIOCSIFHWADDR = 0x8924
SIOCSIFHWBROADCAST = 0x8937
SIOCSIFLINK = 0x8911
SIOCSIFMAP = 0x8971
SIOCSIFMEM = 0x8920
SIOCSIFMETRIC = 0x891e
SIOCSIFMTU = 0x8922
SIOCSIFNAME = 0x8923
SIOCSIFNETMASK = 0x891c
SIOCSIFPFLAGS = 0x8934
SIOCSIFSLAVE = 0x8930
SIOCSIFTXQLEN = 0x8943
SIOCSIFVLAN = 0x8983
SIOCSMIIREG = 0x8949
SIOCSPGRP = 0x8902
SIOCSRARP = 0x8962
SIOCWANDEV = 0x894a
SOCK_CLOEXEC = 0x80000
SOCK_DCCP = 0x6
SOCK_DGRAM = 0x2
SOCK_IOC_TYPE = 0x89
SOCK_NONBLOCK = 0x800
SOCK_PACKET = 0xa
SOCK_RAW = 0x3
SOCK_RDM = 0x4
SOCK_SEQPACKET = 0x5
SOCK_STREAM = 0x1
SOL_AAL = 0x109
SOL_ALG = 0x117
SOL_ATM = 0x108
SOL_CAIF = 0x116
SOL_CAN_BASE = 0x64
SOL_DCCP = 0x10d
SOL_DECNET = 0x105
SOL_ICMPV6 = 0x3a
SOL_IP = 0x0
SOL_IPV6 = 0x29
SOL_IRDA = 0x10a
SOL_IUCV = 0x115
SOL_KCM = 0x119
SOL_LLC = 0x10c
SOL_NETBEUI = 0x10b
SOL_NETLINK = 0x10e
SOL_NFC = 0x118
SOL_PACKET = 0x107
SOL_PNPIPE = 0x113
SOL_PPPOL2TP = 0x111
SOL_RAW = 0xff
SOL_RDS = 0x114
SOL_RXRPC = 0x110
SOL_SOCKET = 0x1
SOL_TCP = 0x6
SOL_TIPC = 0x10f
SOL_X25 = 0x106
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x1e
SO_ATTACH_BPF = 0x32
SO_ATTACH_FILTER = 0x1a
SO_ATTACH_REUSEPORT_CBPF = 0x33
SO_ATTACH_REUSEPORT_EBPF = 0x34
SO_BINDTODEVICE = 0x19
SO_BPF_EXTENSIONS = 0x30
SO_BROADCAST = 0x6
SO_BSDCOMPAT = 0xe
SO_BUSY_POLL = 0x2e
SO_CNX_ADVICE = 0x35
SO_COOKIE = 0x39
SO_DEBUG = 0x1
SO_DETACH_BPF = 0x1b
SO_DETACH_FILTER = 0x1b
SO_DOMAIN = 0x27
SO_DONTROUTE = 0x5
SO_ERROR = 0x4
SO_GET_FILTER = 0x1a
SO_INCOMING_CPU = 0x31
SO_INCOMING_NAPI_ID = 0x38
SO_KEEPALIVE = 0x9
SO_LINGER = 0xd
SO_LOCK_FILTER = 0x2c
SO_MARK = 0x24
SO_MAX_PACING_RATE = 0x2f
SO_MEMINFO = 0x37
SO_NOFCS = 0x2b
SO_NO_CHECK = 0xb
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
SO_PEERGROUPS = 0x3b
SO_PEERNAME = 0x1c
SO_PEERSEC = 0x1f
SO_PRIORITY = 0xc
SO_PROTOCOL = 0x26
SO_RCVBUF = 0x8
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVTIMEO = 0x14
SO_REUSEADDR = 0x2
SO_REUSEPORT = 0xf
SO_RXQ_OVFL = 0x28
SO_SECURITY_AUTHENTICATION = 0x16
SO_SECURITY_ENCRYPTION_NETWORK = 0x18
SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17
SO_SELECT_ERR_QUEUE = 0x2d
SO_SNDBUF = 0x7
SO_SNDBUFFORCE = 0x20
SO_SNDLOWAT = 0x13
SO_SNDTIMEO = 0x15
SO_TIMESTAMP = 0x1d
SO_TIMESTAMPING = 0x25
SO_TIMESTAMPNS = 0x23
SO_TYPE = 0x3
SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2
SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1
SO_VM_SOCKETS_BUFFER_SIZE = 0x0
SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6
SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7
SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3
SO_VM_SOCKETS_TRUSTED = 0x5
SO_WIFI_STATUS = 0x29
SPLICE_F_GIFT = 0x8
SPLICE_F_MORE = 0x4
SPLICE_F_MOVE = 0x1
SPLICE_F_NONBLOCK = 0x2
S_BLKSIZE = 0x200
S_IEXEC = 0x40
S_IFBLK = 0x6000
S_IFCHR = 0x2000
S_IFDIR = 0x4000
S_IFIFO = 0x1000
S_IFLNK = 0xa000
S_IFMT = 0xf000
S_IFREG = 0x8000
S_IFSOCK = 0xc000
S_IREAD = 0x100
S_IRGRP = 0x20
S_IROTH = 0x4
S_IRUSR = 0x100
S_IRWXG = 0x38
S_IRWXO = 0x7
S_IRWXU = 0x1c0
S_ISGID = 0x400
S_ISUID = 0x800
S_ISVTX = 0x200
S_IWGRP = 0x10
S_IWOTH = 0x2
S_IWRITE = 0x80
S_IWUSR = 0x80
S_IXGRP = 0x8
S_IXOTH = 0x1
S_IXUSR = 0x40
TAB0 = 0x0
TAB1 = 0x800
TAB2 = 0x1000
TAB3 = 0x1800
TABDLY = 0x1800
TASKSTATS_CMD_ATTR_MAX = 0x4
TASKSTATS_CMD_MAX = 0x2
TASKSTATS_GENL_NAME = "TASKSTATS"
TASKSTATS_GENL_VERSION = 0x1
TASKSTATS_TYPE_MAX = 0x6
TASKSTATS_VERSION = 0x8
TCFLSH = 0x540b
TCGETA = 0x5405
TCGETS = 0x5401
TCGETS2 = 0x802c542a
TCGETX = 0x5432
TCIFLUSH = 0x0
TCIOFF = 0x2
TCIOFLUSH = 0x2
TCION = 0x3
TCOFLUSH = 0x1
TCOOFF = 0x0
TCOON = 0x1
TCP_CC_INFO = 0x1a
TCP_CONGESTION = 0xd
TCP_COOKIE_IN_ALWAYS = 0x1
TCP_COOKIE_MAX = 0x10
TCP_COOKIE_MIN = 0x8
TCP_COOKIE_OUT_NEVER = 0x2
TCP_COOKIE_PAIR_SIZE = 0x20
TCP_COOKIE_TRANSACTIONS = 0xf
TCP_CORK = 0x3
TCP_DEFER_ACCEPT = 0x9
TCP_FASTOPEN = 0x17
TCP_FASTOPEN_CONNECT = 0x1e
TCP_INFO = 0xb
TCP_KEEPCNT = 0x6
TCP_KEEPIDLE = 0x4
TCP_KEEPINTVL = 0x5
TCP_LINGER2 = 0x8
TCP_MAXSEG = 0x2
TCP_MAXWIN = 0xffff
TCP_MAX_WINSHIFT = 0xe
TCP_MD5SIG = 0xe
TCP_MD5SIG_MAXKEYLEN = 0x50
TCP_MSS = 0x200
TCP_MSS_DEFAULT = 0x218
TCP_MSS_DESIRED = 0x4c4
TCP_NODELAY = 0x1
TCP_NOTSENT_LOWAT = 0x19
TCP_QUEUE_SEQ = 0x15
TCP_QUICKACK = 0xc
TCP_REPAIR = 0x13
TCP_REPAIR_OPTIONS = 0x16
TCP_REPAIR_QUEUE = 0x14
TCP_REPAIR_WINDOW = 0x1d
TCP_SAVED_SYN = 0x1c
TCP_SAVE_SYN = 0x1b
TCP_SYNCNT = 0x7
TCP_S_DATA_IN = 0x4
TCP_S_DATA_OUT = 0x8
TCP_THIN_DUPACK = 0x11
TCP_THIN_LINEAR_TIMEOUTS = 0x10
TCP_TIMESTAMP = 0x18
TCP_USER_TIMEOUT = 0x12
TCP_WINDOW_CLAMP = 0xa
TCSAFLUSH = 0x2
TCSBRK = 0x5409
TCSBRKP = 0x5425
TCSETA = 0x5406
TCSETAF = 0x5408
TCSETAW = 0x5407
TCSETS = 0x5402
TCSETS2 = 0x402c542b
TCSETSF = 0x5404
TCSETSF2 = 0x402c542d
TCSETSW = 0x5403
TCSETSW2 = 0x402c542c
TCSETX = 0x5433
TCSETXF = 0x5434
TCSETXW = 0x5435
TCXONC = 0x540a
TIOCCBRK = 0x5428
TIOCCONS = 0x541d
TIOCEXCL = 0x540c
TIOCGDEV = 0x80045432
TIOCGETD = 0x5424
TIOCGEXCL = 0x80045440
TIOCGICOUNT = 0x545d
TIOCGLCKTRMIOS = 0x5456
TIOCGPGRP = 0x540f
TIOCGPKT = 0x80045438
TIOCGPTLCK = 0x80045439
TIOCGPTN = 0x80045430
TIOCGPTPEER = 0x5441
TIOCGRS485 = 0x542e
TIOCGSERIAL = 0x541e
TIOCGSID = 0x5429
TIOCGSOFTCAR = 0x5419
TIOCGWINSZ = 0x5413
TIOCINQ = 0x541b
TIOCLINUX = 0x541c
TIOCMBIC = 0x5417
TIOCMBIS = 0x5416
TIOCMGET = 0x5415
TIOCMIWAIT = 0x545c
TIOCMSET = 0x5418
TIOCM_CAR = 0x40
TIOCM_CD = 0x40
TIOCM_CTS = 0x20
TIOCM_DSR = 0x100
TIOCM_DTR = 0x2
TIOCM_LE = 0x1
TIOCM_RI = 0x80
TIOCM_RNG = 0x80
TIOCM_RTS = 0x4
TIOCM_SR = 0x10
TIOCM_ST = 0x8
TIOCNOTTY = 0x5422
TIOCNXCL = 0x540d
TIOCOUTQ = 0x5411
TIOCPKT = 0x5420
TIOCPKT_DATA = 0x0
TIOCPKT_DOSTOP = 0x20
TIOCPKT_FLUSHREAD = 0x1
TIOCPKT_FLUSHWRITE = 0x2
TIOCPKT_IOCTL = 0x40
TIOCPKT_NOSTOP = 0x10
TIOCPKT_START = 0x8
TIOCPKT_STOP = 0x4
TIOCSBRK = 0x5427
TIOCSCTTY = 0x540e
TIOCSERCONFIG = 0x5453
TIOCSERGETLSR = 0x5459
TIOCSERGETMULTI = 0x545a
TIOCSERGSTRUCT = 0x5458
TIOCSERGWILD = 0x5454
TIOCSERSETMULTI = 0x545b
TIOCSERSWILD = 0x5455
TIOCSER_TEMT = 0x1
TIOCSETD = 0x5423
TIOCSIG = 0x40045436
TIOCSLCKTRMIOS = 0x5457
TIOCSPGRP = 0x5410
TIOCSPTLCK = 0x40045431
TIOCSRS485 = 0x542f
TIOCSSERIAL = 0x541f
TIOCSSOFTCAR = 0x541a
TIOCSTI = 0x5412
TIOCSWINSZ = 0x5414
TIOCVHANGUP = 0x5437
TOSTOP = 0x100
TS_COMM_LEN = 0x20
TUNATTACHFILTER = 0x401054d5
TUNDETACHFILTER = 0x401054d6
TUNGETFEATURES = 0x800454cf
TUNGETFILTER = 0x801054db
TUNGETIFF = 0x800454d2
TUNGETSNDBUF = 0x800454d3
TUNGETVNETBE = 0x800454df
TUNGETVNETHDRSZ = 0x800454d7
TUNGETVNETLE = 0x800454dd
TUNSETDEBUG = 0x400454c9
TUNSETGROUP = 0x400454ce
TUNSETIFF = 0x400454ca
TUNSETIFINDEX = 0x400454da
TUNSETLINK = 0x400454cd
TUNSETNOCSUM = 0x400454c8
TUNSETOFFLOAD = 0x400454d0
TUNSETOWNER = 0x400454cc
TUNSETPERSIST = 0x400454cb
TUNSETQUEUE = 0x400454d9
TUNSETSNDBUF = 0x400454d4
TUNSETTXFILTER = 0x400454d1
TUNSETVNETBE = 0x400454de
TUNSETVNETHDRSZ = 0x400454d8
TUNSETVNETLE = 0x400454dc
UMOUNT_NOFOLLOW = 0x8
UTIME_NOW = 0x3fffffff
UTIME_OMIT = 0x3ffffffe
VDISCARD = 0xd
VEOF = 0x4
VEOL = 0xb
VEOL2 = 0x10
VERASE = 0x2
VINTR = 0x0
VKILL = 0x3
VLNEXT = 0xf
VMADDR_CID_ANY = 0xffffffff
VMADDR_CID_HOST = 0x2
VMADDR_CID_HYPERVISOR = 0x0
VMADDR_CID_RESERVED = 0x1
VMADDR_PORT_ANY = 0xffffffff
VMIN = 0x6
VM_SOCKETS_INVALID_VERSION = 0xffffffff
VQUIT = 0x1
VREPRINT = 0xc
VSTART = 0x8
VSTOP = 0x9
VSUSP = 0xa
VSWTC = 0x7
VT0 = 0x0
VT1 = 0x4000
VTDLY = 0x4000
VTIME = 0x5
VWERASE = 0xe
WALL = 0x40000000
WCLONE = 0x80000000
WCONTINUED = 0x8
WDIOC_GETBOOTSTATUS = 0x80045702
WDIOC_GETPRETIMEOUT = 0x80045709
WDIOC_GETSTATUS = 0x80045701
WDIOC_GETSUPPORT = 0x80285700
WDIOC_GETTEMP = 0x80045703
WDIOC_GETTIMELEFT = 0x8004570a
WDIOC_GETTIMEOUT = 0x80045707
WDIOC_KEEPALIVE = 0x80045705
WDIOC_SETOPTIONS = 0x80045704
WDIOC_SETPRETIMEOUT = 0xc0045708
WDIOC_SETTIMEOUT = 0xc0045706
WEXITED = 0x4
WNOHANG = 0x1
WNOTHREAD = 0x20000000
WNOWAIT = 0x1000000
WORDSIZE = 0x40
WSTOPPED = 0x2
WUNTRACED = 0x2
XATTR_CREATE = 0x1
XATTR_REPLACE = 0x2
XCASE = 0x4
XTABS = 0x1800
)
// Errors
//
// NOTE(review): generated errno constants (zerrors-style). WORDSIZE = 0x40
// above suggests a 64-bit Linux target — TODO confirm arch from the file's
// build tags. Generated data: do not hand-edit individual values; they must
// match the kernel's errno numbers.
const (
E2BIG = syscall.Errno(0x7)
EACCES = syscall.Errno(0xd)
EADDRINUSE = syscall.Errno(0x62)
EADDRNOTAVAIL = syscall.Errno(0x63)
EADV = syscall.Errno(0x44)
EAFNOSUPPORT = syscall.Errno(0x61)
EAGAIN = syscall.Errno(0xb)
EALREADY = syscall.Errno(0x72)
EBADE = syscall.Errno(0x34)
EBADF = syscall.Errno(0x9)
EBADFD = syscall.Errno(0x4d)
EBADMSG = syscall.Errno(0x4a)
EBADR = syscall.Errno(0x35)
EBADRQC = syscall.Errno(0x38)
EBADSLT = syscall.Errno(0x39)
EBFONT = syscall.Errno(0x3b)
EBUSY = syscall.Errno(0x10)
ECANCELED = syscall.Errno(0x7d)
ECHILD = syscall.Errno(0xa)
ECHRNG = syscall.Errno(0x2c)
ECOMM = syscall.Errno(0x46)
ECONNABORTED = syscall.Errno(0x67)
ECONNREFUSED = syscall.Errno(0x6f)
ECONNRESET = syscall.Errno(0x68)
EDEADLK = syscall.Errno(0x23)
EDEADLOCK = syscall.Errno(0x23)
EDESTADDRREQ = syscall.Errno(0x59)
EDOM = syscall.Errno(0x21)
EDOTDOT = syscall.Errno(0x49)
EDQUOT = syscall.Errno(0x7a)
EEXIST = syscall.Errno(0x11)
EFAULT = syscall.Errno(0xe)
EFBIG = syscall.Errno(0x1b)
EHOSTDOWN = syscall.Errno(0x70)
EHOSTUNREACH = syscall.Errno(0x71)
EHWPOISON = syscall.Errno(0x85)
EIDRM = syscall.Errno(0x2b)
EILSEQ = syscall.Errno(0x54)
EINPROGRESS = syscall.Errno(0x73)
EINTR = syscall.Errno(0x4)
EINVAL = syscall.Errno(0x16)
EIO = syscall.Errno(0x5)
EISCONN = syscall.Errno(0x6a)
EISDIR = syscall.Errno(0x15)
EISNAM = syscall.Errno(0x78)
EKEYEXPIRED = syscall.Errno(0x7f)
EKEYREJECTED = syscall.Errno(0x81)
EKEYREVOKED = syscall.Errno(0x80)
EL2HLT = syscall.Errno(0x33)
EL2NSYNC = syscall.Errno(0x2d)
EL3HLT = syscall.Errno(0x2e)
EL3RST = syscall.Errno(0x2f)
ELIBACC = syscall.Errno(0x4f)
ELIBBAD = syscall.Errno(0x50)
ELIBEXEC = syscall.Errno(0x53)
ELIBMAX = syscall.Errno(0x52)
ELIBSCN = syscall.Errno(0x51)
ELNRNG = syscall.Errno(0x30)
ELOOP = syscall.Errno(0x28)
EMEDIUMTYPE = syscall.Errno(0x7c)
EMFILE = syscall.Errno(0x18)
EMLINK = syscall.Errno(0x1f)
EMSGSIZE = syscall.Errno(0x5a)
EMULTIHOP = syscall.Errno(0x48)
ENAMETOOLONG = syscall.Errno(0x24)
ENAVAIL = syscall.Errno(0x77)
ENETDOWN = syscall.Errno(0x64)
ENETRESET = syscall.Errno(0x66)
ENETUNREACH = syscall.Errno(0x65)
ENFILE = syscall.Errno(0x17)
ENOANO = syscall.Errno(0x37)
ENOBUFS = syscall.Errno(0x69)
ENOCSI = syscall.Errno(0x32)
ENODATA = syscall.Errno(0x3d)
ENODEV = syscall.Errno(0x13)
ENOENT = syscall.Errno(0x2)
ENOEXEC = syscall.Errno(0x8)
ENOKEY = syscall.Errno(0x7e)
ENOLCK = syscall.Errno(0x25)
ENOLINK = syscall.Errno(0x43)
ENOMEDIUM = syscall.Errno(0x7b)
ENOMEM = syscall.Errno(0xc)
ENOMSG = syscall.Errno(0x2a)
ENONET = syscall.Errno(0x40)
ENOPKG = syscall.Errno(0x41)
ENOPROTOOPT = syscall.Errno(0x5c)
ENOSPC = syscall.Errno(0x1c)
ENOSR = syscall.Errno(0x3f)
ENOSTR = syscall.Errno(0x3c)
ENOSYS = syscall.Errno(0x26)
ENOTBLK = syscall.Errno(0xf)
ENOTCONN = syscall.Errno(0x6b)
ENOTDIR = syscall.Errno(0x14)
ENOTEMPTY = syscall.Errno(0x27)
ENOTNAM = syscall.Errno(0x76)
ENOTRECOVERABLE = syscall.Errno(0x83)
ENOTSOCK = syscall.Errno(0x58)
ENOTSUP = syscall.Errno(0x5f)
ENOTTY = syscall.Errno(0x19)
ENOTUNIQ = syscall.Errno(0x4c)
ENXIO = syscall.Errno(0x6)
EOPNOTSUPP = syscall.Errno(0x5f)
EOVERFLOW = syscall.Errno(0x4b)
EOWNERDEAD = syscall.Errno(0x82)
EPERM = syscall.Errno(0x1)
EPFNOSUPPORT = syscall.Errno(0x60)
EPIPE = syscall.Errno(0x20)
EPROTO = syscall.Errno(0x47)
EPROTONOSUPPORT = syscall.Errno(0x5d)
EPROTOTYPE = syscall.Errno(0x5b)
ERANGE = syscall.Errno(0x22)
EREMCHG = syscall.Errno(0x4e)
EREMOTE = syscall.Errno(0x42)
EREMOTEIO = syscall.Errno(0x79)
ERESTART = syscall.Errno(0x55)
ERFKILL = syscall.Errno(0x84)
EROFS = syscall.Errno(0x1e)
ESHUTDOWN = syscall.Errno(0x6c)
ESOCKTNOSUPPORT = syscall.Errno(0x5e)
ESPIPE = syscall.Errno(0x1d)
ESRCH = syscall.Errno(0x3)
ESRMNT = syscall.Errno(0x45)
ESTALE = syscall.Errno(0x74)
ESTRPIPE = syscall.Errno(0x56)
ETIME = syscall.Errno(0x3e)
ETIMEDOUT = syscall.Errno(0x6e)
ETOOMANYREFS = syscall.Errno(0x6d)
ETXTBSY = syscall.Errno(0x1a)
EUCLEAN = syscall.Errno(0x75)
EUNATCH = syscall.Errno(0x31)
EUSERS = syscall.Errno(0x57)
EWOULDBLOCK = syscall.Errno(0xb)
EXDEV = syscall.Errno(0x12)
EXFULL = syscall.Errno(0x36)
)
// Signals
//
// NOTE(review): generated signal numbers for the same target; aliases share a
// value (SIGIOT=SIGABRT, SIGCLD=SIGCHLD, SIGPOLL=SIGIO).
const (
SIGABRT = syscall.Signal(0x6)
SIGALRM = syscall.Signal(0xe)
SIGBUS = syscall.Signal(0x7)
SIGCHLD = syscall.Signal(0x11)
SIGCLD = syscall.Signal(0x11)
SIGCONT = syscall.Signal(0x12)
SIGFPE = syscall.Signal(0x8)
SIGHUP = syscall.Signal(0x1)
SIGILL = syscall.Signal(0x4)
SIGINT = syscall.Signal(0x2)
SIGIO = syscall.Signal(0x1d)
SIGIOT = syscall.Signal(0x6)
SIGKILL = syscall.Signal(0x9)
SIGPIPE = syscall.Signal(0xd)
SIGPOLL = syscall.Signal(0x1d)
SIGPROF = syscall.Signal(0x1b)
SIGPWR = syscall.Signal(0x1e)
SIGQUIT = syscall.Signal(0x3)
SIGSEGV = syscall.Signal(0xb)
SIGSTKFLT = syscall.Signal(0x10)
SIGSTOP = syscall.Signal(0x13)
SIGSYS = syscall.Signal(0x1f)
SIGTERM = syscall.Signal(0xf)
SIGTRAP = syscall.Signal(0x5)
SIGTSTP = syscall.Signal(0x14)
SIGTTIN = syscall.Signal(0x15)
SIGTTOU = syscall.Signal(0x16)
SIGURG = syscall.Signal(0x17)
SIGUSR1 = syscall.Signal(0xa)
SIGUSR2 = syscall.Signal(0xc)
SIGVTALRM = syscall.Signal(0x1a)
SIGWINCH = syscall.Signal(0x1c)
SIGXCPU = syscall.Signal(0x18)
SIGXFSZ = syscall.Signal(0x19)
)
// Error table
//
// NOTE(review): index is the errno value; the gaps at 41 and 58 are errno
// numbers this table leaves unset.
var errors = [...]string{
1: "operation not permitted",
2: "no such file or directory",
3: "no such process",
4: "interrupted system call",
5: "input/output error",
6: "no such device or address",
7: "argument list too long",
8: "exec format error",
9: "bad file descriptor",
10: "no child processes",
11: "resource temporarily unavailable",
12: "cannot allocate memory",
13: "permission denied",
14: "bad address",
15: "block device required",
16: "device or resource busy",
17: "file exists",
18: "invalid cross-device link",
19: "no such device",
20: "not a directory",
21: "is a directory",
22: "invalid argument",
23: "too many open files in system",
24: "too many open files",
25: "inappropriate ioctl for device",
26: "text file busy",
27: "file too large",
28: "no space left on device",
29: "illegal seek",
30: "read-only file system",
31: "too many links",
32: "broken pipe",
33: "numerical argument out of domain",
34: "numerical result out of range",
35: "resource deadlock avoided",
36: "file name too long",
37: "no locks available",
38: "function not implemented",
39: "directory not empty",
40: "too many levels of symbolic links",
42: "no message of desired type",
43: "identifier removed",
44: "channel number out of range",
45: "level 2 not synchronized",
46: "level 3 halted",
47: "level 3 reset",
48: "link number out of range",
49: "protocol driver not attached",
50: "no CSI structure available",
51: "level 2 halted",
52: "invalid exchange",
53: "invalid request descriptor",
54: "exchange full",
55: "no anode",
56: "invalid request code",
57: "invalid slot",
59: "bad font file format",
60: "device not a stream",
61: "no data available",
62: "timer expired",
63: "out of streams resources",
64: "machine is not on the network",
65: "package not installed",
66: "object is remote",
67: "link has been severed",
68: "advertise error",
69: "srmount error",
70: "communication error on send",
71: "protocol error",
72: "multihop attempted",
73: "RFS specific error",
74: "bad message",
75: "value too large for defined data type",
76: "name not unique on network",
77: "file descriptor in bad state",
78: "remote address changed",
79: "can not access a needed shared library",
80: "accessing a corrupted shared library",
81: ".lib section in a.out corrupted",
82: "attempting to link in too many shared libraries",
83: "cannot exec a shared library directly",
84: "invalid or incomplete multibyte or wide character",
85: "interrupted system call should be restarted",
86: "streams pipe error",
87: "too many users",
88: "socket operation on non-socket",
89: "destination address required",
90: "message too long",
91: "protocol wrong type for socket",
92: "protocol not available",
93: "protocol not supported",
94: "socket type not supported",
95: "operation not supported",
96: "protocol family not supported",
97: "address family not supported by protocol",
98: "address already in use",
99: "cannot assign requested address",
100: "network is down",
101: "network is unreachable",
102: "network dropped connection on reset",
103: "software caused connection abort",
104: "connection reset by peer",
105: "no buffer space available",
106: "transport endpoint is already connected",
107: "transport endpoint is not connected",
108: "cannot send after transport endpoint shutdown",
109: "too many references: cannot splice",
110: "connection timed out",
111: "connection refused",
112: "host is down",
113: "no route to host",
114: "operation already in progress",
115: "operation now in progress",
116: "stale file handle",
117: "structure needs cleaning",
118: "not a XENIX named type file",
119: "no XENIX semaphores available",
120: "is a named type file",
121: "remote I/O error",
122: "disk quota exceeded",
123: "no medium found",
124: "wrong medium type",
125: "operation canceled",
126: "required key not available",
127: "key has expired",
128: "key has been revoked",
129: "key was rejected by service",
130: "owner died",
131: "state not recoverable",
132: "operation not possible due to RF-kill",
133: "memory page has hardware error",
}
// Signal table
//
// NOTE(review): index is the signal number (1..31).
var signals = [...]string{
1: "hangup",
2: "interrupt",
3: "quit",
4: "illegal instruction",
5: "trace/breakpoint trap",
6: "aborted",
7: "bus error",
8: "floating point exception",
9: "killed",
10: "user defined signal 1",
11: "segmentation fault",
12: "user defined signal 2",
13: "broken pipe",
14: "alarm clock",
15: "terminated",
16: "stack fault",
17: "child exited",
18: "continued",
19: "stopped (signal)",
20: "stopped",
21: "stopped (tty input)",
22: "stopped (tty output)",
23: "urgent I/O condition",
24: "CPU time limit exceeded",
25: "file size limit exceeded",
26: "virtual timer expired",
27: "profiling timer expired",
28: "window changed",
29: "I/O possible",
30: "power failure",
31: "bad system call",
}
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spinnaker.cats.cache;
import com.netflix.spinnaker.kork.annotations.Beta;
import java.util.Collection;
import java.util.Map;
/**
 * CacheData is stored in a Cache. Attributes are facts about the CacheData that can be updated by
 * CachingAgents. Relationships are links to other CacheData.
 *
 * <p>Note: Not all caches may support a per record ttl
 */
@Beta
public interface CacheData {

    /** @return the identifier of this CacheData, unique within its cache. */
    String getId();

    /** @return The ttl (in seconds) for this CacheData */
    int getTtlSeconds();

    /** @return the attribute map for this CacheData, keyed by attribute name. */
    Map<String, Object> getAttributes();

    /**
     * @return relationships for this CacheData, keyed by type returning a collection of ids for that
     *     type
     */
    Map<String, Collection<String>> getRelationships();
}
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.tencentcloudapi.mps.v20190612.models;
import com.tencentcloudapi.common.AbstractModel;
import com.google.gson.annotations.SerializedName;
import com.google.gson.annotations.Expose;
import java.util.HashMap;
public class DescribeSampleSnapshotTemplatesResponse extends AbstractModel{

    /**
    * Total number of records matching the filter criteria.
    */
    @SerializedName("TotalCount")
    @Expose
    private Long TotalCount;

    /**
    * List of sampled-screenshot template details.
    */
    @SerializedName("SampleSnapshotTemplateSet")
    @Expose
    private SampleSnapshotTemplate [] SampleSnapshotTemplateSet;

    /**
    * Unique request ID, returned with every request. Provide the RequestId of
    * the request when reporting an issue so it can be located.
    */
    @SerializedName("RequestId")
    @Expose
    private String RequestId;

    /**
     * Get the total number of records matching the filter criteria.
     * @return TotalCount Total number of records matching the filter criteria.
     */
    public Long getTotalCount() {
        return this.TotalCount;
    }

    /**
     * Set the total number of records matching the filter criteria.
     * @param TotalCount Total number of records matching the filter criteria.
     */
    public void setTotalCount(Long TotalCount) {
        this.TotalCount = TotalCount;
    }

    /**
     * Get the list of sampled-screenshot template details.
     * @return SampleSnapshotTemplateSet List of sampled-screenshot template details.
     */
    public SampleSnapshotTemplate [] getSampleSnapshotTemplateSet() {
        return this.SampleSnapshotTemplateSet;
    }

    /**
     * Set the list of sampled-screenshot template details.
     * @param SampleSnapshotTemplateSet List of sampled-screenshot template details.
     */
    public void setSampleSnapshotTemplateSet(SampleSnapshotTemplate [] SampleSnapshotTemplateSet) {
        this.SampleSnapshotTemplateSet = SampleSnapshotTemplateSet;
    }

    /**
     * Get the unique request ID, returned with every request.
     * @return RequestId Unique request ID; provide it when reporting an issue.
     */
    public String getRequestId() {
        return this.RequestId;
    }

    /**
     * Set the unique request ID.
     * @param RequestId Unique request ID; provide it when reporting an issue.
     */
    public void setRequestId(String RequestId) {
        this.RequestId = RequestId;
    }

    /**
     * Internal implementation, normal users should not use it.
     * Flattens this object into the given key/value map under the supplied prefix.
     */
    public void toMap(HashMap<String, String> map, String prefix) {
        this.setParamSimple(map, prefix + "TotalCount", this.TotalCount);
        this.setParamArrayObj(map, prefix + "SampleSnapshotTemplateSet.", this.SampleSnapshotTemplateSet);
        this.setParamSimple(map, prefix + "RequestId", this.RequestId);
    }
}
|
{
"pile_set_name": "Github"
}
|
package peering
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// OperationsClient is the peering Client
//
// NOTE(review): embeds BaseClient, so it inherits the base URI, retry
// settings and Send machinery used by the methods below. Generated code.
type OperationsClient struct {
    BaseClient
}
// NewOperationsClient creates an instance of the OperationsClient client.
func NewOperationsClient(subscriptionID string) OperationsClient {
    // Delegates to the WithBaseURI constructor using the package default endpoint.
    return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client using a custom endpoint. Use this
// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient {
    // All client state lives on the embedded BaseClient built here.
    return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// List lists all of the available API operations for peering resources.
// It returns the first page of results; use the returned page's Next/
// NextWithContext (or ListComplete) to walk subsequent pages.
func (client OperationsClient) List(ctx context.Context) (result OperationListResultPage, err error) {
    if tracing.IsEnabled() {
        ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
        defer func() {
            sc := -1
            if result.olr.Response.Response != nil {
                sc = result.olr.Response.Response.StatusCode
            }
            tracing.EndSpan(ctx, sc, err)
        }()
    }
    // Wire up the pager so NextWithContext can fetch subsequent pages.
    result.fn = client.listNextResults
    req, err := client.ListPreparer(ctx)
    if err != nil {
        err = autorest.NewErrorWithError(err, "peering.OperationsClient", "List", nil, "Failure preparing request")
        return
    }
    resp, err := client.ListSender(req)
    if err != nil {
        result.olr.Response = autorest.Response{Response: resp}
        err = autorest.NewErrorWithError(err, "peering.OperationsClient", "List", resp, "Failure sending request")
        return
    }
    result.olr, err = client.ListResponder(resp)
    if err != nil {
        err = autorest.NewErrorWithError(err, "peering.OperationsClient", "List", resp, "Failure responding to request")
        // Fix: return immediately on a responder error. Previously execution
        // fell through to the empty-first-page check below, where a failing
        // NextWithContext could overwrite err with a misleading secondary error.
        return
    }
    // If the service returned an empty first page with a next link, advance
    // once so callers see data (or a definitive empty result).
    if result.olr.hasNextLink() && result.olr.IsEmpty() {
        err = result.NextWithContext(ctx)
    }
    return
}
// ListPreparer prepares the List request.
func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
    // API version pinned by the code generator for this service release.
    const APIVersion = "2020-04-01"
    queryParameters := map[string]interface{}{
        "api-version": APIVersion,
    }
    // GET {BaseURI}/providers/Microsoft.Peering/operations?api-version=...
    preparer := autorest.CreatePreparer(
        autorest.AsGet(),
        autorest.WithBaseURL(client.BaseURI),
        autorest.WithPath("/providers/Microsoft.Peering/operations"),
        autorest.WithQueryParameters(queryParameters))
    return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) {
    // Retries per the client's RetryAttempts/RetryDuration for the status
    // codes autorest considers retryable.
    return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) {
    // Any status other than 200 OK is surfaced as an error by the azure
    // decorator; otherwise the body is unmarshalled into result.
    err = autorest.Respond(
        resp,
        azure.WithErrorUnlessStatusCode(http.StatusOK),
        autorest.ByUnmarshallingJSON(&result),
        autorest.ByClosing())
    result.Response = autorest.Response{Response: resp}
    return
}
// listNextResults retrieves the next set of results, if any.
func (client OperationsClient) listNextResults(ctx context.Context, lastResults OperationListResult) (result OperationListResult, err error) {
    req, err := lastResults.operationListResultPreparer(ctx)
    if err != nil {
        return result, autorest.NewErrorWithError(err, "peering.OperationsClient", "listNextResults", nil, "Failure preparing next results request")
    }
    // A nil request with a nil error means there is no next page; return the
    // zero-valued result.
    if req == nil {
        return
    }
    resp, err := client.ListSender(req)
    if err != nil {
        result.Response = autorest.Response{Response: resp}
        return result, autorest.NewErrorWithError(err, "peering.OperationsClient", "listNextResults", resp, "Failure sending next results request")
    }
    result, err = client.ListResponder(resp)
    if err != nil {
        err = autorest.NewErrorWithError(err, "peering.OperationsClient", "listNextResults", resp, "Failure responding to next results request")
    }
    return
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client OperationsClient) ListComplete(ctx context.Context) (result OperationListResultIterator, err error) {
    if tracing.IsEnabled() {
        ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
        defer func() {
            sc := -1
            if result.Response().Response.Response != nil {
                sc = result.page.Response().Response.Response.StatusCode
            }
            tracing.EndSpan(ctx, sc, err)
        }()
    }
    // The iterator wraps the first page from List; later pages are fetched
    // lazily through the page's fn (listNextResults) during iteration.
    result.page, err = client.List(ctx)
    return
}
|
{
"pile_set_name": "Github"
}
|
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package runtime
import (
"fmt"
"net/url"
"reflect"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/conversion"
)
// Scheme defines methods for serializing and deserializing API objects, a type
// registry for converting group, version, and kind information to and from Go
// schemas, and mappings between Go schemas of different versions. A scheme is the
// foundation for a versioned API and versioned configuration over time.
//
// In a Scheme, a Type is a particular Go struct, a Version is a point-in-time
// identifier for a particular representation of that Type (typically backwards
// compatible), a Kind is the unique name for that Type within the Version, and a
// Group identifies a set of Versions, Kinds, and Types that evolve over time. An
// Unversioned Type is one that is not yet formally bound to a type and is promised
// to be backwards compatible (effectively a "v1" of a Type that does not expect
// to break in the future).
//
// Schemes are not expected to change at runtime and are only threadsafe after
// registration is complete.
type Scheme struct {
    // gvkToType allows one to figure out the go type of an object with
    // the given version and name.
    gvkToType map[unversioned.GroupVersionKind]reflect.Type
    // typeToGVK allows one to find metadata for a given go object.
    // The reflect.Type we index by should *not* be a pointer.
    typeToGVK map[reflect.Type][]unversioned.GroupVersionKind
    // unversionedTypes are transformed without conversion in ConvertToVersion.
    unversionedTypes map[reflect.Type]unversioned.GroupVersionKind
    // unversionedKinds are the names of kinds that can be created in the context of any group
    // or version
    // TODO: resolve the status of unversioned types.
    unversionedKinds map[string]reflect.Type
    // fieldLabelConversionFuncs maps version and resource to the corresponding func to convert
    // resource field labels in that version to internal version.
    fieldLabelConversionFuncs map[string]map[string]FieldLabelConversionFunc
    // converter stores all registered conversion functions. It also has
    // default converting behavior.
    converter *conversion.Converter
    // cloner stores all registered copy functions. It also has default
    // deep copy behavior.
    cloner *conversion.Cloner
}

// FieldLabelConversionFunc converts a field selector (label/value pair) to its
// internal representation.
type FieldLabelConversionFunc func(label, value string) (internalLabel, internalValue string, err error)
// NewScheme creates a new Scheme. This scheme is pluggable by default.
func NewScheme() *Scheme {
    s := &Scheme{
        gvkToType: map[unversioned.GroupVersionKind]reflect.Type{},
        typeToGVK: map[reflect.Type][]unversioned.GroupVersionKind{},
        unversionedTypes: map[reflect.Type]unversioned.GroupVersionKind{},
        unversionedKinds: map[string]reflect.Type{},
        cloner: conversion.NewCloner(),
        fieldLabelConversionFuncs: map[string]map[string]FieldLabelConversionFunc{},
    }
    // The converter resolves type names through the scheme (see nameFunc), so
    // registered kinds convert under their preferred internal kind name.
    s.converter = conversion.NewConverter(s.nameFunc)
    s.AddConversionFuncs(DefaultEmbeddedConversions()...)
    // Enable map[string][]string conversions by default
    if err := s.AddConversionFuncs(DefaultStringConversions...); err != nil {
        panic(err)
    }
    // Registration failures at construction time are programmer errors, so panic.
    if err := s.RegisterInputDefaults(&map[string][]string{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields); err != nil {
        panic(err)
    }
    if err := s.RegisterInputDefaults(&url.Values{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields); err != nil {
        panic(err)
    }
    return s
}
// nameFunc returns the name of the type that we wish to use to determine when two types attempt
// a conversion. Defaults to the go name of the type if the type is not registered.
func (s *Scheme) nameFunc(t reflect.Type) string {
    // find the preferred names for this type
    gvks, ok := s.typeToGVK[t]
    if !ok {
        return t.Name()
    }
    // Prefer the kind name of the registered internal ("__internal") version of
    // this type, if one exists, so all versions convert under one name.
    for _, gvk := range gvks {
        internalGV := gvk.GroupVersion()
        internalGV.Version = "__internal" // this is hacky and maybe should be passed in
        internalGVK := internalGV.WithKind(gvk.Kind)
        if internalType, exists := s.gvkToType[internalGVK]; exists {
            return s.typeToGVK[internalType][0].Kind
        }
    }
    // No internal registration found; fall back to the first registered kind.
    return gvks[0].Kind
}
// fromScope gets the input version, desired output version, and desired Scheme
// from a conversion.Scope.
func (s *Scheme) fromScope(scope conversion.Scope) (inVersion, outVersion string, scheme *Scheme) {
    meta := scope.Meta()
    return meta.SrcVersion, meta.DestVersion, s
}
// Converter allows access to the converter for the scheme
// (the underlying *conversion.Converter holding all registered conversion funcs).
func (s *Scheme) Converter() *conversion.Converter {
    return s.converter
}
// AddUnversionedTypes registers the provided types as "unversioned", which means that they follow special rules.
// Whenever an object of this type is serialized, it is serialized with the provided group version and is not
// converted. Thus unversioned objects are expected to remain backwards compatible forever, as if they were in an
// API group and version that would never be updated.
//
// Panics if a kind with the same name was already registered as unversioned.
//
// TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into
// every version with particular schemas. Resolve this method at that point.
func (s *Scheme) AddUnversionedTypes(version unversioned.GroupVersion, types ...Object) {
    // Unversioned types are also registered as ordinary known types first.
    s.AddKnownTypes(version, types...)
    for _, obj := range types {
        t := reflect.TypeOf(obj).Elem()
        gvk := version.WithKind(t.Name())
        s.unversionedTypes[t] = gvk
        if _, ok := s.unversionedKinds[gvk.Kind]; ok {
            panic(fmt.Sprintf("%v has already been registered as unversioned kind %q - kind name must be unique", reflect.TypeOf(t), gvk.Kind))
        }
        s.unversionedKinds[gvk.Kind] = t
    }
}
// AddKnownTypes registers all types passed in 'types' as being members of version 'version'.
// All objects passed to types should be pointers to structs. The name that go reports for
// the struct becomes the "kind" field when encoding. Version may not be empty - use the
// APIVersionInternal constant if you have a type that does not have a formal version.
//
// Panics if the version is empty or any type is not a pointer to a struct.
func (s *Scheme) AddKnownTypes(gv unversioned.GroupVersion, types ...Object) {
    if len(gv.Version) == 0 {
        panic(fmt.Sprintf("version is required on all types: %s %v", gv, types[0]))
    }
    for _, obj := range types {
        t := reflect.TypeOf(obj)
        if t.Kind() != reflect.Ptr {
            panic("All types must be pointers to structs.")
        }
        // Index by the element (struct) type, never the pointer type.
        t = t.Elem()
        if t.Kind() != reflect.Struct {
            panic("All types must be pointers to structs.")
        }
        gvk := gv.WithKind(t.Name())
        s.gvkToType[gvk] = t
        s.typeToGVK[t] = append(s.typeToGVK[t], gvk)
    }
}
// AddKnownTypeWithName is like AddKnownTypes, but it lets you specify what this type should
// be encoded as. Useful for testing when you don't want to make multiple packages to define
// your structs. Version may not be empty - use the APIVersionInternal constant if you have a
// type that does not have a formal version.
//
// Panics if the version is empty or obj is not a pointer to a struct.
func (s *Scheme) AddKnownTypeWithName(gvk unversioned.GroupVersionKind, obj Object) {
    t := reflect.TypeOf(obj)
    if len(gvk.Version) == 0 {
        panic(fmt.Sprintf("version is required on all types: %s %v", gvk, t))
    }
    if t.Kind() != reflect.Ptr {
        panic("All types must be pointers to structs.")
    }
    // Index by the element (struct) type, never the pointer type.
    t = t.Elem()
    if t.Kind() != reflect.Struct {
        panic("All types must be pointers to structs.")
    }
    s.gvkToType[gvk] = t
    s.typeToGVK[t] = append(s.typeToGVK[t], gvk)
}
// KnownTypes returns the types known for the given version, keyed by kind name.
func (s *Scheme) KnownTypes(gv unversioned.GroupVersion) map[string]reflect.Type {
    known := map[string]reflect.Type{}
    // Scan the full registry and keep only entries in the requested group/version.
    for gvk, t := range s.gvkToType {
        if gvk.GroupVersion() == gv {
            known[gvk.Kind] = t
        }
    }
    return known
}
// ObjectKind returns the group,version,kind of the go object,
// or an error if it's not a pointer or is unregistered.
func (s *Scheme) ObjectKind(obj Object) (unversioned.GroupVersionKind, error) {
    gvks, err := s.ObjectKinds(obj)
    if err != nil {
        return unversioned.GroupVersionKind{}, err
    }
    // A type may be registered under several kinds; the first is the preferred one.
    return gvks[0], nil
}
// ObjectKinds returns all possible group,version,kind of the go object,
// or an error if it's not a pointer or is unregistered.
func (s *Scheme) ObjectKinds(obj Object) ([]unversioned.GroupVersionKind, error) {
v, err := conversion.EnforcePtr(obj)
if err != nil {
return nil, err
}
t := v.Type()
gvks, ok := s.typeToGVK[t]
if !ok {
return nil, ¬RegisteredErr{t: t}
}
return gvks, nil
}
// Recognizes reports whether the scheme is able to handle the provided
// group,version,kind of an object.
func (s *Scheme) Recognizes(gvk unversioned.GroupVersionKind) bool {
    _, ok := s.gvkToType[gvk]
    return ok
}
// IsUnversioned reports whether obj is registered as an unversioned type.
// The second return value is false when the type is not registered with the
// scheme at all (or obj is not a pointer), in which case the first value is
// meaningless; otherwise the first value reports unversioned membership.
func (s *Scheme) IsUnversioned(obj Object) (bool, bool) {
    v, err := conversion.EnforcePtr(obj)
    if err != nil {
        return false, false
    }
    t := v.Type()
    if _, ok := s.typeToGVK[t]; !ok {
        return false, false
    }
    _, ok := s.unversionedTypes[t]
    return ok, true
}
// New returns a new API object of the given version and name, or an error if it hasn't
// been registered. The version and kind fields must be specified.
func (s *Scheme) New(kind unversioned.GroupVersionKind) (Object, error) {
if t, exists := s.gvkToType[kind]; exists {
return reflect.New(t).Interface().(Object), nil
}
if t, exists := s.unversionedKinds[kind.Kind]; exists {
return reflect.New(t).Interface().(Object), nil
}
return nil, ¬RegisteredErr{gvk: kind}
}
// Log sets a logger on the scheme. For test purposes only
// (installs the debug logger on the underlying converter).
func (s *Scheme) Log(l conversion.DebugLogger) {
    s.converter.Debug = l
}
// AddIgnoredConversionType identifies a pair of types that should be skipped by
// conversion (because the data inside them is explicitly dropped during
// conversion). Delegates to the underlying converter.
func (s *Scheme) AddIgnoredConversionType(from, to interface{}) error {
    return s.converter.RegisterIgnoredConversion(from, to)
}
// AddConversionFuncs adds functions to the list of conversion functions. The given
// functions should know how to convert between two of your API objects, or their
// sub-objects. We deduce how to call these functions from the types of their two
// parameters; see the comment for Converter.Register.
//
// Note that, if you need to copy sub-objects that didn't change, you can use the
// conversion.Scope object that will be passed to your conversion function.
// Additionally, all conversions started by Scheme will set the SrcVersion and
// DestVersion fields on the Meta object. Example:
//
// s.AddConversionFuncs(
// func(in *InternalObject, out *ExternalObject, scope conversion.Scope) error {
// // You can depend on Meta() being non-nil, and this being set to
// // the source version, e.g., ""
// s.Meta().SrcVersion
// // You can depend on this being set to the destination version,
// // e.g., "v1".
// s.Meta().DestVersion
// // Call scope.Convert to copy sub-fields.
// s.Convert(&in.SubFieldThatMoved, &out.NewLocation.NewName, 0)
// return nil
// },
// )
//
// (For more detail about conversion functions, see Converter.Register's comment.)
//
// Also note that the default behavior, if you don't add a conversion function, is to
// sanely copy fields that have the same names and same type names. It's OK if the
// destination type has extra fields, but it must not remove any. So you only need to
// add conversion functions for things with changed/removed fields.
func (s *Scheme) AddConversionFuncs(conversionFuncs ...interface{}) error {
    // Fail fast on the first function that cannot be registered; earlier
    // functions in the slice remain registered.
    for _, f := range conversionFuncs {
        if err := s.converter.RegisterConversionFunc(f); err != nil {
            return err
        }
    }
    return nil
}
// AddGeneratedConversionFuncs is similar to AddConversionFuncs, but registers
// conversion functions that were automatically generated.
func (s *Scheme) AddGeneratedConversionFuncs(conversionFuncs ...interface{}) error {
    // Fail fast on the first function that cannot be registered.
    for _, f := range conversionFuncs {
        if err := s.converter.RegisterGeneratedConversionFunc(f); err != nil {
            return err
        }
    }
    return nil
}
// AddDeepCopyFuncs adds a function to the list of deep-copy functions.
// For the expected format of deep-copy function, see the comment for
// Copier.RegisterDeepCopyFunction.
func (s *Scheme) AddDeepCopyFuncs(deepCopyFuncs ...interface{}) error {
    // Fail fast on the first function that cannot be registered.
    for _, f := range deepCopyFuncs {
        if err := s.cloner.RegisterDeepCopyFunc(f); err != nil {
            return err
        }
    }
    return nil
}
// AddGeneratedDeepCopyFuncs registers deep-copy functions that were produced
// by code generation; otherwise it behaves exactly like AddDeepCopyFuncs.
func (s *Scheme) AddGeneratedDeepCopyFuncs(deepCopyFuncs ...interface{}) error {
	for _, generated := range deepCopyFuncs {
		err := s.cloner.RegisterGeneratedDeepCopyFunc(generated)
		if err != nil {
			return err
		}
	}
	return nil
}
// AddFieldLabelConversionFunc adds a conversion function used to translate
// field selectors of the given kind from the given version into their
// internal-version representation.
func (s *Scheme) AddFieldLabelConversionFunc(version, kind string, conversionFunc FieldLabelConversionFunc) error {
	kindFuncs := s.fieldLabelConversionFuncs[version]
	if kindFuncs == nil {
		// Lazily create the per-version map on first registration.
		kindFuncs = map[string]FieldLabelConversionFunc{}
		s.fieldLabelConversionFuncs[version] = kindFuncs
	}
	kindFuncs[kind] = conversionFunc
	return nil
}
// AddStructFieldConversion allows you to specify a mechanical copy for a moved
// or renamed struct field without writing an entire conversion function. See
// the comment in conversion.Converter.SetStructFieldCopy for parameter details.
// Call as many times as needed, even on the same fields.
func (s *Scheme) AddStructFieldConversion(srcFieldType interface{}, srcFieldName string, destFieldType interface{}, destFieldName string) error {
	// Thin pass-through: all bookkeeping lives in the underlying converter.
	return s.converter.SetStructFieldCopy(srcFieldType, srcFieldName, destFieldType, destFieldName)
}
// RegisterInputDefaults sets the provided field mapping function and field matching
// as the defaults for the provided input type. The fn may be nil, in which case no
// mapping will happen by default. Use this method to register a mechanism for handling
// a specific input type in conversion, such as a map[string]string to structs.
func (s *Scheme) RegisterInputDefaults(in interface{}, fn conversion.FieldMappingFunc, defaultFlags conversion.FieldMatchingFlags) error {
	// Thin pass-through to the underlying converter's registration.
	return s.converter.RegisterInputDefaults(in, fn, defaultFlags)
}
// AddDefaultingFuncs registers default-value functions. Each function is
// responsible for applying default values when a versioned API object is
// converted into an internal API object; sub-objects need not be handled.
// How a function is invoked is deduced from the types of its parameters.
//
// Example:
//
//	s.AddDefaultingFuncs(
//		func(obj *v1.Pod) {
//			if obj.OptionalField == "" {
//				obj.OptionalField = "DefaultValue"
//			}
//		},
//	)
func (s *Scheme) AddDefaultingFuncs(defaultingFuncs ...interface{}) error {
	for _, defaultingFunc := range defaultingFuncs {
		if err := s.converter.RegisterDefaultingFunc(defaultingFunc); err != nil {
			return err
		}
	}
	return nil
}
// Copy makes a deep copy of an API object and returns it as an Object.
func (s *Scheme) Copy(src Object) (Object, error) {
	copied, err := s.DeepCopy(src)
	if err != nil {
		return nil, err
	}
	// DeepCopy preserves the concrete type, so this assertion mirrors src.
	return copied.(Object), nil
}
// DeepCopy performs a deep copy of the given object, delegating to the
// scheme's registered cloner.
func (s *Scheme) DeepCopy(src interface{}) (interface{}, error) {
	return s.cloner.DeepCopy(src)
}
// Convert attempts to convert in into out; both must be pointers. Intended
// mainly for easy testing of conversion functions. Returns an error when the
// conversion isn't possible. Types that haven't been registered may be passed
// (for example, to test conversion of types nested within registered types),
// but in that case the conversion.Scope object handed to conversion functions
// won't have SrcVersion or DestVersion set correctly in Meta().
func (s *Scheme) Convert(in, out interface{}) error {
	// Default both sides to an unknown group/version; refine below whenever
	// the object's kind can actually be determined.
	unknownGV := unversioned.GroupVersion{Group: "unknown", Version: "unknown"}
	inVersion, outVersion := unknownGV, unknownGV
	if inObj, ok := in.(Object); ok {
		if gvk, err := s.ObjectKind(inObj); err == nil {
			inVersion = gvk.GroupVersion()
		}
	}
	if outObj, ok := out.(Object); ok {
		if gvk, err := s.ObjectKind(outObj); err == nil {
			outVersion = gvk.GroupVersion()
		}
	}
	flags, meta := s.generateConvertMeta(inVersion, outVersion, in)
	if flags == 0 {
		// No default flags registered: permit differing field type names.
		flags = conversion.AllowDifferentFieldTypeNames
	}
	return s.converter.Convert(in, out, flags, meta)
}
// ConvertFieldLabel converts the given field label and value for a kind's
// field selector from a versioned representation to an unversioned one.
func (s *Scheme) ConvertFieldLabel(version, kind, label, value string) (string, string, error) {
	kindFuncs := s.fieldLabelConversionFuncs[version]
	if kindFuncs == nil {
		return "", "", fmt.Errorf("No field label conversion function found for version: %s", version)
	}
	conversionFunc, ok := kindFuncs[kind]
	if !ok {
		return "", "", fmt.Errorf("No field label conversion function found for version %s and kind %s", version, kind)
	}
	return conversionFunc(label, value)
}
// ConvertToVersion attempts to convert an input object to its matching Kind in another
// version within this scheme. Will return an error if the provided version does not
// contain the inKind (or a mapping by name defined with AddKnownTypeWithName). Will also
// return an error if the conversion does not result in a valid Object being
// returned. The serializer handles loading/serializing nested objects.
func (s *Scheme) ConvertToVersion(in Object, outVersion string) (Object, error) {
	gv, err := unversioned.ParseGroupVersion(outVersion)
	if err != nil {
		return nil, err
	}
	switch in.(type) {
	case *Unknown, *Unstructured, *UnstructuredList:
		// Generic carriers are not structurally converted; only their
		// group/version kind is rewritten in place via setTargetVersion.
		// NOTE(review): the deferred restore fires as soon as this function
		// returns, so the caller observes the ORIGINAL group/version kind on
		// the returned (pointer) object — confirm this ordering is intended.
		old := in.GetObjectKind().GroupVersionKind()
		defer in.GetObjectKind().SetGroupVersionKind(old)
		setTargetVersion(in, s, gv)
		return in, nil
	}
	// Structural conversion requires a pointer to a struct.
	t := reflect.TypeOf(in)
	if t.Kind() != reflect.Ptr {
		return nil, fmt.Errorf("only pointer types may be converted: %v", t)
	}
	t = t.Elem()
	if t.Kind() != reflect.Struct {
		return nil, fmt.Errorf("only pointers to struct types may be converted: %v", t)
	}
	// Resolve the kind: unversioned types carry their own fixed kind;
	// otherwise the first kind registered for the Go type is used.
	var kind unversioned.GroupVersionKind
	if unversionedKind, ok := s.unversionedTypes[t]; ok {
		kind = unversionedKind
	} else {
		kinds, ok := s.typeToGVK[t]
		if !ok || len(kinds) == 0 {
			return nil, fmt.Errorf("%v is not a registered type and cannot be converted into version %q", t, outVersion)
		}
		kind = kinds[0]
	}
	outKind := gv.WithKind(kind.Kind)
	inKind, err := s.ObjectKind(in)
	if err != nil {
		return nil, err
	}
	// Allocate a fresh instance of the target kind and convert into it.
	out, err := s.New(outKind)
	if err != nil {
		return nil, err
	}
	flags, meta := s.generateConvertMeta(inKind.GroupVersion(), gv, in)
	if err := s.converter.Convert(in, out, flags, meta); err != nil {
		return nil, err
	}
	// Stamp the converted result with the requested target version.
	setTargetVersion(out, s, gv)
	return out, nil
}
// generateConvertMeta builds the conversion.Meta value passed to Convert,
// stamping it with the source and destination group/versions.
func (s *Scheme) generateConvertMeta(srcGroupVersion, destGroupVersion unversioned.GroupVersion, in interface{}) (conversion.FieldMatchingFlags, *conversion.Meta) {
	defaultFlags, meta := s.converter.DefaultMeta(reflect.TypeOf(in))
	meta.SrcVersion = srcGroupVersion.String()
	meta.DestVersion = destGroupVersion.String()
	return defaultFlags, meta
}
// setTargetVersion stamps obj with the given group/version; the internal
// version is represented by a nil GroupVersionKind.
func setTargetVersion(obj Object, raw *Scheme, gv unversioned.GroupVersion) {
	if gv.Version != APIVersionInternal {
		gvk, _ := raw.ObjectKind(obj)
		obj.GetObjectKind().SetGroupVersionKind(
			&unversioned.GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: gvk.Kind})
		return
	}
	// The internal version is a special case: clear the kind entirely.
	obj.GetObjectKind().SetGroupVersionKind(nil)
}
|
{
"pile_set_name": "Github"
}
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!--NewPage-->
<HTML>
<HEAD>
<!-- Generated by javadoc (build 1.6.0_23) on Mon May 02 03:14:24 KST 2011 -->
<TITLE>
Uses of Class org.eclipse.cdt.managedbuilder.llvm.ui.preferences.LibraryPathListEditor
</TITLE>
<META NAME="date" CONTENT="2011-05-02">
<LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../../../../../stylesheet.css" TITLE="Style">
<SCRIPT type="text/javascript">
function windowTitle()
{
    // When loaded as an external reference page, leave the frame title alone.
    if (location.href.indexOf('is-external=true') != -1) {
        return;
    }
    parent.document.title="Uses of Class org.eclipse.cdt.managedbuilder.llvm.ui.preferences.LibraryPathListEditor";
}
</SCRIPT>
<NOSCRIPT>
</NOSCRIPT>
</HEAD>
<BODY BGCOLOR="white" onload="windowTitle();">
<HR>
<!-- ========= START OF TOP NAVBAR ======= -->
<A NAME="navbar_top"><!-- --></A>
<A HREF="#skip-navbar_top" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_top_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../../org/eclipse/cdt/managedbuilder/llvm/ui/preferences/LibraryPathListEditor.html" title="class in org.eclipse.cdt.managedbuilder.llvm.ui.preferences"><FONT CLASS="NavBarFont1"><B>Class</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Use</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../../index-files/index-1.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV
NEXT</FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../../../../index.html?org/eclipse/cdt/managedbuilder/llvm/ui/preferences/class-use/LibraryPathListEditor.html" target="_top"><B>FRAMES</B></A>
<A HREF="LibraryPathListEditor.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_top"></A>
<!-- ========= END OF TOP NAVBAR ========= -->
<HR>
<CENTER>
<H2>
<B>Uses of Class<br>org.eclipse.cdt.managedbuilder.llvm.ui.preferences.LibraryPathListEditor</B></H2>
</CENTER>
No usage of org.eclipse.cdt.managedbuilder.llvm.ui.preferences.LibraryPathListEditor
<P>
<HR>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<A NAME="navbar_bottom"><!-- --></A>
<A HREF="#skip-navbar_bottom" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_bottom_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../../org/eclipse/cdt/managedbuilder/llvm/ui/preferences/LibraryPathListEditor.html" title="class in org.eclipse.cdt.managedbuilder.llvm.ui.preferences"><FONT CLASS="NavBarFont1"><B>Class</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Use</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../../index-files/index-1.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV
NEXT</FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../../../../index.html?org/eclipse/cdt/managedbuilder/llvm/ui/preferences/class-use/LibraryPathListEditor.html" target="_top"><B>FRAMES</B></A>
<A HREF="LibraryPathListEditor.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_bottom"></A>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<HR>
</BODY>
</HTML>
|
{
"pile_set_name": "Github"
}
|
// Licensed to Cloudera, Inc. under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. Cloudera, Inc. licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.cloudera.csd.validation.monitoring.constraints;
import com.cloudera.csd.descriptors.MetricDescriptor;
import com.cloudera.csd.validation.monitoring.AbstractMonitoringValidator;
import com.cloudera.csd.validation.monitoring.MonitoringValidationContext;
import com.cloudera.csd.validation.references.components.DescriptorPathImpl;
import com.google.common.base.Preconditions;
import java.util.List;
import javax.validation.ConstraintViolation;
import org.apache.commons.lang.StringUtils;
/**
 * Validates that a metric defined for multiple entity types carries the same
 * definition everywhere it appears: label, description, numerator and
 * denominator units, counter flag, and weighting metric name.
 * See getDescription for more details.
 */
public class ConsistentMetricDefinitionValidator
  extends AbstractMonitoringValidator<MetricDescriptor> {

  @Override
  public String getDescription() {
    return
        "Validates that metric definitions are consistent if that metric is " +
        "defined for multiple entity types.";
  }

  /**
   * Compares the given descriptor against the canonical definition already
   * recorded under the same metric name and reports the first attribute that
   * disagrees. Returns no violations when all attributes match.
   *
   * @param context the validation context holding previously seen metrics
   * @param metricDescriptor the descriptor occurrence being validated
   * @param path the descriptor path used to locate any violation
   * @return the violations for the first inconsistent attribute, or an empty
   *         list when the definitions are consistent
   */
  @Override
  public <T> List<ConstraintViolation<T>> validate(
      MonitoringValidationContext context,
      MetricDescriptor metricDescriptor,
      DescriptorPathImpl path) {
    Preconditions.checkNotNull(context);
    Preconditions.checkNotNull(metricDescriptor);
    Preconditions.checkNotNull(path);
    // The canonical definition recorded for this metric name; every later
    // occurrence must agree with it attribute by attribute.
    MetricDescriptor definition =
        context.metricsDefined.get(metricDescriptor.getName());
    Preconditions.checkNotNull(definition);
    // StringUtils.equals is null-safe, so unset attributes compare cleanly.
    if (!StringUtils.equals(
        metricDescriptor.getLabel(),
        definition.getLabel())) {
      return forViolation(
          inconsistencyMessage(
              "labels",
              metricDescriptor.getName(),
              metricDescriptor.getLabel(),
              definition.getLabel()),
          metricDescriptor, metricDescriptor.getLabel(), path);
    }
    if (!StringUtils.equals(
        metricDescriptor.getDescription(),
        definition.getDescription())) {
      return forViolation(
          inconsistencyMessage(
              "descriptions",
              metricDescriptor.getName(),
              metricDescriptor.getDescription(),
              definition.getDescription()),
          metricDescriptor, metricDescriptor.getDescription(), path);
    }
    if (!StringUtils.equals(
        metricDescriptor.getNumeratorUnit(),
        definition.getNumeratorUnit())) {
      return forViolation(
          inconsistencyMessage(
              "numerator units",
              metricDescriptor.getName(),
              metricDescriptor.getNumeratorUnit(),
              definition.getNumeratorUnit()),
          metricDescriptor, metricDescriptor.getNumeratorUnit(), path);
    }
    if (!StringUtils.equals(
        metricDescriptor.getDenominatorUnit(),
        definition.getDenominatorUnit())) {
      return forViolation(
          inconsistencyMessage(
              "denominator units",
              metricDescriptor.getName(),
              metricDescriptor.getDenominatorUnit(),
              definition.getDenominatorUnit()),
          metricDescriptor, metricDescriptor.getDenominatorUnit(), path);
    }
    if (metricDescriptor.isCounter() != definition.isCounter()) {
      return forViolation(
          inconsistencyMessage(
              "counter definitions",
              metricDescriptor.getName(),
              metricDescriptor.isCounter(),
              definition.isCounter()),
          metricDescriptor, metricDescriptor.isCounter(), path);
    }
    if (!StringUtils.equals(
        metricDescriptor.getWeightingMetricName(),
        definition.getWeightingMetricName())) {
      return forViolation(
          inconsistencyMessage(
              "weighting metric names",
              metricDescriptor.getName(),
              metricDescriptor.getWeightingMetricName(),
              definition.getWeightingMetricName()),
          metricDescriptor, metricDescriptor.getWeightingMetricName(), path);
    }
    return noViolations();
  }

  /**
   * Formats the shared "Inconsistent ... for metric ..." violation message.
   * {@code %s} renders booleans and nulls via String.valueOf, so the output
   * is byte-identical to the previous per-attribute format strings.
   *
   * @param attribute plural attribute name, e.g. "labels"
   * @param metricName the metric whose definitions disagree
   * @param current the attribute value on the descriptor being validated
   * @param existing the attribute value on the canonical definition
   * @return the formatted violation message
   */
  private static String inconsistencyMessage(
      String attribute,
      String metricName,
      Object current,
      Object existing) {
    return String.format(
        "Inconsistent %s for metric '%s': '%s' and '%s'. ",
        attribute, metricName, current, existing);
  }
}
|
{
"pile_set_name": "Github"
}
|
<?php
/*
* Copyright 2014 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
/**
 * Generated model class for a Firebase Hosting release resource.
 *
 * NOTE(review): the `$releaseUserType`/`$releaseUserDataType` and
 * `$versionType`/`$versionDataType` pairs follow the Google_Model naming
 * convention for typed sub-objects; `$this->releaseUser` and `$this->version`
 * are assigned dynamically by the setters below rather than being declared —
 * presumably Google_Model's magic accessors rely on this, so do not add
 * explicit declarations without confirming against the base class.
 */
class Google_Service_FirebaseHosting_Release extends Google_Model
{
  // Deploy message supplied when the release was created.
  public $message;
  // Fully-qualified resource name of the release.
  public $name;
  // Timestamp of when the release occurred.
  public $releaseTime;
  // Type metadata for the dynamically-set $releaseUser sub-object.
  protected $releaseUserType = 'Google_Service_FirebaseHosting_ActingUser';
  protected $releaseUserDataType = '';
  // Release type string.
  public $type;
  // Type metadata for the dynamically-set $version sub-object.
  protected $versionType = 'Google_Service_FirebaseHosting_Version';
  protected $versionDataType = '';
  public function setMessage($message)
  {
    $this->message = $message;
  }
  public function getMessage()
  {
    return $this->message;
  }
  public function setName($name)
  {
    $this->name = $name;
  }
  public function getName()
  {
    return $this->name;
  }
  public function setReleaseTime($releaseTime)
  {
    $this->releaseTime = $releaseTime;
  }
  public function getReleaseTime()
  {
    return $this->releaseTime;
  }
  /**
   * @param Google_Service_FirebaseHosting_ActingUser
   */
  public function setReleaseUser(Google_Service_FirebaseHosting_ActingUser $releaseUser)
  {
    $this->releaseUser = $releaseUser;
  }
  /**
   * @return Google_Service_FirebaseHosting_ActingUser
   */
  public function getReleaseUser()
  {
    return $this->releaseUser;
  }
  public function setType($type)
  {
    $this->type = $type;
  }
  public function getType()
  {
    return $this->type;
  }
  /**
   * @param Google_Service_FirebaseHosting_Version
   */
  public function setVersion(Google_Service_FirebaseHosting_Version $version)
  {
    $this->version = $version;
  }
  /**
   * @return Google_Service_FirebaseHosting_Version
   */
  public function getVersion()
  {
    return $this->version;
  }
}
|
{
"pile_set_name": "Github"
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.platform;
import java.io.Serializable;
import javax.cache.event.CacheEntryEvent;
import javax.cache.event.CacheEntryListenerException;
import org.apache.ignite.Ignite;
import org.apache.ignite.binary.BinaryObject;
import org.apache.ignite.cache.CacheEntryEventSerializableFilter;
import org.apache.ignite.resources.IgniteInstanceResource;
/**
 * Test filter factory producing a serializable cache-entry event filter that
 * matches values by a configurable string prefix.
 */
public class PlatformCacheEntryEventFilterFactory implements Serializable,
    PlatformJavaObjectFactory<CacheEntryEventSerializableFilter> {
    /** Prefix to match; set from the platform side. */
    private String startsWith = "-";

    /** Ignite instance injected by the resource framework. */
    @IgniteInstanceResource
    private Ignite ignite;

    /** {@inheritDoc} */
    @Override public CacheEntryEventSerializableFilter create() {
        assert ignite != null;

        return new CacheEntryEventSerializableFilter() {
            @Override public boolean evaluate(CacheEntryEvent evt) throws CacheEntryListenerException {
                Object val = evt.getValue();

                // Plain string values are matched directly against the prefix.
                if (val instanceof String)
                    return ((String)val).startsWith(startsWith);

                // Otherwise the value must be a binary object carrying a "String" field.
                assert val instanceof BinaryObject;

                String fieldVal = (String)((BinaryObject)val).field("String");

                return fieldVal.startsWith(startsWith);
            }
        };
    }
}
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright 2016-2018 John Grosh (jagrosh) & Kaidan Gustave (TheMonitorLizard)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.jagrosh.jdautilities.menu;
import java.awt.Color;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import com.jagrosh.jdautilities.commons.waiter.EventWaiter;
import net.dv8tion.jda.api.EmbedBuilder;
import net.dv8tion.jda.api.MessageBuilder;
import net.dv8tion.jda.api.entities.Emote;
import net.dv8tion.jda.api.entities.Message;
import net.dv8tion.jda.api.entities.MessageChannel;
import net.dv8tion.jda.api.entities.MessageReaction.ReactionEmote;
import net.dv8tion.jda.api.entities.Role;
import net.dv8tion.jda.api.entities.User;
import net.dv8tion.jda.api.events.message.react.MessageReactionAddEvent;
import net.dv8tion.jda.api.requests.RestAction;
import net.dv8tion.jda.internal.utils.Checks;
/**
* A {@link com.jagrosh.jdautilities.menu.Menu Menu} implementation that creates
* a organized display of emotes/emojis as buttons paired with options, and below
* the menu reactions corresponding to each button.
*
* @author John Grosh
*/
public class ButtonMenu extends Menu
{
private final Color color;
private final String text;
private final String description;
private final List<String> choices;
private final Consumer<ReactionEmote> action;
private final Consumer<Message> finalAction;
ButtonMenu(EventWaiter waiter, Set<User> users, Set<Role> roles, long timeout, TimeUnit unit,
Color color, String text, String description, List<String> choices, Consumer<ReactionEmote> action, Consumer<Message> finalAction)
{
super(waiter, users, roles, timeout, unit);
this.color = color;
this.text = text;
this.description = description;
this.choices = choices;
this.action = action;
this.finalAction = finalAction;
}
/**
* Shows the ButtonMenu as a new {@link net.dv8tion.jda.api.entities.Message Message}
* in the provided {@link net.dv8tion.jda.api.entities.MessageChannel MessageChannel}.
*
* @param channel
* The MessageChannel to send the new Message to
*/
@Override
public void display(MessageChannel channel)
{
initialize(channel.sendMessage(getMessage()));
}
/**
* Displays this ButtonMenu by editing the provided {@link net.dv8tion.jda.api.entities.Message Message}.
*
* @param message
* The Message to display the Menu in
*/
@Override
public void display(Message message)
{
initialize(message.editMessage(getMessage()));
}
// Initializes the ButtonMenu using a Message RestAction
// This is either through editing a previously existing Message
// OR through sending a new one to a TextChannel.
private void initialize(RestAction<Message> ra)
{
ra.queue(m -> {
for(int i=0; i<choices.size(); i++)
{
// Get the emote to display.
Emote emote;
try {
emote = m.getJDA().getEmoteById(choices.get(i));
} catch(Exception e) {
emote = null;
}
// If the emote is null that means that it might be an emoji.
// If it's neither, that's on the developer and we'll let it
// throw an error when we queue a rest action.
RestAction<Void> r = emote==null ? m.addReaction(choices.get(i)) : m.addReaction(emote);
if(i+1<choices.size())
r.queue(); // If there is still more reactions to add we delay using the EventWaiter
else
{
// This is the last reaction added.
r.queue(v -> {
waiter.waitForEvent(MessageReactionAddEvent.class, event -> {
// If the message is not the same as the ButtonMenu
// currently being displayed.
if(!event.getMessageId().equals(m.getId()))
return false;
// If the reaction is an Emote we get the Snowflake,
// otherwise we get the unicode value.
String re = event.getReaction().getReactionEmote().isEmote()
? event.getReaction().getReactionEmote().getId()
: event.getReaction().getReactionEmote().getName();
// If the value we got is not registered as a button to
// the ButtonMenu being displayed we return false.
if(!choices.contains(re))
return false;
// Last check is that the person who added the reaction
// is a valid user.
return isValidUser(event.getUser(), event.isFromGuild() ? event.getGuild() : null);
}, (MessageReactionAddEvent event) -> {
// What happens next is after a valid event
// is fired and processed above.
// Preform the specified action with the ReactionEmote
action.accept(event.getReaction().getReactionEmote());
finalAction.accept(m);
}, timeout, unit, () -> finalAction.accept(m));
});
}
}
});
}
// Generates a ButtonMenu message
private Message getMessage()
{
MessageBuilder mbuilder = new MessageBuilder();
if(text!=null)
mbuilder.append(text);
if(description!=null)
mbuilder.setEmbed(new EmbedBuilder().setColor(color).setDescription(description).build());
return mbuilder.build();
}
/**
* The {@link com.jagrosh.jdautilities.menu.Menu.Builder Menu.Builder} for
* a {@link com.jagrosh.jdautilities.menu.ButtonMenu ButtonMenu}.
*
* @author John Grosh
*/
public static class Builder extends Menu.Builder<Builder, ButtonMenu>
{
private Color color;
private String text;
private String description;
private final List<String> choices = new LinkedList<>();
private Consumer<ReactionEmote> action;
private Consumer<Message> finalAction = (m) -> {};
/**
* Builds the {@link com.jagrosh.jdautilities.menu.ButtonMenu ButtonMenu}
* with this Builder.
*
* @return The OrderedMenu built from this Builder.
*
* @throws java.lang.IllegalArgumentException
* If one of the following is violated:
* <ul>
* <li>No {@link com.jagrosh.jdautilities.commons.waiter.EventWaiter EventWaiter} was set.</li>
* <li>No choices were set.</li>
* <li>No action {@link java.util.function.Consumer Consumer} was set.</li>
* <li>Neither text nor description were set.</li>
* </ul>
*/
@Override
public ButtonMenu build()
{
Checks.check(waiter != null, "Must set an EventWaiter");
Checks.check(!choices.isEmpty(), "Must have at least one choice");
Checks.check(action != null, "Must provide an action consumer");
Checks.check(text != null || description != null, "Either text or description must be set");
return new ButtonMenu(waiter, users, roles, timeout, unit, color, text, description, choices, action, finalAction);
}
/**
* Sets the {@link java.awt.Color Color} of the {@link net.dv8tion.jda.api.entities.MessageEmbed MessageEmbed}.
*
* @param color
* The Color of the MessageEmbed
*
* @return This builder
*/
public Builder setColor(Color color)
{
this.color = color;
return this;
}
/**
* Sets the text of the {@link net.dv8tion.jda.api.entities.Message Message} to be displayed
* when the {@link com.jagrosh.jdautilities.menu.ButtonMenu ButtonMenu} is built.
*
* <p>This is displayed directly above the embed.
*
* @param text
* The Message content to be displayed above the embed when the ButtonMenu is built
*
* @return This builder
*/
public Builder setText(String text)
{
this.text = text;
return this;
}
/**
* Sets the description to be placed in an {@link net.dv8tion.jda.api.entities.MessageEmbed MessageEmbed}.
* <br>If this is {@code null}, no MessageEmbed will be displayed
*
* @param description
* The content of the MessageEmbed's description
*
* @return This builder
*/
public Builder setDescription(String description)
{
this.description = description;
return this;
}
/**
* Sets the {@link java.util.function.Consumer Consumer} action to perform upon selecting a button.
*
* @param action
* The Consumer action to perform upon selecting a button
*
* @return This builder
*/
public Builder setAction(Consumer<ReactionEmote> action)
{
this.action = action;
return this;
}
/**
* Sets the {@link java.util.function.Consumer Consumer} to perform if the
* {@link com.jagrosh.jdautilities.menu.ButtonMenu ButtonMenu} is done,
* either via cancellation, a timeout, or a selection being made.<p>
*
* This accepts the message used to display the menu when called.
*
* @param finalAction
* The Runnable action to perform if the ButtonMenu is done
*
* @return This builder
*/
public Builder setFinalAction(Consumer<Message> finalAction)
{
this.finalAction = finalAction;
return this;
}
/**
* Adds a single String unicode emoji as a button choice.
*
* <p>Any non-unicode {@link net.dv8tion.jda.api.entities.Emote Emote} should be
* added using {@link ButtonMenu.Builder#addChoice(Emote)
* ButtonMenu.Builder#addChoice(Emote)}.
*
* @param emoji
* The String unicode emoji to add
*
* @return This builder
*/
public Builder addChoice(String emoji)
{
this.choices.add(emoji);
return this;
}
/**
* Adds a single custom {@link net.dv8tion.jda.api.entities.Emote Emote} as button choices.
*
* <p>Any regular unicode emojis should be added using {@link
* ButtonMenu.Builder#addChoice(String)
* ButtonMenu.Builder#addChoice(String)}.
*
* @param emote
* The Emote object to add
*
* @return This builder
*/
public Builder addChoice(Emote emote)
{
return addChoice(emote.getId());
}
/**
* Adds String unicode emojis as button choices.
*
* <p>Any non-unicode {@link net.dv8tion.jda.api.entities.Emote Emote}s should be
* added using {@link ButtonMenu.Builder#addChoices(Emote...)
* ButtonMenu.Builder#addChoices(Emote...)}.
*
* @param emojis
* The String unicode emojis to add
*
* @return This builder
*/
public Builder addChoices(String... emojis)
{
for(String emoji : emojis)
addChoice(emoji);
return this;
}
/**
* Adds custom {@link net.dv8tion.jda.api.entities.Emote Emote}s as button choices.
*
* <p>Any regular unicode emojis should be added using {@link
* ButtonMenu.Builder#addChoices(String...)
* ButtonMenu.Builder#addChoices(String...)}.
*
* @param emotes
* The Emote objects to add
*
* @return This builder
*/
public Builder addChoices(Emote... emotes)
{
for(Emote emote : emotes)
addChoice(emote);
return this;
}
/**
 * Sets the String unicode emojis as button choices, discarding any
 * previously configured choices.
 *
 * <p>Custom {@link net.dv8tion.jda.api.entities.Emote Emote}s must be set
 * with {@link ButtonMenu.Builder#setChoices(Emote...) setChoices(Emote...)} instead.
 *
 * @param  emojis
 *         The String unicode emojis to set
 *
 * @return This builder
 */
public Builder setChoices(String... emojis)
{
    // Replace, don't append: drop whatever was configured before.
    choices.clear();
    return addChoices(emojis);
}
/**
 * Sets the {@link net.dv8tion.jda.api.entities.Emote Emote}s as button choices,
 * discarding any previously configured choices.
 *
 * <p>Regular unicode emojis must be set with
 * {@link ButtonMenu.Builder#setChoices(String...) setChoices(String...)} instead.
 *
 * @param  emotes
 *         The Emote objects to set
 *
 * @return This builder
 */
public Builder setChoices(Emote... emotes)
{
    // Replace, don't append: drop whatever was configured before.
    choices.clear();
    return addChoices(emotes);
}
}
}
|
{
"pile_set_name": "Github"
}
|
@rem
@rem Copyright 2015 the original author or authors.
@rem
@rem Licensed under the Apache License, Version 2.0 (the "License");
@rem you may not use this file except in compliance with the License.
@rem You may obtain a copy of the License at
@rem
@rem http://www.apache.org/licenses/LICENSE-2.0
@rem
@rem Unless required by applicable law or agreed to in writing, software
@rem distributed under the License is distributed on an "AS IS" BASIS,
@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@rem See the License for the specific language governing permissions and
@rem limitations under the License.
@rem
@if "%DEBUG%" == "" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem ##########################################################################
@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal
@rem %~dp0 is this script's drive+path; fall back to "." when empty.
set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
@rem %~n0 is the script file name without extension (reported as the app name).
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%
@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome
@rem No JAVA_HOME set: probe for java.exe on the PATH before giving up.
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto init
echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:findJavaFromJavaHome
@rem Strip any quotes from JAVA_HOME before composing the java.exe path.
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto init
echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:init
@rem Get command-line arguments, handling Windows variants
if not "%OS%" == "Windows_NT" goto win9xME_args
:win9xME_args
@rem Slurp the command line arguments.
set CMD_LINE_ARGS=
set _SKIP=2
:win9xME_args_slurp
if "x%~1" == "x" goto execute
set CMD_LINE_ARGS=%*
:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd
:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1
:mainEnd
if "%OS%"=="Windows_NT" endlocal
:omega
|
{
"pile_set_name": "Github"
}
|
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package capnslog
import (
"log"
)
func initHijack() {
pkg := NewPackageLogger("log", "")
w := packageWriter{pkg}
log.SetFlags(0)
log.SetPrefix("")
log.SetOutput(w)
}
// packageWriter adapts a PackageLogger to io.Writer so it can be installed
// as the destination of the standard library "log" package.
type packageWriter struct {
	pl *PackageLogger
}
// Write forwards stdlib log output to the wrapped PackageLogger at INFO.
// Messages are silently dropped when the package's level is below INFO.
func (p packageWriter) Write(b []byte) (int, error) {
	if p.pl.level < INFO {
		return 0, nil
	}
	// calldepth+2 skips the extra stdlib log frames so caller file/line
	// attribution stays correct (calldepth is defined elsewhere in this package).
	p.pl.internalLog(calldepth+2, INFO, string(b))
	return len(b), nil
}
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright 2013-2020 The OpenZipkin Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package brave.propagation;
import brave.Span;
import brave.Tracer;
import brave.Tracer.SpanInScope;
import brave.Tracing;
import brave.internal.Nullable;
import java.util.ArrayDeque;
/**
* This type allows you to place a span in scope in one method and access it in another without
* using an explicit request parameter.
*
* <p>Many libraries expose a callback model as opposed to an interceptor one. When creating new
* instrumentation, you may find places where you need to place a span in scope in one callback
* (like `onStart()`) and end the scope in another callback (like `onFinish()`).
*
* <p>Provided the library guarantees these run on the same thread, you can simply propagate the
* result of {@link Tracer#startScopedSpan(String)} or {@link Tracer#withSpanInScope(Span)} from the
* starting callback to the closing one. This is typically done with a request-scoped attribute.
*
* Here's an example:
* <pre>{@code
* class MyFilter extends Filter {
* public void onStart(Request request, Attributes attributes) {
* // Assume you have code to start the span and add relevant tags...
*
* // We now set the span in scope so that any code between here and
* // the end of the request can see it with Tracer.currentSpan()
* SpanInScope spanInScope = tracer.withSpanInScope(span);
*
* // We don't want to leak the scope, so we place it somewhere we can
* // lookup later
* attributes.put(SpanInScope.class, spanInScope);
* }
*
* public void onFinish(Response response, Attributes attributes) {
* // as long as we are on the same thread, we can read the span started above
* Span span = tracer.currentSpan();
*
* // Assume you have code to complete the span
*
* // We now remove the scope (which implicitly detaches it from the span)
* attributes.remove(SpanInScope.class).close();
* }
* }
* }</pre>
*
 * <p>Sometimes you have to instrument a library where there's no attribute namespace shared across
* request and response. For this scenario, you can use {@link ThreadLocalSpan} to temporarily store
* the span between callbacks.
*
* Here's an example:
* <pre>{@code
* class MyFilter extends Filter {
* final ThreadLocalSpan threadLocalSpan;
*
* public void onStart(Request request) {
* // Allocates a span and places it in scope so that code between here and onFinish can see it
* Span span = threadLocalSpan.next();
* if (span == null || span.isNoop()) return; // skip below logic on noop
*
* // Assume you have code to start the span and add relevant tags...
* }
*
* public void onFinish(Response response, Attributes attributes) {
* // as long as we are on the same thread, we can read the span started above
* Span span = threadLocalSpan.remove();
* if (span == null || span.isNoop()) return; // skip below logic on noop
*
* // Assume you have code to complete the span
* }
* }
* }</pre>
*/
public class ThreadLocalSpan {
  /**
   * This uses the {@link Tracing#currentTracer()}, which means calls to {@link #next()} may return
   * null. Use this when you have no other means to get a reference to the tracer. For example, JDBC
   * connections, as they often initialize prior to the tracing component.
   */
  public static final ThreadLocalSpan CURRENT_TRACER = new ThreadLocalSpan(null);

  public static ThreadLocalSpan create(Tracer tracer) {
    if (tracer == null) throw new NullPointerException("tracer == null");
    return new ThreadLocalSpan(tracer);
  }

  /** Null when this instance should defer to {@link Tracing#currentTracer()}. */
  @Nullable final Tracer tracer;

  ThreadLocalSpan(Tracer tracer) {
    this.tracer = tracer;
  }

  Tracer tracer() {
    return tracer != null ? tracer : Tracing.currentTracer();
  }

  /**
   * Returns the {@link Tracer#nextSpan(TraceContextOrSamplingFlags)} or null if {@link
   * #CURRENT_TRACER} and tracing isn't available.
   */
  @Nullable public Span next(TraceContextOrSamplingFlags extracted) {
    Tracer tracer = tracer();
    if (tracer == null) return null;
    return scopeAndPush(tracer, tracer.nextSpan(extracted));
  }

  /**
   * Returns the {@link Tracer#nextSpan()} or null if {@link #CURRENT_TRACER} and tracing isn't
   * available.
   */
  @Nullable public Span next() {
    Tracer tracer = tracer();
    if (tracer == null) return null;
    return scopeAndPush(tracer, tracer.nextSpan());
  }

  /**
   * Places {@code next} in scope and pushes the span+scope pair onto the thread-local stack so a
   * later {@link #remove()} can close the scope. Extracted so both {@code next} overloads share
   * one implementation instead of duplicating it.
   */
  Span scopeAndPush(Tracer tracer, Span next) {
    SpanAndScope spanAndScope = new SpanAndScope(next, tracer.withSpanInScope(next));
    getCurrentSpanInScopeStack().addFirst(spanAndScope);
    return next;
  }

  /** Pairs a span with the scope that must be closed when the span leaves this thread. */
  static final class SpanAndScope {
    final Span span;
    final SpanInScope spanInScope;

    SpanAndScope(Span span, SpanInScope spanInScope) {
      this.span = span;
      this.spanInScope = spanInScope;
    }
  }

  /**
   * Returns the span set in scope via {@link #next()} or null if there was none.
   *
   * <p>When assertions are on, this will throw an assertion error if the span returned was not the
   * one currently in context. This could happen if someone called {@link
   * Tracer#withSpanInScope(Span)} or {@link CurrentTraceContext#newScope(TraceContext)} outside a
   * try/finally block.
   */
  @Nullable public Span remove() {
    Tracer tracer = tracer();
    Span currentSpan = tracer != null ? tracer.currentSpan() : null;
    SpanAndScope spanAndScope = getCurrentSpanInScopeStack().pollFirst();
    if (spanAndScope == null) return currentSpan;

    Span span = spanAndScope.span;
    spanAndScope.spanInScope.close();
    assert span.equals(currentSpan) :
        "Misalignment: scoped span " + span + " != current span " + currentSpan;
    return currentSpan;
  }

  /**
   * This keeps track of a stack with a normal array dequeue. Redundant stacking of the same span is
   * not possible because there is no api to place an arbitrary span in scope using this api.
   */
  @SuppressWarnings("ThreadLocalUsage") // intentional: to support multiple Tracer instances
  final ThreadLocal<ArrayDeque<SpanAndScope>> currentSpanInScopeStack = new ThreadLocal<>();

  ArrayDeque<SpanAndScope> getCurrentSpanInScopeStack() {
    ArrayDeque<SpanAndScope> stack = currentSpanInScopeStack.get();
    if (stack == null) {
      stack = new ArrayDeque<>();
      currentSpanInScopeStack.set(stack);
    }
    return stack;
  }
}
|
{
"pile_set_name": "Github"
}
|
/*
* Viry3D
* Copyright 2014-2019 by Stack - stackos@qq.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "String.h"
#include "memory/Memory.h"
#include <stdarg.h>
#if VR_WINDOWS
#include <Windows.h>
#endif
namespace Viry3D
{
// Standard base64 alphabet indexed by 6-bit value (RFC 4648).
static const char BASE64_TABLE[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
// printf-style formatting into a new String.
String String::Format(const char* format, ...)
{
    String result;

    // First pass: measure the formatted length without writing anything.
    va_list vs;
    va_start(vs, format);
    int size = vsnprintf(nullptr, 0, format, vs);
    va_end(vs);

    // vsnprintf returns a negative value on an output/encoding error; the
    // original code then allocated with a bogus size and wrote buffer[-1].
    // Return an empty string instead.
    if (size < 0)
    {
        return result;
    }

    // Second pass: format into an exactly sized buffer (+1 for the NUL).
    char* buffer = Memory::Alloc<char>(size + 1);
    buffer[size] = 0;
    va_start(vs, format);
    size = vsnprintf(buffer, size + 1, format, vs);
    va_end(vs);

    result.m_string = buffer;
    Memory::Free(buffer, size + 1);
    return result;
}
// Encodes `size` bytes as standard (RFC 4648) base64; output length is
// always a multiple of 4, '=' padded.
String String::Base64(const char* bytes, int size)
{
    // Round the input length up to a multiple of 3; each 3-byte group
    // becomes 4 output characters.
    int size_pad = size;
    if (size_pad % 3 != 0)
    {
        size_pad += 3 - (size_pad % 3);
    }
    int round = size_pad / 3;
    std::string str(round * 4, '\0');
    int index;
    char a, b, c;
    for (int i = 0; i < round; ++i)
    {
        // Bytes past the end of the input are treated as zero; the padding
        // loop below overwrites the characters they produced with '='.
        a = 0; b = 0; c = 0;
        index = i * 3 + 0;
        if (index < size) a = bytes[index];
        index = i * 3 + 1;
        if (index < size) b = bytes[index];
        index = i * 3 + 2;
        if (index < size) c = bytes[index];
        // Split the 24-bit group into four 6-bit table indices.
        str[i * 4 + 0] = BASE64_TABLE[(a & 0xfc) >> 2];
        str[i * 4 + 1] = BASE64_TABLE[((a & 0x3) << 4) | ((b & 0xf0) >> 4)];
        str[i * 4 + 2] = BASE64_TABLE[((b & 0xf) << 2) | ((c & 0xc0) >> 6)];
        str[i * 4 + 3] = BASE64_TABLE[c & 0x3f];
    }
    // One '=' per padding byte, written over the tail of the last group.
    for (int i = size_pad - size, j = 0; i > 0; --i, ++j)
    {
        str[(round - 1) * 4 + 3 - j] = '=';
    }
    return String(str.c_str());
}
// Converts UTF-8 text to the Windows ANSI code page (GB2312 on a Chinese
// system) by round-tripping through UTF-16. On non-Windows platforms the
// input is returned unchanged.
String String::Utf8ToGb2312(const String& str)
{
#if VR_WINDOWS
    // First call with a null buffer asks the API for the required length.
    int size = MultiByteToWideChar(CP_UTF8, 0, str.CString(), str.Size(), nullptr, 0);
    wchar_t* wstr = (wchar_t*) calloc(1, (size + 1) * 2);
    MultiByteToWideChar(CP_UTF8, 0, str.CString(), str.Size(), wstr, size);
    // NOTE(review): `false` is passed where WideCharToMultiByte expects an
    // LPBOOL out-parameter, relying on it converting to a null pointer —
    // confirm this still compiles cleanly on newer toolchains.
    size = WideCharToMultiByte(CP_ACP, 0, wstr, size, nullptr, 0, nullptr, false);
    char* cstr = (char*) calloc(1, size + 1);
    WideCharToMultiByte(CP_ACP, 0, wstr, size, cstr, size, nullptr, false);
    String ret = cstr;
    free(cstr);
    free(wstr);
    return ret;
#else
    return str;
#endif
}
// Inverse of Utf8ToGb2312: converts ANSI-code-page text to UTF-8 via UTF-16.
// On non-Windows platforms the input is returned unchanged.
String String::Gb2312ToUtf8(const String& str)
{
#if VR_WINDOWS
    // Measure, convert to UTF-16, then measure and convert to UTF-8.
    int size = MultiByteToWideChar(CP_ACP, 0, str.CString(), str.Size(), nullptr, 0);
    wchar_t* wstr = (wchar_t*) calloc(1, (size + 1) * 2);
    MultiByteToWideChar(CP_ACP, 0, str.CString(), str.Size(), wstr, size);
    size = WideCharToMultiByte(CP_UTF8, 0, wstr, size, nullptr, 0, nullptr, false);
    char* cstr = (char*) calloc(1, size + 1);
    WideCharToMultiByte(CP_UTF8, 0, wstr, size, cstr, size, nullptr, false);
    String ret = cstr;
    free(cstr);
    free(wstr);
    return ret;
#else
    return str;
#endif
}
// Decodes application/x-www-form-urlencoded text: '+' becomes a space and
// "%XY" hex escapes become the corresponding byte. The result is written
// in place over a copy (it is never longer than the input).
String String::UrlDecode(const String& str)
{
    std::string dest = str.CString();
    int i = 0;
    int j = 0;
    int size = (int) str.Size();
    while (i < size)
    {
        char c = str[i];
        switch (c)
        {
            case '+':
                dest[j++] = ' ';
                i++;
                break;
            case '%':
                if (i + 2 < size)
                {
                    // Two hex digits follow; strtol stops at the first
                    // non-hex character, yielding 0 for malformed digits.
                    auto sub = str.Substring(i + 1, 2);
                    char v = (char) strtol(sub.CString(), nullptr, 16);
                    dest[j++] = v;
                    i += 3;
                }
                else
                {
                    // Malformed escape at the end of the input ("%" or "%X"):
                    // keep it literally and advance. The previous code never
                    // advanced `i` here and looped forever.
                    dest[j++] = c;
                    i++;
                }
                break;
            default:
                dest[j++] = c;
                i++;
                break;
        }
    }
    return String(dest.c_str(), j);
}
// Constructs an empty string.
String::String()
{
}

// Constructs from a NUL-terminated C string.
String::String(const char *str):
    m_string(str)
{
}

// Constructs from `size` bytes; embedded NULs are preserved.
String::String(const char* str, int size) :
    m_string(str, size)
{
}

// Interprets the buffer's raw bytes as character data.
String::String(const ByteBuffer& buffer) :
    m_string((const char*) buffer.Bytes(), buffer.Size())
{
}
// Length in bytes (not unicode code points).
int String::Size() const
{
    return (int) m_string.size();
}

// True when the string holds no bytes.
bool String::Empty() const
{
    return m_string.empty();
}
// Equality is byte-wise: sizes must match and contents must compare equal.
bool String::operator ==(const String& right) const
{
    if (this->Size() == right.Size())
    {
        return Memory::Compare(this->m_string.data(), right.m_string.data(), (int) m_string.size()) == 0;
    }
    return false;
}

bool String::operator !=(const String& right) const
{
    return !(*this == right);
}

// A null C string never compares equal.
bool String::operator ==(const char* right) const
{
    if (right && this->Size() == strlen(right))
    {
        return Memory::Compare(this->m_string.data(), right, this->Size()) == 0;
    }
    return false;
}

bool String::operator !=(const char* right) const
{
    return !(*this == right);
}

// Free-function overloads so a C string may appear on the left-hand side.
bool operator ==(const char* left, const String& right)
{
    return right == left;
}

bool operator !=(const char* left, const String& right)
{
    return right != left;
}
// Concatenation returns a new string; neither operand is modified.
String String::operator +(const String& right) const
{
    String result;
    result.m_string = m_string + right.m_string;
    return result;
}

// Append in place (implemented via operator+, so it reallocates).
String& String::operator +=(const String& right)
{
    *this = *this + right;
    return *this;
}

// Free-function overload so a C string may appear on the left-hand side.
String operator +(const char* left, const String& right)
{
    return String(left) + right;
}
// Lexicographic byte-wise ordering, enabling use as a sorted-container key.
bool String::operator <(const String& right) const
{
    return m_string < right.m_string;
}

// Unchecked element access; `index` must be within [0, Size()).
char& String::operator[](int index)
{
    return m_string[index];
}

const char& String::operator[](int index) const
{
    return m_string[index];
}

// NUL-terminated view; invalidated by any mutation of this string.
const char* String::CString() const
{
    return m_string.c_str();
}
// Forward search for `str` starting at byte offset `start`.
// Returns the byte index of the first occurrence, or -1 when absent.
int String::IndexOf(const String& str, int start) const
{
    const size_t pos = m_string.find(str.m_string, start);
    return pos == std::string::npos ? -1 : (int) pos;
}
bool String::Contains(const String& str) const
{
return this->IndexOf(str) >= 0;
}
// Backward search for `str` beginning at byte offset `start`.
// Returns the byte index of the last occurrence at or before `start`, or -1.
int String::LastIndexOf(const String& str, int start) const
{
    const size_t pos = m_string.rfind(str.m_string, start);
    return pos == std::string::npos ? -1 : (int) pos;
}
// Returns a copy with every occurrence of `old` replaced by `to`.
String String::Replace(const String& old, const String& to) const
{
    String result(*this);

    // An empty pattern matches at every position; with an empty `to` the
    // original loop never advanced and spun forever. Treat it as a no-op
    // (nothing concrete to replace).
    if (old.Empty())
    {
        return result;
    }

    int start = 0;
    while (true)
    {
        int index = result.IndexOf(old, start);
        if (index >= 0)
        {
            result.m_string.replace(index, old.m_string.size(), to.m_string);
            // Resume after the inserted text so occurrences inside `to`
            // are not rescanned (prevents looping when `to` contains `old`).
            start = index + (int) to.m_string.size();
        }
        else
        {
            break;
        }
    }
    return result;
}
// Splits on every occurrence of `separator`. The segment after the last
// separator is always considered; empty segments are dropped when
// `exclude_empty` is true.
Vector<String> String::Split(const String& separator, bool exclude_empty) const
{
    Vector<String> result;
    int start = 0;
    while (true)
    {
        int index = this->IndexOf(separator, start);
        if (index >= 0)
        {
            String str = this->Substring(start, index - start);
            if (!str.Empty() || !exclude_empty)
            {
                result.Add(str);
            }
            start = index + separator.Size();
        }
        else
        {
            break;
        }
    }
    // Tail segment: Substring(start, -1) yields the rest of the string.
    String str = this->Substring(start, -1);
    if (!str.Empty() || !exclude_empty)
    {
        result.Add(str);
    }
    return result;
}
// True when this string begins with `str` (every string starts with "").
bool String::StartsWith(const String& str) const
{
    if (str.Size() == 0)
    {
        return true;
    }
    else if (this->Size() < str.Size())
    {
        return false;
    }
    else
    {
        // Byte-wise prefix comparison.
        return Memory::Compare(&(*this)[0], &str[0], str.Size()) == 0;
    }
}

// True when this string ends with `str` (every string ends with "").
bool String::EndsWith(const String& str) const
{
    if (str.Size() == 0)
    {
        return true;
    }
    else if (this->Size() < str.Size())
    {
        return false;
    }
    else
    {
        // Byte-wise suffix comparison anchored at the tail.
        return Memory::Compare(&(*this)[this->Size() - str.Size()], &str[0], str.Size()) == 0;
    }
}
// Returns `count` bytes starting at `start`. A count of -1 (as used by
// Split) converts to std::string::npos, yielding the suffix from `start`.
String String::Substring(int start, int count) const
{
    String result;
    result.m_string = m_string.substr(start, count);
    return result;
}
// Decodes one UTF-8 sequence starting at `utf8` into `c32`.
// Returns the number of bytes consumed, or 0 when the lead byte is invalid.
static int Utf8ToUnicode32(const char* utf8, char32_t& c32)
{
    // Count the lead byte's leading 1-bits to get the sequence length
    // (0xxxxxxx -> 1 byte, 110xxxxx -> 2 bytes, 1110xxxx -> 3 bytes, ...).
    int byte_count = 0;
    for (int i = 0; i < 8; ++i)
    {
        unsigned char c = utf8[0];
        if (((c << i) & 0x80) == 0)
        {
            if (i == 0)
            {
                byte_count = 1;
            }
            else
            {
                byte_count = i;
            }
            break;
        }
    }
    // Accepts the legacy 5/6-byte forms as well as the RFC 3629 1-4 range.
    if (byte_count >= 1 && byte_count <= 6)
    {
        char32_t code = 0;
        for (int i = 0; i < byte_count; ++i)
        {
            unsigned int c = utf8[i];
            unsigned char part;
            if (i == 0)
            {
                // Mask off the length-marker bits, keeping only the lead
                // byte's payload bits.
                part = (c << (byte_count + 24)) >> (byte_count + 24);
            }
            else
            {
                // Continuation bytes contribute their low 6 bits.
                // NOTE(review): the 10xxxxxx continuation marker is not
                // validated — malformed input decodes silently.
                part = c & 0x3f;
            }
            code = (code << 6) | part;
        }
        c32 = code;
        return byte_count;
    }
    else
    {
        return 0;
    }
}
// Encodes a single code point as UTF-8 (1-6 bytes, legacy pre-RFC 3629 range).
static Vector<char> Unicode32ToUtf8(char32_t c32)
{
    Vector<char> buffer;
    // Choose the sequence length from the code point's magnitude.
    int byte_count = 0;
    if (c32 <= 0x7f)
    {
        byte_count = 1;
    }
    else if (c32 <= 0x7ff)
    {
        byte_count = 2;
    }
    else if (c32 <= 0xffff)
    {
        byte_count = 3;
    }
    else if (c32 <= 0x1fffff)
    {
        byte_count = 4;
    }
    else if (c32 <= 0x3ffffff)
    {
        byte_count = 5;
    }
    else if (c32 <= 0x7fffffff)
    {
        byte_count = 6;
    }
    // NOTE(review): values above 0x7fffffff leave byte_count at 0 and yield
    // an empty result — confirm callers never pass such input.
    std::vector<char> bytes;
    // Emit 6 payload bits per continuation byte, lowest bits first.
    for (int i = 0; i < byte_count - 1; ++i)
    {
        bytes.push_back((c32 & 0x3f) | 0x80);
        c32 >>= 6;
    }
    // The lead byte carries the remaining bits plus the length-marker prefix.
    if (byte_count > 1)
    {
        bytes.push_back((char) (c32 | (0xffffff80 >> (byte_count - 1))));
    }
    else
    {
        bytes.push_back((char) (c32));
    }
    // Bytes were produced in reverse; copy them out in transmission order.
    for (int i = 0; i < byte_count; ++i)
    {
        buffer.Add(bytes[byte_count - 1 - i]);
    }
    return buffer;
}
// Decodes the UTF-8 contents into code points, stopping silently at the
// first byte that is not a valid sequence lead.
Vector<char32_t> String::ToUnicode32() const
{
    Vector<char32_t> unicode;
    int size = (int) m_string.size();
    for (int i = 0; i < size; ++i)
    {
        char32_t unicode32 = 0;
        int byte_count = Utf8ToUnicode32(&m_string[i], unicode32);
        if (byte_count > 0)
        {
            unicode.Add(unicode32);
            // Skip the continuation bytes just consumed (loop adds 1 more).
            i += byte_count - 1;
        }
        else
        {
            break;
        }
    }
    return unicode;
}
// Builds a UTF-8 string from a zero-terminated UTF-32 sequence.
String::String(const char32_t* unicode32)
{
    Vector<char> str;
    for (int i = 0; unicode32[i] != 0; ++i)
    {
        char32_t c32 = unicode32[i];
        auto bytes = Unicode32ToUtf8(c32);
        str.AddRange(&bytes[0], bytes.Size());
    }
    str.Add(0);
    m_string = &str[0];
}

// Builds a UTF-8 string from `size` UTF-32 code units.
// NOTE(review): assignment via `&str[0]` treats the buffer as a C string,
// so an embedded U+0000 truncates the result — confirm that is acceptable.
String::String(const char32_t* unicode32, int size)
{
    Vector<char> str;
    for (int i = 0; i < size; ++i)
    {
        char32_t c32 = unicode32[i];
        auto bytes = Unicode32ToUtf8(c32);
        str.AddRange(&bytes[0], bytes.Size());
    }
    str.Add(0);
    m_string = &str[0];
}
// ASCII-only lowering: only 'A'..'Z' are mapped; all other bytes (including
// UTF-8 continuation bytes) pass through unchanged.
String String::ToLower() const
{
    Vector<char> lowered;
    for (char ch : m_string)
    {
        if (ch >= 'A' && ch <= 'Z')
        {
            ch = (char) (ch + ('a' - 'A'));
        }
        lowered.Add(ch);
    }
    lowered.Add(0);
    return String(&lowered[0]);
}
// ASCII-only raising: only 'a'..'z' are mapped; all other bytes (including
// UTF-8 continuation bytes) pass through unchanged.
String String::ToUpper() const
{
    Vector<char> raised;
    for (char ch : m_string)
    {
        if (ch >= 'a' && ch <= 'z')
        {
            ch = (char) (ch - ('a' - 'A'));
        }
        raised.Add(ch);
    }
    raised.Add(0);
    return String(&raised[0]);
}
}
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0" encoding="utf-8"?>
<!--
This file is automatically generated by Visual Studio .Net. It is
used to store generic object data source configuration information.
Renaming the file extension or editing the content of this file may
cause the file to be unrecognizable by the program.
-->
<GenericObjectDataSource DisplayName="GetNextDeparturesWithDetailsResponse" Version="1.0" xmlns="urn:schemas-microsoft-com:xml-msdatasource">
<TypeInfo>Huxley.ldbStaffServiceReference.GetNextDeparturesWithDetailsResponse, Service References.ldbStaffServiceReference.Reference.cs.dll, Version=0.0.0.0, Culture=neutral, PublicKeyToken=null</TypeInfo>
</GenericObjectDataSource>
|
{
"pile_set_name": "Github"
}
|
/*
* $Id: TestRequestUtilsPopulate.java 471754 2006-11-06 14:55:09Z husted $
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.struts.util;
import javax.servlet.ServletException;
import junit.framework.Test;
import junit.framework.TestSuite;
import org.apache.struts.action.ActionMapping;
import org.apache.struts.util.RequestUtils;
import org.apache.struts.Globals;
import org.apache.struts.mock.TestMockBase;
import org.apache.struts.mock.MockFormBean;
import org.apache.struts.mock.MockMultipartRequestHandler;
/**
* Unit tests for the RequestUtil's <code>populate</code> method.
*
* @version $Rev: 471754 $
*/
public class TestRequestUtilsPopulate extends TestMockBase {

    /**
     * Defines the testcase name for JUnit.
     *
     * @param theName the testcase's name.
     */
    public TestRequestUtilsPopulate(String theName) {
        super(theName);
    }

    /**
     * Start the tests.
     *
     * @param theArgs the arguments. Not used
     */
    public static void main(String[] theArgs) {
        junit.awtui.TestRunner.main(
            new String[] { TestRequestUtilsPopulate.class.getName()});
    }

    /**
     * @return a test suite (<code>TestSuite</code>) that includes all methods
     *         starting with "test"
     */
    public static Test suite() {
        // All methods starting with "test" will be executed in the test suite.
        return new TestSuite(TestRequestUtilsPopulate.class);
    }

    // JUnit 3 lifecycle; TestMockBase prepares the mock request/response.
    public void setUp() {
        super.setUp();
    }

    public void tearDown() {
        super.tearDown();
    }

    /**
     * Ensure that the getMultipartRequestHandler cannot be seen in
     * a subclass of ActionForm.
     *
     * The purpose of this test is to ensure that Bug #38534 is fixed.
     */
    public void testMultipartVisibility() throws Exception {
        String mockMappingName = "mockMapping";
        String stringValue = "Test";
        MockFormBean mockForm = new MockFormBean();
        ActionMapping mapping = new ActionMapping();
        mapping.setName(mockMappingName);

        // Set up the mock HttpServletRequest
        request.setMethod("POST");
        request.setContentType("multipart/form-data");
        request.setAttribute(Globals.MULTIPART_KEY, MockMultipartRequestHandler.class.getName());
        request.setAttribute(Globals.MAPPING_KEY, mapping);
        // The second parameter attempts to traverse into the multipart
        // handler and rename the mapping; populate() must reject this.
        request.addParameter("stringProperty", stringValue);
        request.addParameter("multipartRequestHandler.mapping.name", "Bad");

        // Check the Mapping/ActionForm before
        assertNull("Multipart Handler already set", mockForm.getMultipartRequestHandler());
        assertEquals("Mapping name not set correctly", mockMappingName, mapping.getName());

        // Try to populate
        try {
            RequestUtils.populate(mockForm, request);
        } catch(ServletException se) {
            // Expected BeanUtils.populate() to throw a NestedNullException
            // which gets wrapped in RequestUtils in a ServletException
            assertEquals("Unexpected type of Exception thrown", "BeanUtils.populate", se.getMessage());
        }
        // NOTE(review): there is no fail() after populate(), so the test also
        // passes when no exception is thrown — confirm that is intentional.

        // Check the Mapping/ActionForm after
        assertNotNull("Multipart Handler Missing", mockForm.getMultipartRequestHandler());
        assertEquals("Mapping name has been modified", mockMappingName, mapping.getName());
    }
}
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright 2016 Crown Copyright
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package stroom.explorer.client.event;
import com.google.gwt.event.shared.EventHandler;
import com.google.gwt.event.shared.GwtEvent;
import com.google.gwt.event.shared.HasHandlers;
import com.gwtplatform.mvp.client.Layer;
import stroom.widget.tab.client.presenter.TabData;
public class OpenExplorerTabEvent extends GwtEvent<OpenExplorerTabEvent.Handler> {
    // Association type shared by all instances; created lazily in getType().
    private static Type<Handler> TYPE;
    // Descriptor of the tab to open and the content layer it will display.
    private final TabData tabData;
    private final Layer layer;

    private OpenExplorerTabEvent(final TabData tabData, final Layer layer) {
        this.tabData = tabData;
        this.layer = layer;
    }

    /**
     * Creates an OpenExplorerTabEvent for the given tab/layer pair and fires
     * it on {@code handlers}.
     */
    public static void fire(final HasHandlers handlers, final TabData tabData, final Layer layer) {
        handlers.fireEvent(new OpenExplorerTabEvent(tabData, layer));
    }

    public static Type<Handler> getType() {
        // Lazy initialisation, the usual GWT event idiom.
        if (TYPE == null) {
            TYPE = new Type<>();
        }
        return TYPE;
    }

    @Override
    public Type<Handler> getAssociatedType() {
        return getType();
    }

    @Override
    protected void dispatch(final Handler handler) {
        handler.onOpen(this);
    }

    public TabData getTabData() {
        return tabData;
    }

    public Layer getLayer() {
        return layer;
    }

    /** Implemented by presenters that respond to open-tab requests. */
    public interface Handler extends EventHandler {
        void onOpen(OpenExplorerTabEvent event);
    }
}
|
{
"pile_set_name": "Github"
}
|
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "google/cloud/storage/client.h"
#include "google/cloud/storage/oauth2/google_credentials.h"
#include <nlohmann/json.hpp>
#include <iostream>
int main() {
  // Adding openssl to the global namespace when the user does not explicitly
  // asks for it is too much namespace pollution. The application may not want
  // that many dependencies. Also, on Windows that may drag really unwanted
  // dependencies.
#ifdef OPENSSL_VERSION_NUMBER
#error "OPENSSL should not be included by storage public headers"
#endif  // OPENSSL_VERSION_NUMBER
  // Adding libcurl to the global namespace when the user does not explicitly
  // asks for it is too much namespace pollution. The application may not want
  // that many dependencies. Also, on Windows that may drag really unwanted
  // dependencies.
#ifdef LIBCURL_VERSION
#error "LIBCURL should not be included by storage public headers"
#endif  // LIBCURL_VERSION
  std::cout << "PASSED: this is a compile-time test\n";
  return 0;
}
|
{
"pile_set_name": "Github"
}
|
// Copyright 2017 The CrunchyCrypt Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef CRUNCHY_ALGS_MAC_MAC_INTERFACE_H_
#define CRUNCHY_ALGS_MAC_MAC_INTERFACE_H_
#include <stddef.h>
#include <memory>
#include <string>
#include "absl/strings/string_view.h"
#include "crunchy/util/status.h"
namespace crunchy {
// An interface for computing and verifying a cryptographic MAC.
class MacInterface {
 public:
  virtual ~MacInterface() = default;

  // Computes the MAC of `input` and returns it as the signature (tag).
  virtual StatusOr<std::string> Sign(absl::string_view input) const = 0;

  // Verifies that `signature` is a valid MAC for `input`.
  virtual Status Verify(absl::string_view input,
                        absl::string_view signature) const = 0;

  // Returns the length in bytes of signatures produced by Sign().
  virtual size_t GetSignatureLength() const = 0;
};
// Factory producing MacInterface instances from raw key bytes.
class MacFactory {
 public:
  virtual ~MacFactory() = default;

  // Required key length in bytes for Make().
  virtual size_t GetKeyLength() const = 0;

  // Signature (tag) length of MACs produced by instances from this factory.
  virtual size_t GetSignatureLength() const = 0;

  // Creates a MacInterface bound to `key`.
  virtual StatusOr<std::unique_ptr<MacInterface>> Make(
      absl::string_view key) const = 0;
};
} // namespace crunchy
#endif // CRUNCHY_ALGS_MAC_MAC_INTERFACE_H_
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright 2013-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.cloudfoundry.client.v2.spacequotadefinitions;
import com.fasterxml.jackson.annotation.JsonIgnore;
import org.immutables.value.Value;
/**
 * The request payload for the Retrieve a Particular Space Quota Definition operation
 */
@Value.Immutable
abstract class _GetSpaceQuotaDefinitionRequest {

    /**
     * The space quota definition id
     *
     * <p>Excluded from JSON serialization via {@code @JsonIgnore}; presumably
     * supplied in the request path instead — verify against the operation.
     */
    @JsonIgnore
    abstract String getSpaceQuotaDefinitionId();

}
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright 2000-2012 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.codeInsight.completion.originInfo;
/**
* @author Max Medvedev
*/
public interface OriginInfoAwareElement {
    /**
     * @return a short description of this element's origin, or null when none
     *         is available
     */
    @javax.annotation.Nullable
    String getOriginInfo();
}
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright 2014 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.integtests.tooling.r22
import org.gradle.integtests.tooling.CancellationSpec
import org.gradle.integtests.tooling.fixture.TestResultHandler
import org.gradle.integtests.tooling.r18.BrokenAction
import org.gradle.tooling.BuildCancelledException
import org.gradle.tooling.GradleConnector
import org.gradle.tooling.ProjectConnection
import org.gradle.tooling.model.GradleProject
/**
 * Cross-version tests verifying that a Tooling API client can cancel builds,
 * model requests and build actions at different phases of the build lifecycle
 * (settings, configuration, task execution), including a forced daemon stop.
 *
 * Common shape of each feature method: the build under test blocks on a
 * server handle, the test cancels the token while the build is blocked,
 * releases the build, and then asserts how the build reported cancellation.
 */
class CancellationCrossVersionSpec extends CancellationSpec {

    def "can cancel build during settings phase"() {
        // Block inside settings.gradle; build.gradle must never be evaluated.
        settingsFile << waitForCancel()
        buildFile << """
            throw new RuntimeException("should not run")
        """

        def cancel = GradleConnector.newCancellationTokenSource()
        def sync = server.expectAndBlock("registered")
        def resultHandler = new TestResultHandler()

        when:
        withConnection { ProjectConnection connection ->
            def build = connection.newBuild()
            build.forTasks(':sub:broken')
            build.withCancellationToken(cancel.token())
            collectOutputs(build)
            build.run(resultHandler)
            // Wait until the build is blocked, then cancel and let it proceed.
            sync.waitForAllPendingCalls(resultHandler)
            cancel.cancel()
            sync.releaseAll()
            resultHandler.finished()
        }

        then:
        buildWasCancelled(resultHandler)
    }

    def "can cancel build during configuration phase"() {
        file("gradle.properties") << "org.gradle.configureondemand=${configureOnDemand}"
        setupCancelInConfigurationBuild()

        def cancel = GradleConnector.newCancellationTokenSource()
        def sync = server.expectAndBlock("registered")
        def resultHandler = new TestResultHandler()

        when:
        withConnection { ProjectConnection connection ->
            def build = connection.newBuild()
            build.forTasks(':sub:broken')
            build.withCancellationToken(cancel.token())
            collectOutputs(build)
            build.run(resultHandler)
            sync.waitForAllPendingCalls(resultHandler)
            cancel.cancel()
            sync.releaseAll()
            resultHandler.finished()
        }

        then:
        buildWasCancelled(resultHandler)

        where:
        // Exercised with configure-on-demand both enabled and disabled.
        configureOnDemand << [true, false]
    }

    def "can cancel model creation during configuration phase"() {
        file("gradle.properties") << "org.gradle.configureondemand=${configureOnDemand}"
        setupCancelInConfigurationBuild()

        def cancel = GradleConnector.newCancellationTokenSource()
        def sync = server.expectAndBlock("registered")
        def resultHandler = new TestResultHandler()

        when:
        withConnection { ProjectConnection connection ->
            def model = connection.model(GradleProject)
            model.withCancellationToken(cancel.token())
            collectOutputs(model)
            model.get(resultHandler)
            sync.waitForAllPendingCalls(resultHandler)
            cancel.cancel()
            sync.releaseAll()
            resultHandler.finished()
        }

        then:
        configureWasCancelled(resultHandler, "Could not fetch model of type 'GradleProject' using")

        where:
        configureOnDemand << [true, false]
    }

    def "can cancel build action execution during configuration phase"() {
        file("gradle.properties") << "org.gradle.configureondemand=${configureOnDemand}"
        setupCancelInConfigurationBuild()

        def cancel = GradleConnector.newCancellationTokenSource()
        def sync = server.expectAndBlock("registered")
        def resultHandler = new TestResultHandler()

        when:
        withConnection { ProjectConnection connection ->
            def action = connection.action(new BrokenAction())
            action.withCancellationToken(cancel.token())
            collectOutputs(action)
            action.run(resultHandler)
            sync.waitForAllPendingCalls(resultHandler)
            cancel.cancel()
            sync.releaseAll()
            resultHandler.finished()
        }

        then:
        configureWasCancelled(resultHandler, "Could not run build action using")

        where:
        configureOnDemand << [true, false]
    }

    def "can cancel build and skip some tasks"() {
        // :hang blocks until cancelled; its dependent task must never run.
        buildFile << """
            task hang {
                doLast {
                    ${waitForCancel()}
                }
            }

            task notExecuted(dependsOn: hang) {
                doLast {
                    throw new RuntimeException("should not run")
                }
            }
        """

        def cancel = GradleConnector.newCancellationTokenSource()
        def sync = server.expectAndBlock("registered")
        def resultHandler = new TestResultHandler()

        when:
        withConnection { ProjectConnection connection ->
            def build = connection.newBuild()
            build.forTasks('notExecuted')
            build.withCancellationToken(cancel.token())
            collectOutputs(build)
            build.run(resultHandler)
            sync.waitForAllPendingCalls(resultHandler)
            cancel.cancel()
            sync.releaseAll()
            resultHandler.finished()
        }

        then:
        taskWasCancelled(resultHandler, ":hang")
    }

    def "does not fail when scheduled tasks complete within the cancellation timeout"() {
        buildFile << """
            task hang {
                doLast {
                    ${waitForCancel()}
                }
            }
        """

        def cancel = GradleConnector.newCancellationTokenSource()
        def sync = server.expectAndBlock("registered")
        def resultHandler = new TestResultHandler()

        when:
        withConnection { ProjectConnection connection ->
            def build = connection.newBuild()
            build.forTasks('hang')
            build.withCancellationToken(cancel.token())
            collectOutputs(build)
            build.run(resultHandler)
            sync.waitForAllPendingCalls(resultHandler)
            cancel.cancel()
            sync.releaseAll()
            resultHandler.finished()
        }

        then:
        // The only task finishes after cancellation, so the build ends normally.
        noExceptionThrown()
    }

    def "can cancel build through forced stop"() {
        // in-process call does not support forced stop
        toolingApi.requireDaemons()
        // The task never acknowledges cancellation, forcing a daemon stop.
        buildFile << """
            task hang {
                doLast {
                    ${server.callFromBuild("waiting")}
                }
            }
        """

        def cancel = GradleConnector.newCancellationTokenSource()
        def sync = server.expectAndBlock("waiting")
        def resultHandler = new TestResultHandler()

        when:
        withConnection { ProjectConnection connection ->
            def build = connection.newBuild()
            build.forTasks('hang')
            build.withCancellationToken(cancel.token())
            collectOutputs(build)
            build.run(resultHandler)
            sync.waitForAllPendingCalls(resultHandler)
            cancel.cancel()
            resultHandler.finished()
        }

        then:
        resultHandler.assertFailedWith(BuildCancelledException)
        resultHandler.failure.message.startsWith("Could not execute build using")
        // NOTE(review): statements nested inside an `if` within a then: block are
        // NOT implicitly asserted by Spock -- this startsWith result is discarded.
        // Consider `assert resultHandler.failure.cause.message.startsWith(...)`.
        if (targetDist.toolingApiHasCauseOnForcedCancel) {
            resultHandler.failure.cause.message.startsWith("Daemon was stopped to handle build cancel request.")
        }
        // TODO - should have a failure report in the logging output
    }
}
|
{
"pile_set_name": "Github"
}
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DNC Cores.
These modules create a DNC core. They take input, pass parameters to the memory
access module, and integrate the output of memory to form an output.
"""
from __future__ import absolute_import, division, print_function
import collections
import numpy as np
import tensorflow as tf
import access
import sonnet as snt
# Recurrent state of the DNC core: the previous memory-read output, the
# access (memory) module state, and the controller (LSTM) state.
DNCState = collections.namedtuple(
    'DNCState',
    ['access_output', 'access_state', 'controller_state'])
class DNC(snt.RNNCore):
  """DNC core module.

  Contains controller and memory access module.
  """

  def __init__(self,
               access_config,
               controller_config,
               output_size,
               clip_value=None,
               name='dnc'):
    """Initializes the DNC core.

    Args:
      access_config: dictionary of access module configurations.
      controller_config: dictionary of controller (LSTM) module configurations.
      output_size: output dimension size of core.
      clip_value: clips controller and core output values to between
          `[-clip_value, clip_value]` if specified.
      name: module name (default 'dnc').

    Raises:
      TypeError: if direct_input_size is not None for any access module other
        than KeyValueMemory.
    """
    super(DNC, self).__init__(name=name)

    # Sub-modules must be constructed inside this module's variable scope so
    # their variables are namespaced under the DNC core.
    with self._enter_variable_scope():
      self._controller = snt.LSTM(**controller_config)
      self._access = access.MemoryAccess(**access_config)

    self._access_output_size = np.prod(self._access.output_size.as_list())
    self._clip_value = clip_value or 0  # 0 disables clipping

    # Canonical representation of the output size as a TensorShape. (The
    # original code first assigned the plain int and immediately overwrote
    # it with this TensorShape -- the int assignment was dead and removed.)
    self._output_size = tf.TensorShape([output_size])
    self._state_size = DNCState(
        access_output=self._access_output_size,
        access_state=self._access.state_size,
        controller_state=self._controller.state_size)

  def _clip_if_enabled(self, x):
    """Clips `x` to [-clip_value, clip_value] when clipping is enabled."""
    if self._clip_value > 0:
      return tf.clip_by_value(x, -self._clip_value, self._clip_value)
    else:
      return x

  def _build(self, inputs, prev_state):
    """Connects the DNC core into the graph.

    Args:
      inputs: Tensor input.
      prev_state: A `DNCState` tuple containing the fields `access_output`,
        `access_state` and `controller_state`. `access_output` is a 3-D Tensor
        of shape `[batch_size, num_reads, word_size]` containing read words.
        `access_state` is a tuple of the access module's state, and
        `controller_state` is a tuple of controller module's state.

    Returns:
      A tuple `(output, next_state)` where `output` is a tensor and
      `next_state` is a `DNCState` tuple containing the fields
      `access_output`, `access_state`, and `controller_state`.
    """
    prev_access_output = prev_state.access_output
    prev_access_state = prev_state.access_state
    prev_controller_state = prev_state.controller_state

    # Controller sees the external input concatenated with last step's reads.
    batch_flatten = snt.BatchFlatten()
    controller_input = tf.concat(
        [batch_flatten(inputs), batch_flatten(prev_access_output)], 1)

    controller_output, controller_state = self._controller(
        controller_input, prev_controller_state)

    controller_output = self._clip_if_enabled(controller_output)
    controller_state = snt.nest.map(self._clip_if_enabled, controller_state)

    access_output, access_state = self._access(controller_output,
                                               prev_access_state)

    # Core output combines controller output with the current memory reads.
    output = tf.concat([controller_output, batch_flatten(access_output)], 1)
    output = snt.Linear(
        output_size=self._output_size.as_list()[0],
        name='output_linear')(output)
    output = self._clip_if_enabled(output)

    return output, DNCState(
        access_output=access_output,
        access_state=access_state,
        controller_state=controller_state)

  def initial_state(self, batch_size, dtype=tf.float32):
    """Returns an all-zero initial `DNCState` for a batch of `batch_size`."""
    return DNCState(
        controller_state=self._controller.initial_state(batch_size, dtype),
        access_state=self._access.initial_state(batch_size, dtype),
        access_output=tf.zeros(
            [batch_size] + self._access.output_size.as_list(), dtype))

  @property
  def state_size(self):
    """Sizes of the `DNCState` components (see Sonnet RNNCore contract)."""
    return self._state_size

  @property
  def output_size(self):
    """Output size of the core as a `tf.TensorShape`."""
    return self._output_size
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0" encoding="utf-8"?>
<!--
This file is automatically generated by Visual Studio .Net. It is
used to store generic object data source configuration information.
Renaming the file extension or editing the content of this file may
cause the file to be unrecognizable by the program.
-->
<GenericObjectDataSource DisplayName="Task" Version="1.0" xmlns="urn:schemas-microsoft-com:xml-msdatasource">
<TypeInfo>AlanJuden.MvcReportViewer.ReportService.Task, Web References.ReportService.Reference.cs.dll, Version=0.0.0.0, Culture=neutral, PublicKeyToken=null</TypeInfo>
</GenericObjectDataSource>
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Copyright 2016 chronicle.software
~
~ Licensed under the *Apache License, Version 2.0* (the "License");
~ you may not use this file except in compliance with the License.
~ You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<groupId>net.openhft</groupId>
<artifactId>java-parent-pom</artifactId>
<version>1.1.23</version>
<relativePath/>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>compiler</artifactId>
<version>2.3.7-SNAPSHOT</version>
<packaging>bundle</packaging>
<name>OpenHFT/Java-Runtime-Compiler</name>
<description>Java Runtime Compiler library.</description>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>net.openhft</groupId>
<artifactId>third-party-bom</artifactId>
<type>pom</type>
<version>3.19.2</version>
<scope>import</scope>
</dependency>
</dependencies>
</dependencyManagement>
<dependencies>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</dependency>
<dependency>
<groupId>org.jetbrains</groupId>
<artifactId>annotations</artifactId>
</dependency>
<dependency>
<groupId>org.easymock</groupId>
<artifactId>easymock</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.8.1</version>
<configuration>
<source>1.8</source>
<target>1.8</target>
</configuration>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>exec-maven-plugin</artifactId>
<version>3.0.0</version>
<executions>
<execution>
<id>compiler-test</id>
<phase>integration-test</phase>
<goals>
<goal>exec</goal>
</goals>
</execution>
</executions>
<configuration>
<classpathScope>test</classpathScope>
<executable>java</executable>
<arguments>
<argument>-classpath</argument>
<classpath/>
<argument>net.openhft.compiler.CompilerTest</argument>
</arguments>
</configuration>
</plugin>
<!--
generate maven dependencies versions file that can be used later
to install the right bundle in test phase.
The file is:
target/classes/META-INF/maven/dependencies.properties
-->
<plugin>
<groupId>org.apache.servicemix.tooling</groupId>
<artifactId>depends-maven-plugin</artifactId>
<version>1.4.0</version>
<executions>
<execution>
<id>generate-depends-file</id>
<goals>
<goal>generate-depends-file</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.felix</groupId>
<artifactId>maven-bundle-plugin</artifactId>
<version>5.1.1</version>
<extensions>true</extensions>
<configuration>
<instructions>
<Bundle-SymbolicName>${project.groupId}.${project.artifactId}</Bundle-SymbolicName>
<Bundle-Name>OpenHFT :: ${project.artifactId}</Bundle-Name>
<Bundle-Version>${project.version}</Bundle-Version>
<Export-Package>
net.openhft.compiler.*;-noimport:=true
</Export-Package>
</instructions>
</configuration>
<executions>
<!--
This execution makes sure that the manifest is available
when the tests are executed
-->
<execution>
<goals>
<goal>manifest</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
<scm>
<url>scm:git:git@github.com:OpenHFT/Java-Runtime-Compiler.git</url>
<connection>scm:git:git@github.com:OpenHFT/Java-Runtime-Compiler.git</connection>
<developerConnection>scm:git:git@github.com:OpenHFT/Java-Runtime-Compiler.git
</developerConnection>
<tag>master</tag>
</scm>
</project>
|
{
"pile_set_name": "Github"
}
|
/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
*
* Copyright (c) 1997-2017 Oracle and/or its affiliates. All rights reserved.
*
* The contents of this file are subject to the terms of either the GNU
* General Public License Version 2 only ("GPL") or the Common Development
* and Distribution License("CDDL") (collectively, the "License"). You
* may not use this file except in compliance with the License. You can
* obtain a copy of the License at
* https://oss.oracle.com/licenses/CDDL+GPL-1.1
* or LICENSE.txt. See the License for the specific
* language governing permissions and limitations under the License.
*
* When distributing the software, include this License Header Notice in each
* file and include the License file at LICENSE.txt.
*
* GPL Classpath Exception:
* Oracle designates this particular file as subject to the "Classpath"
* exception as provided by Oracle in the GPL Version 2 section of the License
* file that accompanied this code.
*
* Modifications:
* If applicable, add the following below the License Header, with the fields
* enclosed by brackets [] replaced by your own identifying information:
* "Portions Copyright [year] [name of copyright owner]"
*
* Contributor(s):
* If you wish your version of this file to be governed by only the CDDL or
* only the GPL Version 2, indicate your decision by adding "[Contributor]
* elects to include this software in this distribution under the [CDDL or GPL
* Version 2] license." If you don't indicate a single choice of license, a
* recipient has the option to distribute your version of this file under
* either the CDDL, the GPL Version 2 or to extend the choice of license to
* its licensees as provided above. However, if you add GPL Version 2 code
* and therefore, elected the GPL Version 2 license, then the option applies
* only if the new code is made subject to such option by the copyright
* holder.
*/
package com.sun.xml.bind.v2.model.nav;
import java.lang.reflect.GenericArrayType;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.lang.reflect.TypeVariable;
import java.lang.reflect.WildcardType;
/**
* @author Kohsuke Kawaguchi
*/
/**
 * Visitor over the five kinds of {@link Type} exposed by the Java reflection
 * API. Subclasses implement one callback per kind; {@link #visit} performs
 * the dispatch.
 *
 * @author Kohsuke Kawaguchi
 */
abstract class TypeVisitor<T,P> {

    /**
     * Dispatches {@code t} to the {@code on...} callback matching its kind.
     *
     * @param t     the type to visit; must not be null
     * @param param caller-supplied context passed through to the callback
     * @return whatever the matching callback returns
     * @throws IllegalArgumentException if {@code t} is none of the five
     *         known {@link Type} kinds (cannot happen for JDK-produced types)
     */
    public final T visit(Type t, P param) {
        assert t != null;

        if (t instanceof Class) {
            return onClass((Class) t, param);
        }
        if (t instanceof ParameterizedType) {
            return onParameterizdType((ParameterizedType) t, param);
        }
        if (t instanceof GenericArrayType) {
            return onGenericArray((GenericArrayType) t, param);
        }
        if (t instanceof WildcardType) {
            return onWildcard((WildcardType) t, param);
        }
        if (t instanceof TypeVariable) {
            return onVariable((TypeVariable) t, param);
        }

        // every Type kind is covered above
        assert false;
        throw new IllegalArgumentException();
    }

    protected abstract T onClass(Class c, P param);

    protected abstract T onParameterizdType(ParameterizedType p, P param);

    protected abstract T onGenericArray(GenericArrayType g, P param);

    protected abstract T onVariable(TypeVariable v, P param);

    protected abstract T onWildcard(WildcardType w, P param);
}
|
{
"pile_set_name": "Github"
}
|
@rem
@rem Copyright 2015 the original author or authors.
@rem
@rem Licensed under the Apache License, Version 2.0 (the "License");
@rem you may not use this file except in compliance with the License.
@rem You may obtain a copy of the License at
@rem
@rem https://www.apache.org/licenses/LICENSE-2.0
@rem
@rem Unless required by applicable law or agreed to in writing, software
@rem distributed under the License is distributed on an "AS IS" BASIS,
@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@rem See the License for the specific language governing permissions and
@rem limitations under the License.
@rem
@if "%DEBUG%" == "" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem ##########################################################################
@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal
set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%
@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
@rem LUCENE-9471: workaround for gradle leaving junk temp. files behind.
SET GRADLE_TEMPDIR=%DIRNAME%\.gradle\tmp
IF NOT EXIST "%GRADLE_TEMPDIR%" MKDIR "%GRADLE_TEMPDIR%"
SET DEFAULT_JVM_OPTS=%DEFAULT_JVM_OPTS% "-Djava.io.tmpdir=%GRADLE_TEMPDIR%"
@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto init
echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:findJavaFromJavaHome
@rem Strip any quotes from JAVA_HOME before composing the java.exe path.
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto init
echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:init
@rem Get command-line arguments, handling Windows variants
if not "%OS%" == "Windows_NT" goto win9xME_args
:win9xME_args
@rem Slurp the command line arguments.
@rem NOTE(review): _SKIP appears unused here -- presumably a leftover from the
@rem stock Gradle wrapper's Win9x argument handling, which was removed.
set CMD_LINE_ARGS=
set _SKIP=2
:win9xME_args_slurp
if "x%~1" == "x" goto execute
set CMD_LINE_ARGS=%*
:execute
@rem LUCENE-9266: verify and download the gradle wrapper jar if we don't have one.
set GRADLE_WRAPPER_JAR=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
"%JAVA_EXE%" --source 11 "%APP_HOME%/buildSrc/src/main/java/org/apache/lucene/gradle/WrapperDownloader.java" "%GRADLE_WRAPPER_JAR%"
IF %ERRORLEVEL% NEQ 0 goto fail
@rem Setup the command line
set CLASSPATH=%GRADLE_WRAPPER_JAR%
@rem Don't fork a daemon mode on initial run that generates local defaults.
SET GRADLE_DAEMON_CTRL=
IF NOT EXIST "%DIRNAME%\gradle.properties" SET GRADLE_DAEMON_CTRL=--no-daemon
@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %GRADLE_DAEMON_CTRL% %CMD_LINE_ARGS%
:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd
:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1
:mainEnd
if "%OS%"=="Windows_NT" endlocal
:omega
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0" encoding="utf-8"?>
<!-- Copyright (C) 2010 The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<android.support.v7.view.menu.ActionMenuItemView
xmlns:android="http://schemas.android.com/apk/res/android"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_gravity="center"
android:gravity="center"
android:focusable="true"
android:paddingTop="4dip"
android:paddingBottom="4dip"
android:paddingLeft="8dip"
android:paddingRight="8dip"
android:textAppearance="?attr/actionMenuTextAppearance"
android:textColor="?attr/actionMenuTextColor"
style="?attr/actionButtonStyle"/>
|
{
"pile_set_name": "Github"
}
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
v1beta1 "k8s.io/api/events/v1beta1"
types "k8s.io/apimachinery/pkg/types"
core "k8s.io/client-go/testing"
)
// CreateWithEventNamespace creates a new event. Returns the copy of the event
// the server returns, or an error.
func (c *FakeEvents) CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) {
	// Namespaced action when a namespace is set, root-scoped otherwise.
	action := core.NewCreateAction(eventsResource, c.ns, event)
	if c.ns == "" {
		action = core.NewRootCreateAction(eventsResource, event)
	}
	ret, err := c.Fake.Invokes(action, event)
	if ret == nil {
		return nil, err
	}
	return ret.(*v1beta1.Event), err
}
// UpdateWithEventNamespace replaces an existing event. Returns the copy of the
// event the server returns, or an error.
func (c *FakeEvents) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) {
	// Namespaced action when a namespace is set, root-scoped otherwise.
	action := core.NewUpdateAction(eventsResource, c.ns, event)
	if c.ns == "" {
		action = core.NewRootUpdateAction(eventsResource, event)
	}
	ret, err := c.Fake.Invokes(action, event)
	if ret == nil {
		return nil, err
	}
	return ret.(*v1beta1.Event), err
}
// PatchWithEventNamespace patches an existing event. Returns the copy of the
// event the server returns, or an error.
func (c *FakeEvents) PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) {
	pt := types.StrategicMergePatchType
	// Namespaced action when a namespace is set, root-scoped otherwise.
	action := core.NewPatchAction(eventsResource, c.ns, event.Name, pt, data)
	if c.ns == "" {
		action = core.NewRootPatchAction(eventsResource, event.Name, pt, data)
	}
	ret, err := c.Fake.Invokes(action, event)
	if ret == nil {
		return nil, err
	}
	return ret.(*v1beta1.Event), err
}
|
{
"pile_set_name": "Github"
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.sysds.runtime.data;
import org.apache.commons.lang.math.IntRange;
import org.apache.sysds.common.Types.ValueType;
import org.apache.sysds.runtime.DMLRuntimeException;
import java.io.Serializable;
import java.util.Arrays;
import java.util.stream.IntStream;
import static org.apache.sysds.runtime.data.TensorBlock.DEFAULT_DIMS;
public class DataTensorBlock implements Serializable {
private static final long serialVersionUID = 3410679389807309521L;
// Number of value types that can carry data; the last enum constant is
// excluded (presumably ValueType.UNKNOWN -- confirm against ValueType).
private static final int VALID_VALUE_TYPES_LENGTH = ValueType.values().length - 1;

// Logical tensor dimensions; _dims[0] = rows, _dims[1] = columns.
protected int[] _dims;
// One homogeneous block per value type; entries for unused types stay null.
protected BasicTensorBlock[] _colsdata = new BasicTensorBlock[VALID_VALUE_TYPES_LENGTH];
// Value type of each logical column (length == _dims[1]).
protected ValueType[] _schema = null;
/**
 * Contains the (column) index in `_colsdata` for a certain column of the `DataTensor`. Which `_colsdata` to use is specified by the `_schema`
 */
protected int[] _colsToIx = null;
/**
 * Contains the column of `DataTensor` an `_colsdata` (column) index corresponds to.
 */
protected int[][] _ixToCols = null;
// Creates an empty tensor with no columns and default dimensions.
public DataTensorBlock() {
    this(new ValueType[0], DEFAULT_DIMS);
}
// Creates an empty (0-row) tensor with `ncols` columns, all of type `vt`.
public DataTensorBlock(int ncols, ValueType vt) {
    this(vt, new int[]{0, ncols});
}
/**
 * Creates an empty (0-row) tensor whose columns follow the given schema.
 * Only the column/type index mappings are built; no data is allocated.
 *
 * @param schema value type of each column
 */
public DataTensorBlock(ValueType[] schema) {
    _dims = new int[]{0, schema.length};
    _schema = schema;
    _colsToIx = new int[_schema.length];
    _ixToCols = new int[VALID_VALUE_TYPES_LENGTH][];
    int[] typeIxCounter = new int[VALID_VALUE_TYPES_LENGTH];
    // First pass: assign each column its index within its type's block.
    for (int i = 0; i < schema.length; i++) {
        int type = schema[i].ordinal();
        _colsToIx[i] = typeIxCounter[type]++;
    }
    // Second pass: build the reverse mapping. The counters now hold the
    // per-type column counts and are zeroed once a type's array is created,
    // so they can be reused as insert positions.
    for (int i = 0; i < schema.length; i++) {
        int type = schema[i].ordinal();
        if (_ixToCols[type] == null) {
            _ixToCols[type] = new int[typeIxCounter[type]];
            typeIxCounter[type] = 0;
        }
        _ixToCols[type][typeIxCounter[type]++] = i;
    }
}
/**
 * Creates a tensor with the given schema and dimensions. The index mappings
 * are built exactly as in {@code DataTensorBlock(ValueType[])}, then
 * {@code reset()} validates the dimensions and allocates the data blocks.
 *
 * @param schema value type of each column
 * @param dims   tensor dimensions ([rows, cols, ...])
 */
public DataTensorBlock(ValueType[] schema, int[] dims) {
    _dims = dims;
    _schema = schema;
    _colsToIx = new int[_schema.length];
    _ixToCols = new int[VALID_VALUE_TYPES_LENGTH][];
    int[] typeIxCounter = new int[VALID_VALUE_TYPES_LENGTH];
    // First pass: assign each column its index within its type's block.
    for (int i = 0; i < schema.length; i++) {
        int type = schema[i].ordinal();
        _colsToIx[i] = typeIxCounter[type]++;
    }
    // Second pass: reverse mapping; counters are zeroed on first allocation
    // of a type's array so they can be reused as insert positions.
    for (int i = 0; i < schema.length; i++) {
        int type = schema[i].ordinal();
        if (_ixToCols[type] == null) {
            _ixToCols[type] = new int[typeIxCounter[type]];
            typeIxCounter[type] = 0;
        }
        _ixToCols[type][typeIxCounter[type]++] = i;
    }
    reset();
}
/**
 * Creates a tensor of the given dimensions with a homogeneous schema
 * (every column has value type {@code vt}).
 *
 * @param vt   value type assigned to every column
 * @param dims tensor dimensions ([rows, cols, ...])
 */
public DataTensorBlock(ValueType vt, int[] dims) {
    _dims = dims;
    _schema = new ValueType[getDim(1)];
    Arrays.fill(_schema, vt);
    // IntStream.range is exclusive of the upper bound and yields exactly
    // getDim(1) entries. The previous commons-lang IntRange(0, getDim(1))
    // is inclusive of BOTH endpoints, producing arrays one element too long.
    // This also matches the DataTensorBlock(BasicTensorBlock) constructor.
    _colsToIx = IntStream.range(0, getDim(1)).toArray();
    _ixToCols = new int[VALID_VALUE_TYPES_LENGTH][];
    _ixToCols[vt.ordinal()] = IntStream.range(0, getDim(1)).toArray();
    reset();
}
/**
 * Creates a tensor with the given schema and dimensions, filled column-wise
 * from serialized string data.
 *
 * @param schema value type of each column
 * @param dims   tensor dimensions
 * @param data   data[i] holds column i's values in iteration order over the
 *               remaining dimensions
 */
public DataTensorBlock(ValueType[] schema, int[] dims, String[][] data) {
    this(schema, dims);
    allocateBlock();
    for (int i = 0; i < schema.length; i++) {
        // Index within the per-type block: column is fixed to this column's
        // internal index, all other dimensions advance.
        int[] ix = new int[dims.length];
        ix[1] = _colsToIx[i];
        BasicTensorBlock current = _colsdata[schema[i].ordinal()];
        for (int j = 0; j < data[i].length; j++) {
            current.set(ix, data[i][j]);
            // getNextIndexes advances over ALL dimensions; if it moved off
            // this column, snap back and advance the row instead.
            // NOTE(review): assumes trailing dimensions are exhausted before
            // the column index changes -- confirm against getNextIndexes.
            TensorBlock.getNextIndexes(_dims, ix);
            if (ix[1] != _colsToIx[i]) {
                // We want to stay in the current column
                if (ix[1] == 0)
                    ix[1] = _colsToIx[i];
                else {
                    ix[1] = _colsToIx[i];
                    ix[0]++;
                }
            }
        }
    }
}
/**
 * Creates a 1x1 FP64 tensor holding the given scalar value.
 *
 * @param val the scalar value
 */
public DataTensorBlock(double val) {
    _dims = new int[]{1, 1};
    _schema = new ValueType[]{ValueType.FP64};
    _colsToIx = new int[]{0};
    _ixToCols = new int[VALID_VALUE_TYPES_LENGTH][];
    _ixToCols[ValueType.FP64.ordinal()] = new int[]{0};
    _colsdata = new BasicTensorBlock[VALID_VALUE_TYPES_LENGTH];
    _colsdata[ValueType.FP64.ordinal()] = new BasicTensorBlock(val);
}
// Copy constructor; delegates to copy(DataTensorBlock) (defined elsewhere
// in this class).
public DataTensorBlock(DataTensorBlock that) {
    copy(that);
}
/**
 * Wraps a homogeneous {@link BasicTensorBlock} as a DataTensorBlock.
 * The given block is referenced directly (not copied).
 *
 * @param that the homogeneous block to wrap
 */
public DataTensorBlock(BasicTensorBlock that) {
    _dims = that._dims;
    _schema = new ValueType[_dims[1]];
    Arrays.fill(_schema, that._vt);
    // Identity mappings: every column lives in the single wrapped block.
    _colsToIx = IntStream.range(0, _dims[1]).toArray();
    _ixToCols = new int[VALID_VALUE_TYPES_LENGTH][];
    _ixToCols[that._vt.ordinal()] = IntStream.range(0, _dims[1]).toArray();
    _colsdata = new BasicTensorBlock[VALID_VALUE_TYPES_LENGTH];
    _colsdata[that._vt.ordinal()] = that;
}
// Resets the tensor to its current dimensions (reallocates/clears data).
public void reset() {
    reset(_dims);
}
/**
 * Resets the tensor to new dimensions, keeping the existing schema
 * (truncated if the column count shrinks).
 *
 * @param dims new dimensions; at least 2, all non-negative, and dims[1]
 *             must not exceed the current column count
 * @throws DMLRuntimeException if the dimensions are invalid or columns
 *                             would be added without a schema
 */
public void reset(int[] dims) {
    if (dims.length < 2)
        throw new DMLRuntimeException("DataTensor.reset(int[]) invalid number of tensor dimensions: " + dims.length);
    if (dims[1] > _dims[1])
        throw new DMLRuntimeException("DataTensor.reset(int[]) columns can not be added without a provided schema," +
            " use reset(int[],ValueType[]) instead");
    for (int i = 0; i < dims.length; i++) {
        if (dims[i] < 0)
            throw new DMLRuntimeException("DataTensor.reset(int[]) invalid dimension " + i + ": " + dims[i]);
    }
    _dims = dims;
    // Shrink the schema if the column count was reduced.
    if (getDim(1) < _schema.length) {
        ValueType[] schema = new ValueType[getDim(1)];
        System.arraycopy(_schema, 0, schema, 0, getDim(1));
        _schema = schema;
    }
    reset(_dims, _schema);
}
/**
 * Resets the tensor to new dimensions and a new schema, rebuilding the
 * column/type index mappings and reallocating the per-type data blocks.
 *
 * @param dims   new dimensions; at least 2, all non-negative
 * @param schema new column schema; length must equal dims[1]
 * @throws DMLRuntimeException if dimensions or schema are invalid
 */
public void reset(int[] dims, ValueType[] schema) {
    if (dims.length < 2)
        throw new DMLRuntimeException("DataTensor.reset(int[],ValueType[]) invalid number of tensor dimensions: " + dims.length);
    if (dims[1] != schema.length)
        throw new DMLRuntimeException("DataTensor.reset(int[],ValueType[]) column dimension and schema length does not match");
    for (int i = 0; i < dims.length; i++)
        if (dims[i] < 0)
            throw new DMLRuntimeException("DataTensor.reset(int[],ValueType[]) invalid dimension " + i + ": " + dims[i]);
    _dims = dims;
    _schema = schema;
    _colsToIx = new int[_schema.length];
    // Count columns per type while assigning each column its internal index.
    int[] typeIxCounter = new int[VALID_VALUE_TYPES_LENGTH];
    for (int i = 0; i < schema.length; i++) {
        int type = schema[i].ordinal();
        _colsToIx[i] = typeIxCounter[type]++;
    }
    // Rebuild the reverse mapping; existing arrays of the right length are
    // reused and simply overwritten.
    int[] colCounters = new int[VALID_VALUE_TYPES_LENGTH];
    for (int i = 0; i < getDim(1); i++) {
        int type = _schema[i].ordinal();
        if (_ixToCols[type] == null || _ixToCols[type].length != typeIxCounter[type]) {
            _ixToCols[type] = new int[typeIxCounter[type]];
        }
        _ixToCols[type][colCounters[type]++] = i;
    }
    // typeIxCounter now has the length of the BasicTensors
    if (_colsdata == null) {
        allocateBlock();
    }
    else {
        // Reset existing blocks to the new size; allocate blocks for types
        // that newly appear in the schema.
        for (int i = 0; i < _colsdata.length; i++) {
            if (_colsdata[i] != null) {
                _colsdata[i].reset(toInternalDims(dims, typeIxCounter[i]));
            }
            else if (typeIxCounter[i] != 0) {
                int[] colDims = toInternalDims(_dims, typeIxCounter[i]);
                _colsdata[i] = new BasicTensorBlock(ValueType.values()[i], colDims, false);
                _colsdata[i].allocateBlock();
            }
        }
    }
}
/**
 * Allocates one dense {@code BasicTensorBlock} per value type that occurs in
 * the current schema, sized to the number of columns of that type.
 *
 * @return this block, for call chaining
 */
public DataTensorBlock allocateBlock() {
	if (_colsdata == null)
		_colsdata = new BasicTensorBlock[VALID_VALUE_TYPES_LENGTH];
	// tally how many schema columns map onto each value type
	int[] colsPerType = new int[_colsdata.length];
	for (ValueType vt : _schema)
		colsPerType[vt.ordinal()]++;
	for (int t = 0; t < _colsdata.length; t++) {
		if (colsPerType[t] == 0)
			continue; // no columns of this type -> no backing block needed
		// TODO sparse
		BasicTensorBlock block = new BasicTensorBlock(ValueType.values()[t], toInternalDims(_dims, colsPerType[t]), false);
		block.allocateBlock();
		_colsdata[t] = block;
	}
	return this;
}
/**
 * @return true if at least one underlying per-type block has been allocated
 */
public boolean isAllocated() {
	if (_colsdata == null)
		return false;
	for (int i = 0; i < _colsdata.length; i++) {
		if (_colsdata[i] != null && _colsdata[i].isAllocated())
			return true;
	}
	return false;
}
/**
 * Checks whether every allocated per-type block is empty.
 *
 * @param safe forwarded to {@code BasicTensorBlock.isEmpty(boolean)}
 * @return true if unallocated or all underlying blocks are empty
 */
public boolean isEmpty(boolean safe) {
	if (!isAllocated())
		return true;
	for (int i = 0; i < _colsdata.length; i++) {
		BasicTensorBlock block = _colsdata[i];
		if (block != null && !block.isEmpty(safe))
			return false;
	}
	return true;
}
/**
 * @return total number of non-zero values across all per-type blocks,
 *         or 0 when nothing is allocated
 */
public long getNonZeros() {
	if (!isAllocated())
		return 0;
	long total = 0;
	for (int i = 0; i < _colsdata.length; i++) {
		if (_colsdata[i] != null)
			total += _colsdata[i].getNonZeros();
	}
	return total;
}
/** @return size of the row dimension (dimension 0) */
public int getNumRows() {
	return getDim(0);
}
/** @return size of the column dimension (dimension 1) */
public int getNumColumns() {
	return getDim(1);
}
/** @return number of tensor dimensions */
public int getNumDims() {
	return _dims.length;
}
/** @return size of dimension {@code i} (no bounds check) */
public int getDim(int i) {
	return _dims[i];
}
/** @return the internal dimensions array (not a copy; callers must not mutate) */
public int[] getDims() {
	return _dims;
}
/** @return the internal per-column schema array (not a copy; callers must not mutate) */
public ValueType[] getSchema() {
	return _schema;
}
/** @return the value type of column {@code col} (no bounds check) */
public ValueType getColValueType(int col) {
	return _schema[col];
}
/**
 * Reads the value at the given n-dimensional index, dispatching to the
 * per-type block of the addressed column.
 *
 * @param ix global index; ix[1] is the (global) column
 * @return the value at that position, boxed
 */
public Object get(int[] ix) {
	int col = ix[1];
	BasicTensorBlock data = _colsdata[_schema[col].ordinal()];
	return data.get(toInternalIx(ix, _colsToIx[col]));
}
/**
 * Reads a cell of a 2-dimensional tensor as a double.
 *
 * @param r row index
 * @param c global column index
 * @return the value at (r, c)
 * @throws DMLRuntimeException if this tensor is not 2-dimensional
 */
public double get(int r, int c) {
	if (getNumDims() != 2)
		throw new DMLRuntimeException("DataTensor.get(int,int) dimension mismatch: expected=2 actual=" + getNumDims());
	BasicTensorBlock data = _colsdata[_schema[c].ordinal()];
	return data.get(r, _colsToIx[c]);
}
/**
 * Sets every cell of every allocated per-type block to the given value.
 *
 * Fix: guard against null entries and an unallocated {@code _colsdata}; the
 * previous version dereferenced each element unconditionally and threw an NPE
 * on partially allocated blocks, unlike the sibling loops (e.g.
 * {@code getNonZeros}, {@code isEmpty}) which all null-check entries.
 *
 * @param v value to broadcast into all allocated blocks
 */
public void set(Object v) {
	if (_colsdata == null)
		return; // nothing allocated, nothing to set
	for (BasicTensorBlock bt : _colsdata) {
		if (bt != null)
			bt.set(v);
	}
}
/**
 * Writes a value at the given n-dimensional index, dispatching to the
 * per-type block of the addressed column.
 *
 * @param ix global index; ix[1] is the (global) column
 * @param v  value to store
 */
public void set(int[] ix, Object v) {
	int col = ix[1];
	BasicTensorBlock data = _colsdata[_schema[col].ordinal()];
	data.set(toInternalIx(ix, _colsToIx[col]), v);
}
/**
 * Writes a double into a cell of a 2-dimensional tensor.
 *
 * @param r row index
 * @param c global column index
 * @param v value to store
 * @throws DMLRuntimeException if this tensor is not 2-dimensional
 */
public void set(int r, int c, double v) {
	if (getNumDims() != 2)
		throw new DMLRuntimeException("DataTensor.set(int,int,double) dimension mismatch: expected=2 actual=" + getNumDims());
	BasicTensorBlock data = _colsdata[_schema[c].ordinal()];
	data.set(r, _colsToIx[c], v);
}
/**
 * Deep-copies metadata and data from {@code that} into this block.
 *
 * Fix: allocate {@code _colsdata} before indexing it — previously, calling
 * copy on a never-allocated block threw an NPE at {@code _colsdata.length}
 * whenever the source was allocated.
 *
 * NOTE(review): per-type entries that exist in this block but are null in the
 * source are left as-is (not cleared) — confirm callers only copy into fresh
 * or compatible blocks.
 *
 * @param that source block to copy from
 */
public void copy(DataTensorBlock that) {
	_dims = that._dims.clone();
	_schema = that._schema.clone();
	_colsToIx = that._colsToIx.clone();
	_ixToCols = new int[that._ixToCols.length][];
	for (int i = 0; i < _ixToCols.length; i++)
		if (that._ixToCols[i] != null)
			_ixToCols[i] = that._ixToCols[i].clone();
	if (that.isAllocated()) {
		if (_colsdata == null)
			_colsdata = new BasicTensorBlock[VALID_VALUE_TYPES_LENGTH];
		for (int i = 0; i < _colsdata.length; i++) {
			if (that._colsdata[i] != null) {
				_colsdata[i] = new BasicTensorBlock(that._colsdata[i]);
			}
		}
	}
}
/**
 * Copy a part of another <code>DataTensorBlock</code>
 * @param lower lower index of elements to copy (inclusive)
 * @param upper upper index of elements to copy (exclusive)
 * @param src source <code>DataTensorBlock</code>
 */
public void copy(int[] lower, int[] upper, DataTensorBlock src) {
	int[] subLower = lower.clone();
	// NOTE(review): this branch mutates the caller's `upper` array in place
	// (interpreting column 0 as "full row width") — confirm callers expect
	// that side effect.
	if (upper[1] == 0) {
		upper[1] = getDim(1);
		upper[0]--;
	}
	int[] subUpper = upper.clone();
	// Copy per value type: slide the column window across the source's
	// per-type blocks, keeping the non-column coordinates from lower/upper.
	for (int i = 0; i < VALID_VALUE_TYPES_LENGTH; i++) {
		if (src._colsdata[i] == null)
			continue;
		subLower[1] = lower[1];
		subUpper[1] = lower[1] + src._colsdata[i].getNumColumns();
		_colsdata[i].copy(subLower, subUpper, src._colsdata[i]);
	}
}
/**
 * Builds the index used inside a per-type block: identical to {@code ix}
 * except that the column coordinate is replaced by the column's position
 * within its value-type block.
 *
 * @param ix  global index (not modified)
 * @param col column position inside the per-type block
 * @return a new index array with dimension 1 replaced by {@code col}
 */
private static int[] toInternalIx(int[] ix, int col) {
	int[] result = ix.clone();
	result[1] = col;
	return result;
}
/**
 * Builds the dimensions of a per-type block: identical to {@code dims}
 * except that the column dimension is replaced by the number of columns
 * of that value type.
 *
 * @param dims global dimensions (not modified)
 * @param cols column count for the per-type block
 * @return a new dims array with dimension 1 replaced by {@code cols}
 */
private static int[] toInternalDims(int[] dims, int cols) {
	int[] result = dims.clone();
	result[1] = cols;
	return result;
}
}
|
{
"pile_set_name": "Github"
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.ml;
import org.apache.spark.sql.SparkSession;
// $example on$
import java.util.Arrays;
import org.apache.spark.ml.feature.VectorAssembler;
import org.apache.spark.ml.linalg.VectorUDT;
import org.apache.spark.ml.linalg.Vectors;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.types.*;
import static org.apache.spark.sql.types.DataTypes.*;
// $example off$
/**
 * Self-contained Spark ML example: merges the "hour", "mobile" and
 * "userFeatures" columns of a one-row DataFrame into a single "features"
 * vector column using {@code VectorAssembler}.
 * (The {@code $example on$/off$} markers are consumed by doc tooling.)
 */
public class JavaVectorAssemblerExample {
  public static void main(String[] args) {
    SparkSession spark = SparkSession
      .builder()
      .appName("JavaVectorAssemblerExample")
      .getOrCreate();

    // $example on$
    // Schema of the toy input row: id, two numeric features, a vector feature,
    // and a label column.
    StructType schema = createStructType(new StructField[]{
      createStructField("id", IntegerType, false),
      createStructField("hour", IntegerType, false),
      createStructField("mobile", DoubleType, false),
      createStructField("userFeatures", new VectorUDT(), false),
      createStructField("clicked", DoubleType, false)
    });
    Row row = RowFactory.create(0, 18, 1.0, Vectors.dense(0.0, 10.0, 0.5), 1.0);
    Dataset<Row> dataset = spark.createDataFrame(Arrays.asList(row), schema);

    // Concatenate the selected input columns into one ML vector column.
    VectorAssembler assembler = new VectorAssembler()
      .setInputCols(new String[]{"hour", "mobile", "userFeatures"})
      .setOutputCol("features");

    Dataset<Row> output = assembler.transform(dataset);
    System.out.println("Assembled columns 'hour', 'mobile', 'userFeatures' to vector column " +
        "'features'");
    output.select("features", "clicked").show(false);
    // $example off$

    spark.stop();
  }
}
|
{
"pile_set_name": "Github"
}
|
/*
Copyright 2013 KLab Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
// CiOSTmpFile.cpp
// GameEngine
//
//
//
#include <unistd.h>
#include <fcntl.h>
#include "CiOSPathConv.h"
#include "CiOSTmpFile.h"
#include "CPFInterface.h"
// Resolves the engine-relative path to a full filesystem path, opens the file
// for writing (creating/truncating it), and excludes it from iOS backups.
CiOSTmpFile::CiOSTmpFile(const char * path) : m_fullpath(0)
{
    m_fullpath = CiOSPathConv::getInstance().fullpath(path);
    // 2012-11-27 (Tue):
    // Fixed the file not being created when it did not already exist,
    // and added owner read/write permissions.
    m_fd = open(m_fullpath, O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
    // NOTE(review): open() may return -1 here; the failure is only detected
    // later via m_fd checks — confirm callers verify the file opened.
    CPFInterface::getInstance().platform().excludePathFromBackup(m_fullpath);
}
// Closes the descriptor (if one was opened) and releases the resolved path.
CiOSTmpFile::~CiOSTmpFile()
{
    // NOTE(review): `> 0` also skips fd 0, which is a valid descriptor in
    // principle — presumably never the case here, but `>= 0` would be exact.
    if(m_fd > 0) {
        close(m_fd);
    }
    delete [] m_fullpath;
}
// Writes `size` bytes from `ptr` to the temp file; returns the byte count
// reported by write(2).
// NOTE(review): write() returns ssize_t and yields -1 on error, which wraps
// to a huge size_t here — callers cannot distinguish failure; confirm intent.
size_t
CiOSTmpFile::writeTmp(void *ptr, size_t size)
{
    return write(m_fd, ptr, size);
}
|
{
"pile_set_name": "Github"
}
|
/* This file is part of the OWL API.
* The contents of this file are subject to the LGPL License, Version 3.0.
* Copyright 2014, The University of Manchester
*
* This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
* You should have received a copy of the GNU General Public License along with this program. If not, see http://www.gnu.org/licenses/.
*
* Alternatively, the contents of this file may be used under the terms of the Apache License, Version 2.0 in which case, the provisions of the Apache License Version 2.0 are applicable instead of those above.
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */
package org.semanticweb.owlapi.io;
import static org.semanticweb.owlapi.util.OWLAPIPreconditions.checkNotNull;
import static org.semanticweb.owlapi.util.OWLAPIPreconditions.emptyOptional;
import static org.semanticweb.owlapi.util.OWLAPIPreconditions.optional;
import java.io.BufferedOutputStream;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.OutputStream;
import java.io.Writer;
import java.util.Optional;
import org.semanticweb.owlapi.model.IRI;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* An {@code OWLOntologyDocumentTarget} that supports writing out to a
* {@code File}.
*
* @author Matthew Horridge, The University of Manchester, Bio-Health Informatics Group
* @since 3.2.0
*/
public class FileDocumentTarget implements OWLOntologyDocumentTarget {

    private static final Logger LOGGER = LoggerFactory.getLogger(FileDocumentTarget.class);
    private final File file;

    /**
     * Constructs the document target, with the target being the specified file.
     *
     * @param file The file that is the target.
     */
    public FileDocumentTarget(File file) {
        this.file = checkNotNull(file, "file cannot be null");
    }

    @Override
    public Optional<Writer> getWriter() {
        try {
            // NOTE(review): FileWriter uses the platform default charset;
            // confirm callers expect that rather than an explicit UTF-8.
            return optional(new BufferedWriter(new FileWriter(file)));
        } catch (IOException e) {
            LOGGER.error("Writer cannot be created", e);
            return emptyOptional();
        }
    }

    @Override
    public Optional<OutputStream> getOutputStream() {
        try {
            return optional(new BufferedOutputStream(new FileOutputStream(file)));
        } catch (IOException e) {
            // Fixed copy-paste error in the log message: this method creates
            // an *output* stream, not an input stream.
            LOGGER.error("Output stream cannot be created", e);
            return emptyOptional();
        }
    }

    @Override
    public Optional<IRI> getDocumentIRI() {
        return optional(IRI.create(file));
    }
}
|
{
"pile_set_name": "Github"
}
|
// Copyright 2010 Todd Ditchendorf
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#import "TDJsonParserTest.h"
#import "TDJsonParser.h"
#import "TDFastJsonParser.h"
@implementation TDJsonParserTest
// Create a fresh JSON parser before each test.
- (void)setUp {
    p = (TDJsonParser *)[TDJsonParser parser];
}
// Tokenize the bundled "apple-boss" JSON fixture end-to-end; passes as long
// as the tokenizer reaches EOF without throwing (no assertions on tokens).
- (void)testForAppleBossResultTokenization {
    NSString *path = [[NSBundle bundleForClass:[self class]] pathForResource:@"apple-boss" ofType:@"json"];
    s = [NSString stringWithContentsOfFile:path encoding:NSUTF8StringEncoding error:nil];
    PKTokenizer *t = [[[PKTokenizer alloc] initWithString:s] autorelease];
    PKToken *eof = [PKToken EOFToken];
    PKToken *tok = nil;
    // Drain the token stream until EOF.
    while (eof != (tok = [t nextToken])) {
        //NSLog(@"tok: %@", tok);
    }
}

// Parse the same fixture with the full JSON parser; exceptions are swallowed,
// so this is a smoke test only (no assertions on the parsed result).
- (void)testForAppleBossResult {
    NSString *path = [[NSBundle bundleForClass:[self class]] pathForResource:@"apple-boss" ofType:@"json"];
    s = [NSString stringWithContentsOfFile:path encoding:NSUTF8StringEncoding error:nil];
    @try {
        result = [p parse:s];
    }
    @catch (NSException *e) {
        //NSLog(@"\n\n\nexception:\n\n %@", [e reason]);
    }
    //NSLog(@"result %@", result);
}
// An empty input must produce no match at all.
- (void)testEmptyString {
    s = @"";
    a = [PKTokenAssembly assemblyWithString:s];
    result = [p bestMatchFor:a];
    TDNil(result);
}
// numberParser: positive integers and negative decimals are pushed as NSNumbers.
- (void)testNum {
    s = @"456";
    a = [PKTokenAssembly assemblyWithString:s];
    result = [[p numberParser] bestMatchFor:a];
    TDNotNil(result);
    TDEqualObjects(@"[456]456^", [result description]);
    id obj = [result pop];
    TDNotNil(obj);
    TDEqualObjects([NSNumber numberWithFloat:456], obj);

    s = @"-3.47";
    a = [PKTokenAssembly assemblyWithString:s];
    result = [[p numberParser] bestMatchFor:a];
    TDNotNil(result);
    TDEqualObjects(@"[-3.47]-3.47^", [result description]);
    obj = [result pop];
    TDNotNil(obj);
    TDEqualObjects([NSNumber numberWithFloat:-3.47], obj);
}

// stringParser: both single- and double-quoted strings are unquoted on the stack.
- (void)testString {
    s = @"'foobar'";
    a = [PKTokenAssembly assemblyWithString:s];
    result = [[p stringParser] bestMatchFor:a];
    TDNotNil(result);
    TDEqualObjects(@"[foobar]'foobar'^", [result description]);
    id obj = [result pop];
    TDNotNil(obj);
    TDEqualObjects(@"foobar", obj);

    s = @"\"baz boo boo\"";
    a = [PKTokenAssembly assemblyWithString:s];
    result = [[p stringParser] bestMatchFor:a];
    TDNotNil(result);
    TDEqualObjects(@"[baz boo boo]\"baz boo boo\"^", [result description]);
    obj = [result pop];
    TDNotNil(obj);
    TDEqualObjects(@"baz boo boo", obj);
}

// booleanParser: `true`/`false` literals become boolean NSNumbers.
- (void)testBoolean {
    s = @"true";
    a = [PKTokenAssembly assemblyWithString:s];
    result = [[p booleanParser] bestMatchFor:a];
    TDNotNil(result);
    TDEqualObjects(@"[1]true^", [result description]);
    id obj = [result pop];
    TDNotNil(obj);
    TDEqualObjects([NSNumber numberWithBool:YES], obj);

    s = @"false";
    a = [PKTokenAssembly assemblyWithString:s];
    result = [[p booleanParser] bestMatchFor:a];
    TDNotNil(result);
    TDEqualObjects(@"[0]false^", [result description]);
    obj = [result pop];
    TDNotNil(obj);
    TDEqualObjects([NSNumber numberWithBool:NO], obj);
}
// arrayParser: flat, mixed-type, and nested arrays produce NSArrays on the stack.
- (void)testArray {
    s = @"[1, 2, 3]";
    a = [PKTokenAssembly assemblyWithString:s];
    result = [[p arrayParser] bestMatchFor:a];
    // NSLog(@"result: %@", result);
    TDNotNil(result);
    id obj = [result pop];
    TDEquals((int)3, (int)[obj count]);
    TDEqualObjects([NSNumber numberWithInteger:1], [obj objectAtIndex:0]);
    TDEqualObjects([NSNumber numberWithInteger:2], [obj objectAtIndex:1]);
    TDEqualObjects([NSNumber numberWithInteger:3], [obj objectAtIndex:2]);
    TDEqualObjects(@"[][/1/,/2/,/3/]^", [result description]);

    s = @"[true, 'garlic jazz!', .888]";
    a = [PKTokenAssembly assemblyWithString:s];
    result = [[p arrayParser] bestMatchFor:a];
    TDNotNil(result);
    //TDEqualObjects(@"[true, 'garlic jazz!', .888]true/'garlic jazz!'/.888^", [result description]);
    obj = [result pop];
    TDEqualObjects([NSNumber numberWithBool:YES], [obj objectAtIndex:0]);
    TDEqualObjects(@"garlic jazz!", [obj objectAtIndex:1]);
    TDEqualObjects([NSNumber numberWithFloat:.888], [obj objectAtIndex:2]);

    s = @"[1, [2, [3, 4]]]";
    a = [PKTokenAssembly assemblyWithString:s];
    result = [[p arrayParser] bestMatchFor:a];
    TDNotNil(result);
    //NSLog(@"result: %@", [a stack]);
    // Fix: pop the freshly parsed nested array before asserting. The original
    // re-checked the stale `obj` left over from the previous parse, so the
    // nested-array result was never actually inspected.
    obj = [result pop];
    TDEqualObjects([NSNumber numberWithInteger:1], [obj objectAtIndex:0]);
}
// objectParser: flat, mixed-type, and nested JSON objects become NSDictionaries.
- (void)testObject {
    s = @"{'key': 'value'}";
    a = [PKTokenAssembly assemblyWithString:s];
    result = [[p objectParser] bestMatchFor:a];
    TDNotNil(result);
    id obj = [result pop];
    TDEqualObjects([obj objectForKey:@"key"], @"value");

    s = @"{'foo': false, 'bar': true, \"baz\": -9.457}";
    a = [PKTokenAssembly assemblyWithString:s];
    result = [[p objectParser] bestMatchFor:a];
    TDNotNil(result);
    obj = [result pop];
    TDEqualObjects([obj objectForKey:@"foo"], [NSNumber numberWithBool:NO]);
    TDEqualObjects([obj objectForKey:@"bar"], [NSNumber numberWithBool:YES]);
    TDEqualObjects([obj objectForKey:@"baz"], [NSNumber numberWithFloat:-9.457]);

    // Nested object containing an array.
    s = @"{'baz': {'foo': [1,2]}}";
    a = [PKTokenAssembly assemblyWithString:s];
    result = [[p objectParser] bestMatchFor:a];
    TDNotNil(result);
    obj = [result pop];
    NSDictionary *dict = [obj objectForKey:@"baz"];
    TDTrue([dict isKindOfClass:[NSDictionary class]]);
    NSArray *arr = [dict objectForKey:@"foo"];
    TDTrue([arr isKindOfClass:[NSArray class]]);
    TDEqualObjects([NSNumber numberWithInteger:1], [arr objectAtIndex:0]);
    // TDEqualObjects(@"['baz', 'foo', 1, 2]'baz'/'foo'/1/2^", [result description]);
}
// Smoke test: parse the bundled "yahoo" fixture with the grammar-based parser
// (no assertions; passes if no exception escapes).
- (void)testCrunchBaseJsonParser {
    NSString *path = [[NSBundle bundleForClass:[self class]] pathForResource:@"yahoo" ofType:@"json"];
    s = [NSString stringWithContentsOfFile:path encoding:NSUTF8StringEncoding error:nil];
    TDJsonParser *parser = [[[TDJsonParser alloc] init] autorelease];
    [parser parse:s];
    // id res = [parser parse:s];
    //NSLog(@"res %@", res);
}

// Smoke test: tokenize the same fixture to EOF (no assertions).
- (void)testCrunchBaseJsonParserTokenization {
    NSString *path = [[NSBundle bundleForClass:[self class]] pathForResource:@"yahoo" ofType:@"json"];
    s = [NSString stringWithContentsOfFile:path encoding:NSUTF8StringEncoding error:nil];
    PKTokenizer *t = [[[PKTokenizer alloc] initWithString:s] autorelease];
    PKToken *eof = [PKToken EOFToken];
    PKToken *tok = nil;
    while (eof != (tok = [t nextToken])) {
        //NSLog(@"tok: %@", tok);
    }
}

// Smoke test: parse the fixture with the token-based fast parser (no assertions).
- (void)testCrunchBaseJsonTokenParser {
    NSString *path = [[NSBundle bundleForClass:[self class]] pathForResource:@"yahoo" ofType:@"json"];
    s = [NSString stringWithContentsOfFile:path encoding:NSUTF8StringEncoding error:nil];
    TDFastJsonParser *parser = [[[TDFastJsonParser alloc] init] autorelease];
    [parser parse:s];
    // id res = [parser parse:s];
    //NSLog(@"res %@", res);
}
// Parse a flat company record (strings, integers, nulls) and verify every key.
- (void)testYahoo1 {
    s =
    @"{"
    @"\"name\": \"Yahoo!\","
    @"\"permalink\": \"yahoo\","
    @"\"homepage_url\": \"http://www.yahoo.com\","
    @"\"blog_url\": \"http://yodel.yahoo.com/\","
    @"\"blog_feed_url\": \"http://ycorpblog.com/feed/\","
    @"\"category_code\": \"web\","
    @"\"number_of_employees\": 13600,"
    @"\"founded_year\": 1994,"
    @"\"founded_month\": null,"
    @"\"founded_day\": null,"
    @"\"deadpooled_year\": null,"
    @"\"deadpooled_month\": null,"
    @"\"deadpooled_day\": null,"
    @"\"deadpooled_url\": null,"
    @"\"tag_list\": \"search, portal, webmail, photos\","
    @"\"email_address\": \"\","
    @"\"phone_number\": \"(408) 349-3300\""
    @"}";
    result = [p parse:s];
    //NSLog(@"result %@", result);
    TDNotNil(result);
    id d = result;
    TDNotNil(d);
    TDTrue([d isKindOfClass:[NSDictionary class]]);
    // JSON null values must surface as NSNull, not nil.
    TDEqualObjects([d objectForKey:@"name"], @"Yahoo!");
    TDEqualObjects([d objectForKey:@"permalink"], @"yahoo");
    TDEqualObjects([d objectForKey:@"homepage_url"], @"http://www.yahoo.com");
    TDEqualObjects([d objectForKey:@"blog_url"], @"http://yodel.yahoo.com/");
    TDEqualObjects([d objectForKey:@"blog_feed_url"], @"http://ycorpblog.com/feed/");
    TDEqualObjects([d objectForKey:@"category_code"], @"web");
    TDEqualObjects([d objectForKey:@"number_of_employees"], [NSNumber numberWithInteger:13600]);
    TDEqualObjects([d objectForKey:@"founded_year"], [NSNumber numberWithInteger:1994]);
    TDEqualObjects([d objectForKey:@"founded_month"], [NSNull null]);
    TDEqualObjects([d objectForKey:@"founded_day"], [NSNull null]);
    TDEqualObjects([d objectForKey:@"deadpooled_year"], [NSNull null]);
    TDEqualObjects([d objectForKey:@"deadpooled_month"], [NSNull null]);
    TDEqualObjects([d objectForKey:@"deadpooled_day"], [NSNull null]);
    TDEqualObjects([d objectForKey:@"deadpooled_url"], [NSNull null]);
    TDEqualObjects([d objectForKey:@"tag_list"], @"search, portal, webmail, photos");
    TDEqualObjects([d objectForKey:@"email_address"], @"");
    TDEqualObjects([d objectForKey:@"phone_number"], @"(408) 349-3300");
}
// Parse deeply nested arrays-in-arrays ("available_sizes") and verify each
// [width, height] pair at every nesting level.
- (void)testYahoo2 {
    s = @"{\"image\":"
    @"	{\"available_sizes\":"
    @"		[[[150, 37],"
    @"		\"assets/images/resized/0001/0836/10836v1-max-250x150.png\"],"
    @"		[[200, 50],"
    @"		\"assets/images/resized/0001/0836/10836v1-max-250x250.png\"],"
    @"		[[200, 50],"
    @"		\"assets/images/resized/0001/0836/10836v1-max-450x450.png\"]],"
    @"	\"attribution\": null}"
    @"}";
    result = [p parse:s];
    //NSLog(@"result %@", result);
    TDNotNil(result);
    id d = result;
    TDNotNil(d);
    TDTrue([d isKindOfClass:[NSDictionary class]]);
    id image = [d objectForKey:@"image"];
    TDNotNil(image);
    TDTrue([image isKindOfClass:[NSDictionary class]]);
    NSArray *sizes = [image objectForKey:@"available_sizes"];
    TDNotNil(sizes);
    TDTrue([sizes isKindOfClass:[NSArray class]]);
    TDEquals(3, (int)[sizes count]);
    // Each entry is a 2-element array: [[w, h], "path"].
    NSArray *first = [sizes objectAtIndex:0];
    TDNotNil(first);
    TDTrue([first isKindOfClass:[NSArray class]]);
    TDEquals(2, (int)[first count]);
    NSArray *firstKey = [first objectAtIndex:0];
    TDNotNil(firstKey);
    TDTrue([firstKey isKindOfClass:[NSArray class]]);
    TDEquals(2, (int)[firstKey count]);
    TDEqualObjects([NSNumber numberWithInteger:150], [firstKey objectAtIndex:0]);
    TDEqualObjects([NSNumber numberWithInteger:37], [firstKey objectAtIndex:1]);
    NSArray *second = [sizes objectAtIndex:1];
    TDNotNil(second);
    TDTrue([second isKindOfClass:[NSArray class]]);
    TDEquals(2, (int)[second count]);
    NSArray *secondKey = [second objectAtIndex:0];
    TDNotNil(secondKey);
    TDTrue([secondKey isKindOfClass:[NSArray class]]);
    TDEquals(2, (int)[secondKey count]);
    TDEqualObjects([NSNumber numberWithInteger:200], [secondKey objectAtIndex:0]);
    TDEqualObjects([NSNumber numberWithInteger:50], [secondKey objectAtIndex:1]);
    NSArray *third = [sizes objectAtIndex:2];
    TDNotNil(third);
    TDTrue([third isKindOfClass:[NSArray class]]);
    TDEquals(2, (int)[third count]);
    NSArray *thirdKey = [third objectAtIndex:0];
    TDNotNil(thirdKey);
    TDTrue([thirdKey isKindOfClass:[NSArray class]]);
    TDEquals(2, (int)[thirdKey count]);
    TDEqualObjects([NSNumber numberWithInteger:200], [thirdKey objectAtIndex:0]);
    TDEqualObjects([NSNumber numberWithInteger:50], [thirdKey objectAtIndex:1]);
    //	TDEqualObjects([d objectForKey:@"name"], @"Yahoo!");
}
// Parse a large array of small uniform objects; only the container types are
// asserted (a smoke test for object-array throughput).
- (void)testYahoo3 {
    s =
    @"{\"products\":"
    @"["
    @"{\"name\": \"Yahoo.com\", \"permalink\": \"yahoo-com\"},"
    @"{\"name\": \"Yahoo! Mail\", \"permalink\": \"yahoo-mail\"},"
    @"{\"name\": \"Yahoo! Search\", \"permalink\": \"yahoo-search\"},"
    @"{\"name\": \"Yahoo! Directory\", \"permalink\": \"yahoo-directory\"},"
    @"{\"name\": \"Yahoo! Finance\", \"permalink\": \"yahoo-finance\"},"
    @"{\"name\": \"My Yahoo\", \"permalink\": \"my-yahoo\"},"
    @"{\"name\": \"Yahoo! News\", \"permalink\": \"yahoo-news\"},"
    @"{\"name\": \"Yahoo! Groups\", \"permalink\": \"yahoo-groups\"},"
    @"{\"name\": \"Yahoo! Messenger\", \"permalink\": \"yahoo-messenger\"},"
    @"{\"name\": \"Yahoo! Games\", \"permalink\": \"yahoo-games\"},"
    @"{\"name\": \"Yahoo! People Search\", \"permalink\": \"yahoo-people-search\"},"
    @"{\"name\": \"Yahoo! Movies\", \"permalink\": \"yahoo-movies\"},"
    @"{\"name\": \"Yahoo! Weather\", \"permalink\": \"yahoo-weather\"},"
    @"{\"name\": \"Yahoo! Video\", \"permalink\": \"yahoo-video\"},"
    @"{\"name\": \"Yahoo! Music\", \"permalink\": \"yahoo-music\"},"
    @"{\"name\": \"Yahoo! Sports\", \"permalink\": \"yahoo-sports\"},"
    @"{\"name\": \"Yahoo! Maps\", \"permalink\": \"yahoo-maps\"},"
    @"{\"name\": \"Yahoo! Auctions\", \"permalink\": \"yahoo-auctions\"},"
    @"{\"name\": \"Yahoo! Widgets\", \"permalink\": \"yahoo-widgets\"},"
    @"{\"name\": \"Yahoo! Shopping\", \"permalink\": \"yahoo-shopping\"},"
    @"{\"name\": \"Yahoo! Real Estate\", \"permalink\": \"yahoo-real-estate\"},"
    @"{\"name\": \"Yahoo! Travel\", \"permalink\": \"yahoo-travel\"},"
    @"{\"name\": \"Yahoo! Classifieds\", \"permalink\": \"yahoo-classifieds\"},"
    @"{\"name\": \"Yahoo! Answers\", \"permalink\": \"yahoo-answers\"},"
    @"{\"name\": \"Yahoo! Mobile\", \"permalink\": \"yahoo-mobile\"},"
    @"{\"name\": \"Yahoo! Buzz\", \"permalink\": \"yahoo-buzz\"},"
    @"{\"name\": \"Yahoo! Open Search Platform\", \"permalink\": \"yahoo-open-search-platform\"},"
    @"{\"name\": \"Fire Eagle\", \"permalink\": \"fireeagle\"},"
    @"{\"name\": \"Shine\", \"permalink\": \"shine\"},"
    @"{\"name\": \"Yahoo! Shortcuts\", \"permalink\": \"yahoo-shortcuts\"}"
    @"]"
    @"}";
    result = [p parse:s];
    //NSLog(@"result %@", result);
    TDNotNil(result);
    id d = result;
    TDNotNil(d);
    TDTrue([d isKindOfClass:[NSDictionary class]]);
    NSArray *products = [d objectForKey:@"products"];
    TDNotNil(products);
    TDTrue([products isKindOfClass:[NSArray class]]);
}
// Stress test: parse a very large flat array (thousands of `1` elements) with
// the fast token-based parser; only the container type is asserted.
- (void)testYahoo4 {
    s = @"["
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,"
    @"1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1"
    @"]";
    // Swap in the fast parser for this large input (cast keeps the shared ivar).
    p = (id)[[[TDFastJsonParser alloc] init] autorelease];
    result = [p parse:s];
    //NSLog(@"result %@", result);
    TDNotNil(result);
    id d = result;
    TDNotNil(d);
    TDTrue([d isKindOfClass:[NSArray class]]);
    //	NSArray *products = [d objectForKey:@"products"];
    //	TDNotNil(products);
    //	TDTrue([products isKindOfClass:[NSArray class]]);
}
@end
|
{
"pile_set_name": "Github"
}
|
/*****************************************************************
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
****************************************************************/
package org.apache.cayenne.testdo.java8;
import org.apache.cayenne.testdo.java8.auto._LocalTimeTestEntity;
/**
 * Concrete test entity; presumably exercises {@code java.time.LocalTime} mapping (package is
 * {@code testdo.java8}) — all attribute/relationship definitions live in the generated
 * superclass {@code _LocalTimeTestEntity}.
 */
public class LocalTimeTestEntity extends _LocalTimeTestEntity {
    // Pins the serialized form; serializability presumably comes from the Cayenne
    // superclass hierarchy — TODO confirm against _LocalTimeTestEntity.
    private static final long serialVersionUID = 1L;
}
|
{
"pile_set_name": "Github"
}
|
.nh
.TH restic\-cache(1) "Jan 2017"
generated by \fB\fCrestic generate\fR
.SH NAME
.PP
restic\-cache \- Operate on local cache directories
.SH SYNOPSIS
.PP
\fBrestic cache [flags]\fP
.SH DESCRIPTION
.PP
The "cache" command allows listing and cleaning local cache directories.
.SH EXIT STATUS
.PP
Exit status is 0 if the command was successful, and non\-zero if there was any error.
.SH OPTIONS
.PP
\fB\-\-cleanup\fP[=false]
remove old cache directories
.PP
\fB\-h\fP, \fB\-\-help\fP[=false]
help for cache
.PP
\fB\-\-max\-age\fP=30
max age in \fB\fCdays\fR for cache directories to be considered old
.PP
\fB\-\-no\-size\fP[=false]
do not output the size of the cache directories
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB\-\-cacert\fP=[]
\fB\fCfile\fR to load root certificates from (default: use system certificates)
.PP
\fB\-\-cache\-dir\fP=""
set the cache \fB\fCdirectory\fR\&. (default: use system default cache directory)
.PP
\fB\-\-cleanup\-cache\fP[=false]
auto remove old cache directories
.PP
\fB\-\-json\fP[=false]
set output mode to JSON for commands that support it
.PP
\fB\-\-key\-hint\fP=""
\fB\fCkey\fR ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT)
.PP
\fB\-\-limit\-download\fP=0
limits downloads to a maximum rate in KiB/s. (default: unlimited)
.PP
\fB\-\-limit\-upload\fP=0
limits uploads to a maximum rate in KiB/s. (default: unlimited)
.PP
\fB\-\-no\-cache\fP[=false]
do not use a local cache
.PP
\fB\-\-no\-lock\fP[=false]
do not lock the repo, this allows some operations on read\-only repos
.PP
\fB\-o\fP, \fB\-\-option\fP=[]
set extended option (\fB\fCkey=value\fR, can be specified multiple times)
.PP
\fB\-\-password\-command\fP=""
shell \fB\fCcommand\fR to obtain the repository password from (default: $RESTIC\_PASSWORD\_COMMAND)
.PP
\fB\-p\fP, \fB\-\-password\-file\fP=""
\fB\fCfile\fR to read the repository password from (default: $RESTIC\_PASSWORD\_FILE)
.PP
\fB\-q\fP, \fB\-\-quiet\fP[=false]
do not output comprehensive progress report
.PP
\fB\-r\fP, \fB\-\-repo\fP=""
\fB\fCrepository\fR to backup to or restore from (default: $RESTIC\_REPOSITORY)
.PP
\fB\-\-tls\-client\-cert\fP=""
path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
.PP
\fB\-v\fP, \fB\-\-verbose\fP[=0]
be verbose (specify \-\-verbose multiple times or level \-\-verbose=\fB\fCn\fR)
.SH SEE ALSO
.PP
\fBrestic(1)\fP
|
{
"pile_set_name": "Github"
}
|
/*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package attributes defines a generic key/value store used in various gRPC
// components.
//
// All APIs in this package are EXPERIMENTAL.
package attributes
import "fmt"
// Attributes is an immutable struct for storing and retrieving generic
// key/value pairs. Keys must be hashable, and users should define their own
// types for keys.
type Attributes struct {
	// m holds the pairs. Within this file it is only written while a new
	// Attributes is being constructed (New/WithValues) and read by Value,
	// which is what makes the type immutable once published.
	m map[interface{}]interface{}
}
// New returns a new Attributes containing all key/value pairs in kvs. If the
// same key appears multiple times, the last value overwrites all previous
// values for that key. Panics if len(kvs) is not even.
func New(kvs ...interface{}) *Attributes {
	if len(kvs)%2 != 0 {
		panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
	}
	pairs := len(kvs) / 2
	attrs := &Attributes{m: make(map[interface{}]interface{}, pairs)}
	// Walk the flat slice two entries at a time: kvs[i] is a key, kvs[i+1] its value.
	for i := 0; i+1 < len(kvs); i += 2 {
		attrs.m[kvs[i]] = kvs[i+1]
	}
	return attrs
}
// WithValues returns a new Attributes containing all key/value pairs in a and
// kvs. Panics if len(kvs) is not even. If the same key appears multiple
// times, the last value overwrites all previous values for that key. To
// remove an existing key, use a nil value.
func (a *Attributes) WithValues(kvs ...interface{}) *Attributes {
	if a == nil {
		return New(kvs...)
	}
	if len(kvs)%2 != 0 {
		// Fix: the original message said "attributes.New", which misattributed
		// the failure; report the method that was actually called.
		panic(fmt.Sprintf("attributes.WithValues called with unexpected input: len(kvs) = %v", len(kvs)))
	}
	// Copy the existing pairs into a fresh map, then overlay the new ones so
	// the receiver stays immutable.
	n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)}
	for k, v := range a.m {
		n.m[k] = v
	}
	for i := 0; i < len(kvs)/2; i++ {
		n.m[kvs[i*2]] = kvs[i*2+1]
	}
	return n
}
// Value returns the value associated with these attributes for key, or nil if
// no value is associated with key.
func (a *Attributes) Value(key interface{}) interface{} {
	if a != nil {
		return a.m[key] // missing keys yield the zero value, i.e. nil
	}
	return nil
}
|
{
"pile_set_name": "Github"
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Id$
*/
#if !defined(XERCESC_INCLUDE_GUARD_KEYVALUEPAIR_HPP)
#define XERCESC_INCLUDE_GUARD_KEYVALUEPAIR_HPP
#include <xercesc/util/XMemory.hpp>
XERCES_CPP_NAMESPACE_BEGIN
// Simple key/value pair template. This header only declares the members;
// the definitions live in KeyValuePair.c, included at the bottom of this
// file for compilers without template-instantiation support.
template <class TKey, class TValue> class KeyValuePair : public XMemory
{
    public :
        // -------------------------------------------------------------------
        //  Constructors and Destructor
        // -------------------------------------------------------------------
        KeyValuePair();
        KeyValuePair(const TKey& key, const TValue& value);
        KeyValuePair(const KeyValuePair<TKey,TValue>& toCopy);
        ~KeyValuePair();

        // -------------------------------------------------------------------
        //  Getters (const and non-const overloads for both members)
        // -------------------------------------------------------------------
        const TKey& getKey() const;
        TKey& getKey();
        const TValue& getValue() const;
        TValue& getValue();

        // -------------------------------------------------------------------
        //  Setters
        // -------------------------------------------------------------------
        TKey& setKey(const TKey& newKey);
        TValue& setValue(const TValue& newValue);

    private :
        // copy-assignment deliberately declared but never defined,
        // making assignment a link-time error (pre-C++11 "= delete" idiom)
        // unimplemented:
        KeyValuePair<TKey,TValue>& operator=(const KeyValuePair<TKey,TValue>&);

        // -------------------------------------------------------------------
        //  Private data members
        //
        //  fKey
        //      The object that represents the key of the pair
        //
        //  fValue
        //      The object that represents the value of the pair
        // -------------------------------------------------------------------
        TKey fKey;
        TValue fValue;
};
XERCES_CPP_NAMESPACE_END
#if !defined(XERCES_TMPLSINC)
#include <xercesc/util/KeyValuePair.c>
#endif
#endif
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright 2007-2008 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api;
/**
 * <p>A {@code Rule} represents some action to perform when an unknown domain object is referenced. The rule can use the
 * domain object name to add an implicit domain object.</p>
 */
public interface Rule {
    /**
     * Returns the description of the rule. This is used for reporting purposes.
     *
     * @return the description. Should not return null.
     */
    String getDescription();

    /**
     * Applies this rule for the given unknown domain object. The rule can choose to ignore this name, or add a domain
     * object with the given name.
     *
     * @param domainObjectName The name of the unknown domain object.
     */
    void apply(String domainObjectName);
}
|
{
"pile_set_name": "Github"
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import static com.google.common.base.Preconditions.checkArgument;
import java.io.File;
import java.io.StringReader;
import java.io.StringWriter;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.conf.PropertyType;
import org.apache.commons.configuration2.CompositeConfiguration;
import org.apache.commons.configuration2.Configuration;
import org.apache.commons.configuration2.MapConfiguration;
import org.apache.commons.configuration2.PropertiesConfiguration;
import org.apache.commons.configuration2.builder.FileBasedConfigurationBuilder;
import org.apache.commons.configuration2.builder.fluent.Parameters;
import org.apache.commons.configuration2.ex.ConfigurationException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
/**
 * Contains a list of property keys recognized by the Accumulo client and convenience methods for
 * setting them.
 *
 * @since 1.6.0
 * @deprecated since 2.0.0, replaced by {@link Accumulo#newClient()}
 */
@Deprecated(since = "2.0.0")
public class ClientConfiguration {
  private static final Logger log = LoggerFactory.getLogger(ClientConfiguration.class);

  public static final String USER_ACCUMULO_DIR_NAME = ".accumulo";
  public static final String USER_CONF_FILENAME = "config";
  public static final String GLOBAL_CONF_FILENAME = "client.conf";

  // Backing store: ordered composite of the individual Configuration sources
  // this object was built from; all getters/setters delegate to it.
  private final CompositeConfiguration compositeConfig;

  public enum ClientProperty {
    // SSL
    RPC_SSL_TRUSTSTORE_PATH(Property.RPC_SSL_TRUSTSTORE_PATH),
    RPC_SSL_TRUSTSTORE_PASSWORD(Property.RPC_SSL_TRUSTSTORE_PASSWORD),
    RPC_SSL_TRUSTSTORE_TYPE(Property.RPC_SSL_TRUSTSTORE_TYPE),
    RPC_SSL_KEYSTORE_PATH(Property.RPC_SSL_KEYSTORE_PATH),
    RPC_SSL_KEYSTORE_PASSWORD(Property.RPC_SSL_KEYSTORE_PASSWORD),
    RPC_SSL_KEYSTORE_TYPE(Property.RPC_SSL_KEYSTORE_TYPE),
    RPC_USE_JSSE(Property.RPC_USE_JSSE),
    GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS),
    INSTANCE_RPC_SSL_CLIENT_AUTH(Property.INSTANCE_RPC_SSL_CLIENT_AUTH),
    INSTANCE_RPC_SSL_ENABLED(Property.INSTANCE_RPC_SSL_ENABLED),
    // ZooKeeper
    INSTANCE_ZK_HOST(Property.INSTANCE_ZK_HOST),
    INSTANCE_ZK_TIMEOUT(Property.INSTANCE_ZK_TIMEOUT),
    // Instance information
    INSTANCE_NAME("instance.name", null, PropertyType.STRING,
        "Name of Accumulo instance to connect to"),
    INSTANCE_ID("instance.id", null, PropertyType.STRING,
        "UUID of Accumulo instance to connect to"),
    // Tracing
    TRACE_SPAN_RECEIVERS(Property.TRACE_SPAN_RECEIVERS),
    TRACE_SPAN_RECEIVER_PREFIX(Property.TRACE_SPAN_RECEIVER_PREFIX),
    TRACE_ZK_PATH(Property.TRACE_ZK_PATH),
    // SASL / GSSAPI(Kerberos)
    /**
     * @since 1.7.0
     */
    INSTANCE_RPC_SASL_ENABLED(Property.INSTANCE_RPC_SASL_ENABLED),
    /**
     * @since 1.7.0
     */
    RPC_SASL_QOP(Property.RPC_SASL_QOP),
    /**
     * @since 1.7.0
     */
    KERBEROS_SERVER_PRIMARY("kerberos.server.primary", "accumulo", PropertyType.STRING,
        "The first component of the Kerberos principal, the 'primary', "
            + "that Accumulo servers use to login");

    // Key, default, type, and human-readable description backing this property.
    private String key;
    private String defaultValue;
    private PropertyType type;
    private String description;

    // Mirrors a server-side Property: same key, default, type, and description.
    private ClientProperty(Property prop) {
      this(prop.getKey(), prop.getDefaultValue(), prop.getType(), prop.getDescription());
    }

    private ClientProperty(String key, String defaultValue, PropertyType type, String description) {
      this.key = key;
      this.defaultValue = defaultValue;
      this.type = type;
      this.description = description;
    }

    public String getKey() {
      return key;
    }

    public String getDefaultValue() {
      return defaultValue;
    }

    private PropertyType getType() {
      return type;
    }

    public String getDescription() {
      return description;
    }

    // Linear scan over all enum values; returns null when no property uses the key.
    public static ClientProperty getPropertyByKey(String key) {
      for (ClientProperty prop : ClientProperty.values())
        if (prop.getKey().equals(key))
          return prop;
      return null;
    }
  }

  // All factory methods funnel into this private constructor.
  private ClientConfiguration(List<? extends Configuration> configs) {
    compositeConfig = new CompositeConfiguration(configs);
  }

  /**
   * Attempts to load a configuration file from the system using the default search paths. Uses the
   * <em>ACCUMULO_CLIENT_CONF_PATH</em> environment variable, split on <em>File.pathSeparator</em>,
   * for a list of target files.
   * <p>
   * If <em>ACCUMULO_CLIENT_CONF_PATH</em> is not set, uses the following in this order:
   * <ul>
   * <li>~/.accumulo/config
   * <li><em>$ACCUMULO_CONF_DIR</em>/client.conf, if <em>$ACCUMULO_CONF_DIR</em> is defined.
   * <li>/etc/accumulo/client.conf
   * <li>/etc/accumulo/conf/client.conf
   * </ul>
   * <p>
   * A client configuration will then be read from each location using
   * <em>PropertiesConfiguration</em> to construct a configuration. That means the latest item will
   * be the one in the configuration.
   *
   * @see PropertiesConfiguration
   * @see File#pathSeparator
   */
  public static ClientConfiguration loadDefault() {
    return loadFromSearchPath(getDefaultSearchPath());
  }

  /**
   * Initializes an empty configuration object to be further configured with other methods on the
   * class.
   *
   * @since 1.9.0
   */
  public static ClientConfiguration create() {
    return new ClientConfiguration(Collections.emptyList());
  }

  /**
   * Initializes a configuration object from the contents of a configuration file. Currently
   * supports Java "properties" files. The returned object can be further configured with subsequent
   * calls to other methods on this class.
   *
   * @param file
   *          the path to the configuration file
   * @since 1.9.0
   */
  public static ClientConfiguration fromFile(File file) {
    FileBasedConfigurationBuilder<PropertiesConfiguration> propsBuilder =
        new FileBasedConfigurationBuilder<>(PropertiesConfiguration.class)
            .configure(new Parameters().properties().setFile(file));
    try {
      return new ClientConfiguration(Collections.singletonList(propsBuilder.getConfiguration()));
    } catch (ConfigurationException e) {
      // surface parse/IO failures as an unchecked error naming the offending file
      throw new IllegalArgumentException("Bad configuration file: " + file, e);
    }
  }

  /**
   * Initializes a configuration object from the contents of a map. The returned object can be
   * further configured with subsequent calls to other methods on this class.
   *
   * @param properties
   *          a map containing the configuration properties to use
   * @since 1.9.0
   */
  public static ClientConfiguration fromMap(Map<String,String> properties) {
    MapConfiguration mapConf = new MapConfiguration(properties);
    return new ClientConfiguration(Collections.singletonList(mapConf));
  }

  @SuppressFBWarnings(value = "PATH_TRAVERSAL_IN",
      justification = "process runs in same security context as user who provided path")
  private static ClientConfiguration loadFromSearchPath(List<String> paths) {
    List<Configuration> configs = new LinkedList<>();
    for (String path : paths) {
      File conf = new File(path);
      // silently skip entries that don't exist or aren't readable files
      if (conf.isFile() && conf.canRead()) {
        FileBasedConfigurationBuilder<PropertiesConfiguration> propsBuilder =
            new FileBasedConfigurationBuilder<>(PropertiesConfiguration.class)
                .configure(new Parameters().properties().setFile(conf));
        try {
          configs.add(propsBuilder.getConfiguration());
          log.info("Loaded client configuration file {}", conf);
        } catch (ConfigurationException e) {
          throw new IllegalStateException("Error loading client configuration file " + conf, e);
        }
      }
    }
    // We couldn't find the client configuration anywhere
    if (configs.isEmpty()) {
      log.debug(
          "Found no client.conf in default paths. Using default client configuration values.");
    }
    return new ClientConfiguration(configs);
  }

  /**
   * Parses a configuration previously produced by {@link #serialize()} (Java properties syntax).
   *
   * @param serializedConfig
   *          properties-format text to parse
   * @throws IllegalArgumentException
   *           if the text cannot be parsed
   */
  public static ClientConfiguration deserialize(String serializedConfig) {
    PropertiesConfiguration propConfig = new PropertiesConfiguration();
    try {
      propConfig.getLayout().load(propConfig, new StringReader(serializedConfig));
    } catch (ConfigurationException e) {
      throw new IllegalArgumentException(
          "Error deserializing client configuration: " + serializedConfig, e);
    }
    return new ClientConfiguration(Collections.singletonList(propConfig));
  }

  /**
   * Muck the value of {@code clientConfPath} if it points to a directory by appending
   * {@code client.conf} to the end of the file path. This is a no-op if the value is not a
   * directory on the filesystem.
   *
   * @param clientConfPath
   *          The value of ACCUMULO_CLIENT_CONF_PATH.
   */
  @SuppressFBWarnings(value = "PATH_TRAVERSAL_IN",
      justification = "process runs in same security context as user who provided path")
  static String getClientConfPath(String clientConfPath) {
    if (clientConfPath == null) {
      return null;
    }
    File filePath = new File(clientConfPath);
    // If clientConfPath is a directory, tack on the default client.conf file name.
    if (filePath.exists() && filePath.isDirectory()) {
      return new File(filePath, "client.conf").toString();
    }
    return clientConfPath;
  }

  // Computes the ordered list of candidate config files; see loadDefault() javadoc.
  private static List<String> getDefaultSearchPath() {
    String clientConfSearchPath = getClientConfPath(System.getenv("ACCUMULO_CLIENT_CONF_PATH"));
    List<String> clientConfPaths;
    if (clientConfSearchPath != null) {
      clientConfPaths = Arrays.asList(clientConfSearchPath.split(File.pathSeparator));
    } else {
      // if $ACCUMULO_CLIENT_CONF_PATH env isn't set, priority from top to bottom is:
      // ~/.accumulo/config
      // $ACCUMULO_CONF_DIR/client.conf
      // /etc/accumulo/client.conf
      // /etc/accumulo/conf/client.conf
      clientConfPaths = new LinkedList<>();
      clientConfPaths.add(System.getProperty("user.home") + File.separator + USER_ACCUMULO_DIR_NAME
          + File.separator + USER_CONF_FILENAME);
      if (System.getenv("ACCUMULO_CONF_DIR") != null) {
        clientConfPaths
            .add(System.getenv("ACCUMULO_CONF_DIR") + File.separator + GLOBAL_CONF_FILENAME);
      }
      clientConfPaths.add("/etc/accumulo/" + GLOBAL_CONF_FILENAME);
      clientConfPaths.add("/etc/accumulo/conf/" + GLOBAL_CONF_FILENAME);
    }
    return clientConfPaths;
  }

  /**
   * Renders this configuration as Java properties text; the inverse of
   * {@link #deserialize(String)}.
   */
  public String serialize() {
    PropertiesConfiguration propConfig = new PropertiesConfiguration();
    propConfig.copy(compositeConfig);
    StringWriter writer = new StringWriter();
    try {
      propConfig.getLayout().save(propConfig, writer);
    } catch (ConfigurationException e) {
      // this should never happen
      throw new IllegalStateException(e);
    }
    return writer.toString();
  }

  /**
   * Returns the value for prop, the default value if not present.
   *
   * @return the configured value, or {@code prop}'s default when the key is absent
   */
  public String get(ClientProperty prop) {
    if (compositeConfig.containsKey(prop.getKey()))
      return compositeConfig.getString(prop.getKey());
    else
      return prop.getDefaultValue();
  }

  // Guards prefix-only operations; throws IllegalArgumentException for other types.
  private void checkType(ClientProperty property, PropertyType type) {
    if (!property.getType().equals(type)) {
      String msg = "Configuration method intended for type " + type + " called with a "
          + property.getType() + " argument (" + property.getKey() + ")";
      throw new IllegalArgumentException(msg);
    }
  }

  /**
   * Gets all properties under the given prefix in this configuration.
   *
   * @param property
   *          prefix property, must be of type PropertyType.PREFIX
   * @return a map of property keys to values
   * @throws IllegalArgumentException
   *           if property is not a prefix
   */
  public Map<String,String> getAllPropertiesWithPrefix(ClientProperty property) {
    checkType(property, PropertyType.PREFIX);
    Map<String,String> propMap = new HashMap<>();
    String prefix = property.getKey();
    // strip the trailing dot so getKeys(prefix) matches the key family
    if (prefix.endsWith(".")) {
      prefix = prefix.substring(0, prefix.length() - 1);
    }
    Iterator<?> iter = compositeConfig.getKeys(prefix);
    while (iter.hasNext()) {
      String p = (String) iter.next();
      propMap.put(p, compositeConfig.getString(p));
    }
    return propMap;
  }

  /**
   * Sets the value of property to value
   *
   */
  public void setProperty(ClientProperty prop, String value) {
    with(prop, value);
  }

  /**
   * Same as {@link #setProperty(ClientProperty, String)} but returns the ClientConfiguration for
   * chaining purposes
   */
  public ClientConfiguration with(ClientProperty prop, String value) {
    return with(prop.getKey(), value);
  }

  /**
   * Sets the value of property to value
   *
   * @since 1.9.0
   */
  public void setProperty(String prop, String value) {
    with(prop, value);
  }

  /**
   * Same as {@link #setProperty(String, String)} but returns the ClientConfiguration for chaining
   * purposes
   *
   * @since 1.9.0
   */
  public ClientConfiguration with(String prop, String value) {
    compositeConfig.setProperty(prop, value);
    return this;
  }

  /**
   * Same as {@link #with(ClientProperty, String)} for ClientProperty.INSTANCE_NAME
   *
   */
  public ClientConfiguration withInstance(String instanceName) {
    checkArgument(instanceName != null, "instanceName is null");
    return with(ClientProperty.INSTANCE_NAME, instanceName);
  }

  /**
   * Same as {@link #with(ClientProperty, String)} for ClientProperty.INSTANCE_ID
   *
   */
  public ClientConfiguration withInstance(UUID instanceId) {
    checkArgument(instanceId != null, "instanceId is null");
    return with(ClientProperty.INSTANCE_ID, instanceId.toString());
  }

  /**
   * Same as {@link #with(ClientProperty, String)} for ClientProperty.INSTANCE_ZK_HOST
   *
   */
  public ClientConfiguration withZkHosts(String zooKeepers) {
    checkArgument(zooKeepers != null, "zooKeepers is null");
    return with(ClientProperty.INSTANCE_ZK_HOST, zooKeepers);
  }

  /**
   * Same as {@link #with(ClientProperty, String)} for ClientProperty.INSTANCE_ZK_TIMEOUT
   *
   */
  public ClientConfiguration withZkTimeout(int timeout) {
    return with(ClientProperty.INSTANCE_ZK_TIMEOUT, String.valueOf(timeout));
  }

  /**
   * Same as {@link #withSsl(boolean, boolean)} with useJsseConfig set to false
   *
   */
  public ClientConfiguration withSsl(boolean sslEnabled) {
    return withSsl(sslEnabled, false);
  }

  /**
   * Same as {@link #with(ClientProperty, String)} for ClientProperty.INSTANCE_RPC_SSL_ENABLED and
   * ClientProperty.RPC_USE_JSSE
   *
   */
  public ClientConfiguration withSsl(boolean sslEnabled, boolean useJsseConfig) {
    return with(ClientProperty.INSTANCE_RPC_SSL_ENABLED, String.valueOf(sslEnabled))
        .with(ClientProperty.RPC_USE_JSSE, String.valueOf(useJsseConfig));
  }

  /**
   * Same as {@link #withTruststore(String, String, String)} with password null and type null
   *
   */
  public ClientConfiguration withTruststore(String path) {
    return withTruststore(path, null, null);
  }

  /**
   * Same as {@link #with(ClientProperty, String)} for ClientProperty.RPC_SSL_TRUSTORE_PATH,
   * ClientProperty.RPC_SSL_TRUSTORE_PASSWORD, and ClientProperty.RPC_SSL_TRUSTORE_TYPE
   *
   */
  public ClientConfiguration withTruststore(String path, String password, String type) {
    checkArgument(path != null, "path is null");
    setProperty(ClientProperty.RPC_SSL_TRUSTSTORE_PATH, path);
    if (password != null)
      setProperty(ClientProperty.RPC_SSL_TRUSTSTORE_PASSWORD, password);
    if (type != null)
      setProperty(ClientProperty.RPC_SSL_TRUSTSTORE_TYPE, type);
    return this;
  }

  /**
   * Same as {@link #withKeystore(String, String, String)} with password null and type null
   *
   */
  public ClientConfiguration withKeystore(String path) {
    return withKeystore(path, null, null);
  }

  /**
   * Same as {@link #with(ClientProperty, String)} for ClientProperty.INSTANCE_RPC_SSL_CLIENT_AUTH,
   * ClientProperty.RPC_SSL_KEYSTORE_PATH, ClientProperty.RPC_SSL_KEYSTORE_PASSWORD, and
   * ClientProperty.RPC_SSL_KEYSTORE_TYPE
   *
   */
  public ClientConfiguration withKeystore(String path, String password, String type) {
    checkArgument(path != null, "path is null");
    // supplying a keystore implies client-auth SSL
    setProperty(ClientProperty.INSTANCE_RPC_SSL_CLIENT_AUTH, "true");
    setProperty(ClientProperty.RPC_SSL_KEYSTORE_PATH, path);
    if (password != null)
      setProperty(ClientProperty.RPC_SSL_KEYSTORE_PASSWORD, password);
    if (type != null)
      setProperty(ClientProperty.RPC_SSL_KEYSTORE_TYPE, type);
    return this;
  }

  /**
   * Same as {@link #with(ClientProperty, String)} for ClientProperty.INSTANCE_RPC_SASL_ENABLED.
   *
   * @since 1.7.0
   */
  public ClientConfiguration withSasl(boolean saslEnabled) {
    return with(ClientProperty.INSTANCE_RPC_SASL_ENABLED, String.valueOf(saslEnabled));
  }

  /**
   * Show whether SASL has been set on this configuration.
   *
   * @since 1.9.0
   */
  public boolean hasSasl() {
    return compositeConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(),
        Boolean.parseBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getDefaultValue()));
  }

  /**
   * Same as {@link #with(ClientProperty, String)} for ClientProperty.INSTANCE_RPC_SASL_ENABLED and
   * ClientProperty.GENERAL_KERBEROS_PRINCIPAL.
   *
   * @param saslEnabled
   *          Should SASL(kerberos) be enabled
   * @param kerberosServerPrimary
   *          The 'primary' component of the Kerberos principal Accumulo servers use to login (e.g.
   *          'accumulo' in 'accumulo/_HOST@REALM')
   * @since 1.7.0
   */
  public ClientConfiguration withSasl(boolean saslEnabled, String kerberosServerPrimary) {
    return withSasl(saslEnabled).with(ClientProperty.KERBEROS_SERVER_PRIMARY,
        kerberosServerPrimary);
  }

  public boolean containsKey(String key) {
    return compositeConfig.containsKey(key);
  }

  public Iterator<String> getKeys() {
    return compositeConfig.getKeys();
  }

  public String getString(String key) {
    return compositeConfig.getString(key);
  }
}
|
{
"pile_set_name": "Github"
}
|
// Copyright (c) 2016, 2018, 2020, Oracle and/or its affiliates. All rights reserved.
// This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
// Code generated. DO NOT EDIT.
// Object Storage Service API
//
// Common set of Object Storage and Archive Storage APIs for managing buckets, objects, and related resources.
// For more information, see Overview of Object Storage (https://docs.cloud.oracle.com/Content/Object/Concepts/objectstorageoverview.htm) and
// Overview of Archive Storage (https://docs.cloud.oracle.com/Content/Archive/Concepts/archivestorageoverview.htm).
//
package objectstorage
import (
"github.com/oracle/oci-go-sdk/v25/common"
)
// SseCustomerKeyDetails Specifies the details of the customer-provided encryption key (SSE-C) associated with an object.
// NOTE: generated code ("Code generated. DO NOT EDIT." at the top of this file);
// field semantics mirror the Object Storage API spec.
type SseCustomerKeyDetails struct {
	// Specifies the encryption algorithm. The only supported value is "AES256".
	Algorithm SseCustomerKeyDetailsAlgorithmEnum `mandatory:"true" json:"algorithm"`

	// Specifies the base64-encoded 256-bit encryption key to use to encrypt or decrypt the object data.
	Key *string `mandatory:"true" json:"key"`

	// Specifies the base64-encoded SHA256 hash of the encryption key. This value is used to check the integrity
	// of the encryption key.
	KeySha256 *string `mandatory:"true" json:"keySha256"`
}
// String renders the struct via common.PointerString — presumably so pointer
// fields print their pointed-to values rather than addresses (see oci-go-sdk
// common package docs to confirm).
func (m SseCustomerKeyDetails) String() string {
	return common.PointerString(m)
}
// SseCustomerKeyDetailsAlgorithmEnum Enum with underlying type: string
type SseCustomerKeyDetailsAlgorithmEnum string

// Set of constants representing the allowable values for SseCustomerKeyDetailsAlgorithmEnum
const (
	SseCustomerKeyDetailsAlgorithmAes256 SseCustomerKeyDetailsAlgorithmEnum = "AES256"
)

// mappingSseCustomerKeyDetailsAlgorithm maps the wire value to its enum constant.
var mappingSseCustomerKeyDetailsAlgorithm = map[string]SseCustomerKeyDetailsAlgorithmEnum{
	"AES256": SseCustomerKeyDetailsAlgorithmAes256,
}
// GetSseCustomerKeyDetailsAlgorithmEnumValues Enumerates the set of values for
// SseCustomerKeyDetailsAlgorithmEnum. The order of the returned slice is
// unspecified, since Go map iteration order is not deterministic.
func GetSseCustomerKeyDetailsAlgorithmEnumValues() []SseCustomerKeyDetailsAlgorithmEnum {
	// Preallocate to the map's size so append never reallocates.
	values := make([]SseCustomerKeyDetailsAlgorithmEnum, 0, len(mappingSseCustomerKeyDetailsAlgorithm))
	for _, v := range mappingSseCustomerKeyDetailsAlgorithm {
		values = append(values, v)
	}
	return values
}
|
{
"pile_set_name": "Github"
}
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
scheme "k8s.io/client-go/kubernetes/scheme"
v1 "k8s.io/client-go/pkg/apis/networking/v1"
rest "k8s.io/client-go/rest"
)
// NetworkPoliciesGetter has a method to return a NetworkPolicyInterface.
// A group's client should implement this interface.
type NetworkPoliciesGetter interface {
	NetworkPolicies(namespace string) NetworkPolicyInterface
}

// NetworkPolicyInterface has methods to work with NetworkPolicy resources.
// (Generated client code; one method per standard Kubernetes verb.)
type NetworkPolicyInterface interface {
	Create(*v1.NetworkPolicy) (*v1.NetworkPolicy, error)
	Update(*v1.NetworkPolicy) (*v1.NetworkPolicy, error)
	Delete(name string, options *meta_v1.DeleteOptions) error
	DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error
	Get(name string, options meta_v1.GetOptions) (*v1.NetworkPolicy, error)
	List(opts meta_v1.ListOptions) (*v1.NetworkPolicyList, error)
	Watch(opts meta_v1.ListOptions) (watch.Interface, error)
	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.NetworkPolicy, err error)
	// NetworkPolicyExpansion carries hand-written extension methods, if any.
	NetworkPolicyExpansion
}
// networkPolicies implements NetworkPolicyInterface
type networkPolicies struct {
	client rest.Interface // REST client all requests are issued through
	ns     string         // namespace applied to every request via Namespace(c.ns)
}
// newNetworkPolicies returns a NetworkPolicies client scoped to the given namespace.
func newNetworkPolicies(c *NetworkingV1Client, namespace string) *networkPolicies {
	np := &networkPolicies{
		ns:     namespace,
		client: c.RESTClient(),
	}
	return np
}
// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any.
func (c *networkPolicies) Create(networkPolicy *v1.NetworkPolicy) (result *v1.NetworkPolicy, err error) {
	result = &v1.NetworkPolicy{}
	req := c.client.Post().
		Namespace(c.ns).
		Resource("networkpolicies").
		Body(networkPolicy)
	err = req.Do().Into(result)
	return
}
// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any.
func (c *networkPolicies) Update(networkPolicy *v1.NetworkPolicy) (result *v1.NetworkPolicy, err error) {
	result = &v1.NetworkPolicy{}
	req := c.client.Put().
		Namespace(c.ns).
		Resource("networkpolicies").
		Name(networkPolicy.Name).
		Body(networkPolicy)
	err = req.Do().Into(result)
	return
}
// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs.
func (c *networkPolicies) Delete(name string, options *meta_v1.DeleteOptions) error {
	req := c.client.Delete().
		Namespace(c.ns).
		Resource("networkpolicies").
		Name(name).
		Body(options)
	return req.Do().Error()
}
// DeleteCollection deletes a collection of objects.
func (c *networkPolicies) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error {
	req := c.client.Delete().
		Namespace(c.ns).
		Resource("networkpolicies").
		VersionedParams(&listOptions, scheme.ParameterCodec).
		Body(options)
	return req.Do().Error()
}
// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any.
func (c *networkPolicies) Get(name string, options meta_v1.GetOptions) (result *v1.NetworkPolicy, err error) {
	result = &v1.NetworkPolicy{}
	req := c.client.Get().
		Namespace(c.ns).
		Resource("networkpolicies").
		Name(name).
		VersionedParams(&options, scheme.ParameterCodec)
	err = req.Do().Into(result)
	return
}
// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors.
func (c *networkPolicies) List(opts meta_v1.ListOptions) (result *v1.NetworkPolicyList, err error) {
	result = &v1.NetworkPolicyList{}
	req := c.client.Get().
		Namespace(c.ns).
		Resource("networkpolicies").
		VersionedParams(&opts, scheme.ParameterCodec)
	err = req.Do().Into(result)
	return
}
// Watch returns a watch.Interface that watches the requested networkPolicies.
func (c *networkPolicies) Watch(opts meta_v1.ListOptions) (watch.Interface, error) {
	// Force the watch flag so the server streams changes instead of listing.
	opts.Watch = true
	req := c.client.Get().
		Namespace(c.ns).
		Resource("networkpolicies").
		VersionedParams(&opts, scheme.ParameterCodec)
	return req.Watch()
}
// Patch applies the patch and returns the patched networkPolicy.
func (c *networkPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.NetworkPolicy, err error) {
	result = &v1.NetworkPolicy{}
	req := c.client.Patch(pt).
		Namespace(c.ns).
		Resource("networkpolicies").
		SubResource(subresources...).
		Name(name).
		Body(data)
	err = req.Do().Into(result)
	return
}
|
{
"pile_set_name": "Github"
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.