// Repository: gridgain/gridgain
/*
* Copyright 2019 GridGain Systems, Inc. and Contributors.
*
* Licensed under the GridGain Community Edition License (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.gridgain.com/products/software/community-edition/gridgain-community-edition-license
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.managers.communication;
import java.io.Serializable;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Deque;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Queue;
import java.util.UUID;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.atomic.LongAdder;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.IgniteException;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.events.DiscoveryEvent;
import org.apache.ignite.events.Event;
import org.apache.ignite.internal.GridJobExecuteResponse;
import org.apache.ignite.internal.GridKernalContext;
import org.apache.ignite.internal.GridTopic;
import org.apache.ignite.internal.IgniteClientDisconnectedCheckedException;
import org.apache.ignite.internal.IgniteComponentType;
import org.apache.ignite.internal.IgniteDeploymentCheckedException;
import org.apache.ignite.internal.IgniteFeatures;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;
import org.apache.ignite.internal.direct.DirectMessageReader;
import org.apache.ignite.internal.direct.DirectMessageWriter;
import org.apache.ignite.internal.managers.GridManagerAdapter;
import org.apache.ignite.internal.managers.deployment.GridDeployment;
import org.apache.ignite.internal.managers.discovery.CustomEventListener;
import org.apache.ignite.internal.managers.discovery.IgniteDiscoverySpi;
import org.apache.ignite.internal.managers.eventstorage.GridEventStorageManager;
import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener;
import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsAbstractMessage;
import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccMessage;
import org.apache.ignite.internal.processors.metric.MetricRegistry;
import org.apache.ignite.internal.processors.platform.message.PlatformMessageFilter;
import org.apache.ignite.internal.processors.pool.PoolProcessor;
import org.apache.ignite.internal.processors.security.IgniteSecurity;
import org.apache.ignite.internal.processors.security.OperationSecurityContext;
import org.apache.ignite.internal.processors.security.SecurityContext;
import org.apache.ignite.internal.processors.timeout.GridTimeoutObject;
import org.apache.ignite.internal.processors.tracing.MTC;
import org.apache.ignite.internal.processors.tracing.MTC.TraceSurroundings;
import org.apache.ignite.internal.processors.tracing.Span;
import org.apache.ignite.internal.processors.tracing.SpanTags;
import org.apache.ignite.internal.util.GridBoundedConcurrentLinkedHashSet;
import org.apache.ignite.internal.util.IgniteUtils;
import org.apache.ignite.internal.util.StripedCompositeReadWriteLock;
import org.apache.ignite.internal.util.future.GridFinishedFuture;
import org.apache.ignite.internal.util.future.GridFutureAdapter;
import org.apache.ignite.internal.util.lang.IgnitePair;
import org.apache.ignite.internal.util.tostring.GridToStringInclude;
import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.internal.util.typedef.T2;
import org.apache.ignite.internal.util.typedef.X;
import org.apache.ignite.internal.util.typedef.internal.LT;
import org.apache.ignite.internal.util.typedef.internal.S;
import org.apache.ignite.internal.util.typedef.internal.U;
import org.apache.ignite.lang.IgniteBiPredicate;
import org.apache.ignite.lang.IgniteInClosure;
import org.apache.ignite.lang.IgniteRunnable;
import org.apache.ignite.lang.IgniteUuid;
import org.apache.ignite.marshaller.Marshaller;
import org.apache.ignite.marshaller.MarshallerUtils;
import org.apache.ignite.plugin.extensions.communication.Message;
import org.apache.ignite.plugin.extensions.communication.MessageFactory;
import org.apache.ignite.plugin.extensions.communication.MessageFormatter;
import org.apache.ignite.plugin.extensions.communication.MessageReader;
import org.apache.ignite.plugin.extensions.communication.MessageWriter;
import org.apache.ignite.spi.IgniteSpiException;
import org.apache.ignite.spi.communication.CommunicationListener;
import org.apache.ignite.spi.communication.CommunicationSpi;
import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;
import org.apache.ignite.spi.communication.tcp.internal.ConnectionRequestor;
import org.apache.ignite.spi.communication.tcp.internal.TcpConnectionRequestDiscoveryMessage;
import org.apache.ignite.spi.communication.tcp.internal.TcpInverseConnectionResponseMessage;
import org.apache.ignite.thread.IgniteThreadFactory;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import static org.apache.ignite.cluster.ClusterState.INACTIVE;
import static org.apache.ignite.events.EventType.EVT_NODE_FAILED;
import static org.apache.ignite.events.EventType.EVT_NODE_JOINED;
import static org.apache.ignite.events.EventType.EVT_NODE_LEFT;
import static org.apache.ignite.internal.GridTopic.TOPIC_CACHE_COORDINATOR;
import static org.apache.ignite.internal.GridTopic.TOPIC_COMM_SYSTEM;
import static org.apache.ignite.internal.GridTopic.TOPIC_COMM_USER;
import static org.apache.ignite.internal.GridTopic.TOPIC_IO_TEST;
import static org.apache.ignite.internal.IgniteFeatures.IGNITE_SECURITY_PROCESSOR;
import static org.apache.ignite.internal.IgniteFeatures.IGNITE_SECURITY_PROCESSOR_V2;
import static org.apache.ignite.internal.IgniteFeatures.INVERSE_TCP_CONNECTION;
import static org.apache.ignite.internal.IgniteFeatures.allNodesSupports;
import static org.apache.ignite.internal.managers.communication.GridIoPolicy.AFFINITY_POOL;
import static org.apache.ignite.internal.managers.communication.GridIoPolicy.DATA_STREAMER_POOL;
import static org.apache.ignite.internal.managers.communication.GridIoPolicy.IDX_POOL;
import static org.apache.ignite.internal.managers.communication.GridIoPolicy.MANAGEMENT_POOL;
import static org.apache.ignite.internal.managers.communication.GridIoPolicy.P2P_POOL;
import static org.apache.ignite.internal.managers.communication.GridIoPolicy.PUBLIC_POOL;
import static org.apache.ignite.internal.managers.communication.GridIoPolicy.QUERY_POOL;
import static org.apache.ignite.internal.managers.communication.GridIoPolicy.REBALANCE_POOL;
import static org.apache.ignite.internal.managers.communication.GridIoPolicy.SCHEMA_POOL;
import static org.apache.ignite.internal.managers.communication.GridIoPolicy.SERVICE_POOL;
import static org.apache.ignite.internal.managers.communication.GridIoPolicy.SYSTEM_POOL;
import static org.apache.ignite.internal.managers.communication.GridIoPolicy.UTILITY_CACHE_POOL;
import static org.apache.ignite.internal.managers.communication.GridIoPolicy.isReservedGridIoPolicy;
import static org.apache.ignite.internal.processors.metric.impl.MetricUtils.metricName;
import static org.apache.ignite.internal.processors.tracing.MTC.support;
import static org.apache.ignite.internal.processors.tracing.SpanType.COMMUNICATION_ORDERED_PROCESS;
import static org.apache.ignite.internal.processors.tracing.SpanType.COMMUNICATION_REGULAR_PROCESS;
import static org.apache.ignite.internal.processors.tracing.messages.TraceableMessagesTable.traceName;
import static org.apache.ignite.internal.util.nio.GridNioBackPressureControl.threadProcessingMessage;
import static org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi.ATTR_PAIRED_CONN;
import static org.jsr166.ConcurrentLinkedHashMap.QueuePolicy.PER_SEGMENT_Q_OPTIMIZED_RMV;
/**
* Grid communication manager.
*/
public class GridIoManager extends GridManagerAdapter<CommunicationSpi<Serializable>> {
/** Io communication metrics registry name. */
public static final String COMM_METRICS = metricName("io", "communication");

/** Outbound message queue size metric name. */
public static final String OUTBOUND_MSG_QUEUE_CNT = "OutboundMessagesQueueSize";

/** Sent messages count metric name. */
public static final String SENT_MSG_CNT = "SentMessagesCount";

/** Sent bytes count metric name. */
public static final String SENT_BYTES_CNT = "SentBytesCount";

/** Received messages count metric name. */
public static final String RCVD_MSGS_CNT = "ReceivedMessagesCount";

/** Received bytes count metric name. */
public static final String RCVD_BYTES_CNT = "ReceivedBytesCount";

/** Empty array of message factories. */
public static final MessageFactory[] EMPTY = {};

/** Max closed topics to store. */
public static final int MAX_CLOSED_TOPICS = 10240;

/** Direct protocol version attribute name. */
public static final String DIRECT_PROTO_VER_ATTR = "comm.direct.proto.ver";

/** Direct protocol version. */
public static final byte DIRECT_PROTO_VER = 3;

/** Current IO policy. */
private static final ThreadLocal<Byte> CUR_PLC = new ThreadLocal<>();

/** Listeners by topic. */
private final ConcurrentMap<Object, GridMessageListener> lsnrMap = new ConcurrentHashMap<>();

/** System listeners, one slot per {@link GridTopic} ordinal; guarded by {@code sysLsnrsMux}. */
private volatile GridMessageListener[] sysLsnrs;

/** Mutex for system listeners. */
private final Object sysLsnrsMux = new Object();

/** Disconnect listeners. */
private final Collection<GridDisconnectListener> disconnectLsnrs = new ConcurrentLinkedQueue<>();

/** Pool processor. */
private final PoolProcessor pools;

/** Discovery listener. */
private GridLocalEventListener discoLsnr;

/** Ordered message sets: topic -> (sender node ID -> message set). */
private final ConcurrentMap<Object, ConcurrentMap<UUID, GridCommunicationMessageSet>> msgSetMap =
    new ConcurrentHashMap<>();

/** Local node ID. */
private volatile UUID locNodeId;

/** Cache for messages that were received prior to discovery. */
private final ConcurrentMap<UUID, Deque<DelayedMessage>> waitMap = new ConcurrentHashMap<>();

/** Communication message listener. */
private CommunicationListener<Serializable> commLsnr;

/** Grid marshaller. */
private final Marshaller marsh;

/** Busy lock. */
private final ReadWriteLock busyLock =
    new StripedCompositeReadWriteLock(Runtime.getRuntime().availableProcessors());

/** Lock to sync maps access. */
private final ReadWriteLock lock = new ReentrantReadWriteLock();

/** Fully started flag. When set to true, can send and receive messages. */
private volatile boolean started;

/** Closed topics. */
private final GridBoundedConcurrentLinkedHashSet<Object> closedTopics =
    new GridBoundedConcurrentLinkedHashSet<>(MAX_CLOSED_TOPICS, MAX_CLOSED_TOPICS, 0.75f, 256,
        PER_SEGMENT_Q_OPTIMIZED_RMV);

/** Message factory. */
private MessageFactory msgFactory;

/** Message formatter. */
private MessageFormatter formatter;

/** Stopping flag. */
private boolean stopping;

/** IO test futures keyed by test ID; lazily initialized, access via {@code ioTestMap()}. */
private final AtomicReference<ConcurrentHashMap<Long, IoTestFuture>> ioTestMap = new AtomicReference<>();

/** IO test ID generator. */
private final AtomicLong ioTestId = new AtomicLong();

/** Inverse TCP connection request handler (registered with the SPI in {@code start()}). */
private final TcpCommunicationInverseConnectionHandler invConnHandler = new TcpCommunicationInverseConnectionHandler();

/** No-op runnable. */
private static final IgniteRunnable NOOP = () -> {};

/** Version of security processor feature supported by cluster, {@code null} if not supported. */
@Nullable private volatile IgniteFeatures secProcSupported;
/**
 * Creates the communication manager on top of the configured communication SPI.
 *
 * @param ctx Grid kernal context.
 */
@SuppressWarnings("deprecation")
public GridIoManager(GridKernalContext ctx) {
    super(ctx, ctx.config().getCommunicationSpi());

    pools = ctx.pools();

    assert pools != null;

    locNodeId = ctx.localNodeId();

    marsh = ctx.config().getMarshaller();

    // Pre-size the system listener array with one slot per GridTopic ordinal.
    synchronized (sysLsnrsMux) {
        sysLsnrs = new GridMessageListener[GridTopic.values().length];
    }
}
/**
 * Returns the message factory initialized during {@link #start()}.
 *
 * @return Message factory.
 */
public MessageFactory messageFactory() {
    MessageFactory factory = msgFactory;

    assert factory != null;

    return factory;
}
/**
 * Returns the message formatter initialized during {@link #start()}.
 *
 * @return Message writer factory.
 */
public MessageFormatter formatter() {
    MessageFormatter fmt = formatter;

    assert fmt != null;

    return fmt;
}
/**
 * Resets metrics for this manager by delegating to the underlying communication SPI.
 */
public void resetMetrics() {
    CommunicationSpi<Serializable> spi = getSpi();

    spi.resetMetrics();
}
/** {@inheritDoc} */
@Override public void start() throws IgniteCheckedException {
    // Advertise the direct-marshalling protocol version to other nodes.
    ctx.addNodeAttribute(DIRECT_PROTO_VER_ATTR, DIRECT_PROTO_VER);

    // Use a plugin-provided formatter if present; more than one is a misconfiguration.
    MessageFormatter[] formatterExt = ctx.plugins().extensions(MessageFormatter.class);

    if (formatterExt != null && formatterExt.length > 0) {
        if (formatterExt.length > 1)
            throw new IgniteCheckedException("More than one MessageFormatter extension is defined. Check your " +
                "plugins configuration and make sure that only one of them provides custom message format.");

        formatter = formatterExt[0];
    }
    else {
        // Default formatter: negotiates the direct protocol version per remote node.
        formatter = new MessageFormatter() {
            @Override public MessageWriter writer(UUID rmtNodeId) throws IgniteCheckedException {
                assert rmtNodeId != null;

                return new DirectMessageWriter(U.directProtocolVersion(ctx, rmtNodeId));
            }

            @Override public MessageReader reader(UUID rmtNodeId, MessageFactory msgFactory)
                throws IgniteCheckedException {
                // Null remote node ID falls back to the local protocol version.
                return new DirectMessageReader(msgFactory,
                    rmtNodeId != null ? U.directProtocolVersion(ctx, rmtNodeId) : DIRECT_PROTO_VER);
            }
        };
    }

    // Combine plugin-provided message factories with per-component factories.
    MessageFactory[] msgs = ctx.plugins().extensions(MessageFactory.class);

    if (msgs == null)
        msgs = EMPTY;

    List<MessageFactory> compMsgs = new ArrayList<>();

    compMsgs.add(new GridIoMessageFactory());

    for (IgniteComponentType compType : IgniteComponentType.values()) {
        MessageFactory f = compType.messageFactory();

        if (f != null)
            compMsgs.add(f);
    }

    if (!compMsgs.isEmpty())
        msgs = F.concat(msgs, compMsgs.toArray(new MessageFactory[compMsgs.size()]));

    msgFactory = new IgniteMessageFactoryImpl(msgs);

    CommunicationSpi<Serializable> spi = getSpi();

    // Inverse connection requests are only wired up for the TCP communication SPI.
    if ((CommunicationSpi<?>)spi instanceof TcpCommunicationSpi)
        getTcpCommunicationSpi().setConnectionRequestor(invConnHandler);

    startSpi();

    // Expose SPI counters through the metrics framework.
    MetricRegistry ioMetric = ctx.metric().registry(COMM_METRICS);

    ioMetric.register(OUTBOUND_MSG_QUEUE_CNT, spi::getOutboundMessagesQueueSize,
        "Outbound messages queue size.");

    ioMetric.register(SENT_MSG_CNT, spi::getSentMessagesCount, "Sent messages count.");

    ioMetric.register(SENT_BYTES_CNT, spi::getSentBytesCount, "Sent bytes count.");

    ioMetric.register(RCVD_MSGS_CNT, spi::getReceivedMessagesCount,
        "Received messages count.");

    ioMetric.register(RCVD_BYTES_CNT, spi::getReceivedBytesCount, "Received bytes count.");

    // Route all SPI-level messages into onMessage0; non-GridIoMessage payloads are rejected.
    getSpi().setListener(commLsnr = new CommunicationListener<Serializable>() {
        @Override public void onMessage(UUID nodeId, Serializable msg, IgniteRunnable msgC) {
            try {
                onMessage0(nodeId, (GridIoMessage)msg, msgC);
            }
            catch (ClassCastException ignored) {
                U.error(log, "Communication manager received message of unknown type (will ignore): " +
                    msg.getClass().getName() + ". Most likely GridCommunicationSpi is being used directly, " +
                    "which is illegal - make sure to send messages only via GridProjection API.");
            }
        }

        @Override public void onDisconnected(UUID nodeId) {
            for (GridDisconnectListener lsnr : disconnectLsnrs)
                lsnr.onNodeDisconnected(nodeId);
        }
    });

    if (log.isDebugEnabled())
        log.debug(startInfo());

    // IO test echo service: requests are answered back, responses complete local futures.
    addMessageListener(GridTopic.TOPIC_IO_TEST, new GridMessageListener() {
        @Override public void onMessage(UUID nodeId, Object msg, byte plc) {
            ClusterNode node = ctx.discovery().node(nodeId);

            if (node == null)
                return;

            IgniteIoTestMessage msg0 = (IgniteIoTestMessage)msg;

            msg0.senderNodeId(nodeId);

            if (msg0.request()) {
                IgniteIoTestMessage res = new IgniteIoTestMessage(msg0.id(), false, null);

                res.flags(msg0.flags());

                res.onRequestProcessed();

                res.copyDataFromRequest(msg0);

                try {
                    sendToGridTopic(node, GridTopic.TOPIC_IO_TEST, res, GridIoPolicy.SYSTEM_POOL);
                }
                catch (IgniteCheckedException e) {
                    U.error(log, "Failed to send IO test response [msg=" + msg0 + "]", e);
                }
            }
            else {
                IoTestFuture fut = ioTestMap().get(msg0.id());

                msg0.onResponseProcessed();

                if (fut == null)
                    U.warn(log, "Failed to find IO test future [msg=" + msg0 + ']');
                else
                    fut.onResponse(msg0);
            }
        }
    });
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<?> onReconnected(boolean clusterRestarted) throws IgniteCheckedException {
    // Refresh cached local node ID: it is re-read from the context after reconnect.
    locNodeId = ctx.localNodeId();

    return super.onReconnected(clusterRestarted);
}
/**
 * Sends an IO test request to every node in the given list and returns a future
 * that completes when all responses arrive.
 *
 * @param nodes Nodes.
 * @param payload Payload.
 * @param procFromNioThread If {@code true} message is processed from NIO thread.
 * @return Response future.
 */
public IgniteInternalFuture sendIoTest(List<ClusterNode> nodes, byte[] payload, boolean procFromNioThread) {
    long testId = ioTestId.getAndIncrement();

    IgniteIoTestMessage req = new IgniteIoTestMessage(testId, true, payload);

    req.processFromNioThread(procFromNioThread);

    IoTestFuture fut = new IoTestFuture(testId, nodes.size());

    ioTestMap().put(testId, fut);

    for (ClusterNode node : nodes) {
        try {
            sendToGridTopic(node, GridTopic.TOPIC_IO_TEST, req, GridIoPolicy.SYSTEM_POOL);
        }
        catch (IgniteCheckedException e) {
            // Abort on the first send failure and drop the registered future.
            ioTestMap().remove(req.id());

            return new GridFinishedFuture(e);
        }
    }

    return fut;
}
/**
 * Sends a single IO test request to the given node.
 *
 * @param node Node.
 * @param payload Payload.
 * @param procFromNioThread If {@code true} message is processed from NIO thread.
 * @return Response future.
 */
public IgniteInternalFuture<List<IgniteIoTestMessage>> sendIoTest(
    ClusterNode node,
    byte[] payload,
    boolean procFromNioThread
) {
    long testId = ioTestId.getAndIncrement();

    IgniteIoTestMessage req = new IgniteIoTestMessage(testId, true, payload);

    req.processFromNioThread(procFromNioThread);

    IoTestFuture fut = new IoTestFuture(testId, 1);

    ioTestMap().put(testId, fut);

    try {
        sendToGridTopic(node, GridTopic.TOPIC_IO_TEST, req, GridIoPolicy.SYSTEM_POOL);
    }
    catch (IgniteCheckedException e) {
        // Send failed: unregister the future and report the error through a finished future.
        ioTestMap().remove(req.id());

        return new GridFinishedFuture(e);
    }

    return fut;
}
/**
 * Lazily initializes the IO test futures map via CAS, so concurrent callers
 * always observe the same instance.
 *
 * @return IO test futures map.
 */
private ConcurrentHashMap<Long, IoTestFuture> ioTestMap() {
    ConcurrentHashMap<Long, IoTestFuture> cur = ioTestMap.get();

    if (cur != null)
        return cur;

    ConcurrentHashMap<Long, IoTestFuture> created = new ConcurrentHashMap<>();

    // If another thread won the race, use its map instead of ours.
    return ioTestMap.compareAndSet(null, created) ? created : ioTestMap.get();
}
/**
 * Runs a latency/throughput benchmark over the IO test topic against the given nodes.
 * One monitoring thread drives the warmup/measurement phases while {@code threads}
 * worker threads issue round-trip requests to random nodes and record per-node latencies.
 *
 * @param warmup Warmup duration in milliseconds.
 * @param duration Test duration in milliseconds.
 * @param threads Thread count.
 * @param latencyLimit Max latency in nanoseconds.
 * @param rangesCnt Ranges count in resulting histogram.
 * @param payLoadSize Payload size in bytes.
 * @param procFromNioThread {@code True} to process requests in NIO threads.
 * @param nodes Nodes participating in test.
 */
public void runIoTest(
    final long warmup,
    final long duration,
    final int threads,
    final long latencyLimit,
    final int rangesCnt,
    final int payLoadSize,
    final boolean procFromNioThread,
    final List<ClusterNode> nodes
) {
    // One extra slot for the monitoring task.
    ExecutorService svc = Executors.newFixedThreadPool(threads + 1);

    final AtomicBoolean warmupFinished = new AtomicBoolean();

    final AtomicBoolean done = new AtomicBoolean();

    // Workers plus the monitoring thread synchronize on start and finish.
    final CyclicBarrier bar = new CyclicBarrier(threads + 1);

    final LongAdder cnt = new LongAdder();

    final long sleepDuration = 5000;

    final byte[] payLoad = new byte[payLoadSize];

    // Per-worker result maps, merged by the monitoring thread at the end.
    final Map<UUID, IoTestThreadLocalNodeResults>[] res = new Map[threads];

    boolean failed = true;

    try {
        // Monitoring task: drives warmup -> measurement -> done transitions and logs progress.
        svc.execute(new Runnable() {
            @Override public void run() {
                boolean failed = true;

                try {
                    bar.await();

                    long start = System.currentTimeMillis();

                    if (log.isInfoEnabled())
                        log.info("IO test started " +
                            "[warmup=" + warmup +
                            ", duration=" + duration +
                            ", threads=" + threads +
                            ", latencyLimit=" + latencyLimit +
                            ", rangesCnt=" + rangesCnt +
                            ", payLoadSize=" + payLoadSize +
                            ", procFromNioThreads=" + procFromNioThread + ']'
                        );

                    for (;;) {
                        if (!warmupFinished.get() && System.currentTimeMillis() - start > warmup) {
                            if (log.isInfoEnabled())
                                log.info("IO test warmup finished.");

                            warmupFinished.set(true);

                            // Restart the clock for the measurement phase.
                            start = System.currentTimeMillis();
                        }

                        if (warmupFinished.get() && System.currentTimeMillis() - start > duration) {
                            if (log.isInfoEnabled())
                                log.info("IO test finished, will wait for all threads to finish.");

                            done.set(true);

                            // Wait for all workers to observe 'done' and stop.
                            bar.await();

                            failed = false;

                            break;
                        }

                        if (log.isInfoEnabled())
                            log.info("IO test [opsCnt/sec=" + (cnt.sumThenReset() * 1000 / sleepDuration) +
                                ", warmup=" + !warmupFinished.get() +
                                ", elapsed=" + (System.currentTimeMillis() - start) + ']');

                        Thread.sleep(sleepDuration);
                    }

                    // At this point all threads have finished the test and
                    // stored data to the resulting array of maps.
                    // Need to iterate it over and sum values for all threads.
                    printIoTestResults(res);
                }
                catch (InterruptedException | BrokenBarrierException e) {
                    U.error(log, "IO test failed.", e);
                }
                finally {
                    if (failed)
                        // Break the barrier so blocked workers fail fast instead of hanging.
                        bar.reset();
                }
            }
        });

        for (int i = 0; i < threads; i++) {
            final int i0 = i;

            res[i] = U.newHashMap(nodes.size());

            // Worker task: round-trips to random nodes until the monitor flags completion.
            svc.execute(new Runnable() {
                @Override public void run() {
                    boolean failed = true;

                    ThreadLocalRandom rnd = ThreadLocalRandom.current();

                    int size = nodes.size();

                    Map<UUID, IoTestThreadLocalNodeResults> res0 = res[i0];

                    try {
                        boolean warmupFinished0 = false;

                        bar.await();

                        for (;;) {
                            if (done.get())
                                break;

                            if (!warmupFinished0)
                                warmupFinished0 = warmupFinished.get();

                            ClusterNode node = nodes.get(rnd.nextInt(size));

                            List<IgniteIoTestMessage> msgs = sendIoTest(node, payLoad, procFromNioThread).get();

                            cnt.increment();

                            for (IgniteIoTestMessage msg : msgs) {
                                UUID nodeId = msg.senderNodeId();

                                assert nodeId != null;

                                IoTestThreadLocalNodeResults nodeRes = res0.get(nodeId);

                                if (nodeRes == null)
                                    res0.put(nodeId,
                                        nodeRes = new IoTestThreadLocalNodeResults(rangesCnt, latencyLimit));

                                nodeRes.onResult(msg);
                            }
                        }

                        bar.await();

                        failed = false;
                    }
                    catch (Exception e) {
                        U.error(log, "IO test worker thread failed.", e);
                    }
                    finally {
                        if (failed)
                            bar.reset();
                    }
                }
            });
        }

        failed = false;
    }
    finally {
        if (failed)
            U.shutdownNow(GridIoManager.class, svc, log);
        else
            // Let already-submitted tasks run to completion, then release the pool.
            // Without this the fixed pool's non-daemon threads leaked on the success path.
            svc.shutdown();
    }
}
/**
 * Merges per-thread IO test results into per-node totals and logs a latency
 * histogram plus summary statistics for each remote node.
 *
 * @param rawRes Resulting map.
 */
private void printIoTestResults(
    Map<UUID, IoTestThreadLocalNodeResults>[] rawRes
) {
    // Merge all worker-thread maps into a single per-node aggregate.
    Map<UUID, IoTestNodeResults> res = new HashMap<>();

    for (Map<UUID, IoTestThreadLocalNodeResults> r : rawRes) {
        for (Entry<UUID, IoTestThreadLocalNodeResults> e : r.entrySet()) {
            IoTestNodeResults r0 = res.get(e.getKey());

            if (r0 == null)
                res.put(e.getKey(), r0 = new IoTestNodeResults());

            r0.add(e.getValue());
        }
    }

    StringBuilder b = new StringBuilder(U.nl())
        .append("IO test results (round-trip count per each latency bin.")
        .append(U.nl());

    for (Entry<UUID, IoTestNodeResults> e : res.entrySet()) {
        ClusterNode node = ctx.discovery().node(e.getKey());

        long binLatencyMcs = e.getValue().binLatencyMcs();

        b.append("Node ID: ").append(e.getKey()).append(" (addrs=")
            .append(node != null ? node.addresses().toString() : "n/a")
            .append(", binLatency=").append(binLatencyMcs).append("mcs")
            .append(')').append(U.nl());

        b.append("Latency bin, mcs | Count exclusive | Percentage exclusive | " +
            "Count inclusive | Percentage inclusive ").append(U.nl());

        long[] nodeRes = e.getValue().resLatency;

        // Total round-trips, used as the denominator for percentages.
        long sum = 0;

        for (int i = 0; i < nodeRes.length; i++)
            sum += nodeRes[i];

        // Running (inclusive) total per bin.
        long curSum = 0;

        for (int i = 0; i < nodeRes.length; i++) {
            curSum += nodeRes[i];

            // Last bin is the open-ended overflow bin, printed with '>' instead of '<'.
            if (i < nodeRes.length - 1)
                b.append(String.format("<%11d mcs | %15d | %19.6f%% | %15d | %19.6f%%\n",
                    (i + 1) * binLatencyMcs,
                    nodeRes[i], (100.0 * nodeRes[i]) / sum,
                    curSum, (100.0 * curSum) / sum));
            else
                b.append(String.format(">%11d mcs | %15d | %19.6f%% | %15d | %19.6f%%\n",
                    i * binLatencyMcs,
                    nodeRes[i], (100.0 * nodeRes[i]) / sum,
                    curSum, (100.0 * curSum) / sum));
        }

        b.append(U.nl()).append("Total latency (ns): ").append(U.nl())
            .append(String.format("%15d", e.getValue().totalLatency)).append(U.nl());

        b.append(U.nl()).append("Max latencies (ns):").append(U.nl());
        format(b, e.getValue().maxLatency);

        b.append(U.nl()).append("Max request send queue times (ns):").append(U.nl());
        format(b, e.getValue().maxReqSendQueueTime);

        b.append(U.nl()).append("Max request receive queue times (ns):").append(U.nl());
        format(b, e.getValue().maxReqRcvQueueTime);

        b.append(U.nl()).append("Max response send queue times (ns):").append(U.nl());
        format(b, e.getValue().maxResSendQueueTime);

        b.append(U.nl()).append("Max response receive queue times (ns):").append(U.nl());
        format(b, e.getValue().maxResRcvQueueTime);

        b.append(U.nl()).append("Max request wire times (millis):").append(U.nl());
        format(b, e.getValue().maxReqWireTimeMillis);

        b.append(U.nl()).append("Max response wire times (millis):").append(U.nl());
        format(b, e.getValue().maxResWireTimeMillis);

        b.append(U.nl());
    }

    if (log.isInfoEnabled())
        log.info(b.toString());
}
/**
 * Appends one line per pair: a right-aligned value followed by its timestamp.
 *
 * @param b Builder.
 * @param pairs Pairs to format.
 */
private static void format(StringBuilder b, Collection<IgnitePair<Long>> pairs) {
    for (IgnitePair<Long> pair : pairs) {
        String val = String.format("%15d", pair.get1());
        String ts = IgniteUtils.DEBUG_DATE_FMT.format(Instant.ofEpochMilli(pair.get2()));

        b.append(val).append(" ").append(ts).append(U.nl());
    }
}
/** {@inheritDoc} */
@SuppressWarnings({"SynchronizationOnLocalVariableOrMethodParameter"})
@Override public void onKernalStart0() {
    // Listener that cleans up per-node state when a node joins/leaves/fails.
    discoLsnr = new GridLocalEventListener() {
        @Override public void onEvent(Event evt) {
            assert evt instanceof DiscoveryEvent : "Invalid event: " + evt;

            DiscoveryEvent discoEvt = (DiscoveryEvent)evt;

            UUID nodeId = discoEvt.eventNode().id();

            switch (evt.type()) {
                case EVT_NODE_JOINED:
                    assert waitMap.get(nodeId) == null; // We can't receive messages from undiscovered nodes.

                    break;

                case EVT_NODE_LEFT:
                case EVT_NODE_FAILED:
                    // Drop ordered message sets registered for the departed node.
                    for (Map.Entry<Object, ConcurrentMap<UUID, GridCommunicationMessageSet>> e :
                        msgSetMap.entrySet()) {
                        ConcurrentMap<UUID, GridCommunicationMessageSet> map = e.getValue();

                        GridCommunicationMessageSet set;

                        boolean empty;

                        // Per-map synchronization keeps removal and emptiness check atomic.
                        synchronized (map) {
                            set = map.remove(nodeId);

                            empty = map.isEmpty();
                        }

                        if (set != null) {
                            if (log.isDebugEnabled())
                                log.debug("Removed message set due to node leaving grid: " + set);

                            // Unregister timeout listener.
                            ctx.timeout().removeTimeoutObject(set);

                            // Node may still send stale messages for this topic
                            // even after discovery notification is done.
                            closedTopics.add(set.topic());
                        }

                        if (empty)
                            msgSetMap.remove(e.getKey(), map);
                    }

                    // Clean up delayed and ordered messages (need exclusive lock).
                    lock.writeLock().lock();

                    try {
                        Deque<DelayedMessage> waitList = waitMap.remove(nodeId);

                        if (log.isDebugEnabled())
                            log.debug("Removed messages from discovery startup delay list " +
                                "(sender node left topology): " + waitList);
                    }
                    finally {
                        lock.writeLock().unlock();
                    }

                    break;

                default:
                    assert false : "Unexpected event: " + evt;
            }

            // Re-evaluate security processor support on every topology change.
            secProcSupported = currentSecurityProcSupport();
        }
    };

    ctx.event().addLocalEventListener(discoLsnr, EVT_NODE_JOINED, EVT_NODE_LEFT, EVT_NODE_FAILED);

    invConnHandler.onStart();

    // Make sure that there are no stale messages due to window between communication
    // manager start and kernal start.

    // 1. Process wait list.
    Collection<Collection<DelayedMessage>> delayedMsgs = new ArrayList<>();

    lock.writeLock().lock();

    try {
        // From here on, onMessage0 stops queueing into waitMap (double-checked under this lock).
        started = true;

        for (Entry<UUID, Deque<DelayedMessage>> e : waitMap.entrySet()) {
            // Only deliver messages from nodes that are still in topology.
            if (ctx.discovery().node(e.getKey()) != null) {
                Deque<DelayedMessage> waitList = waitMap.remove(e.getKey());

                if (log.isDebugEnabled())
                    log.debug("Processing messages from discovery startup delay list: " + waitList);

                if (waitList != null)
                    delayedMsgs.add(waitList);
            }
        }
    }
    finally {
        lock.writeLock().unlock();
    }

    // After write lock released.
    if (!delayedMsgs.isEmpty()) {
        for (Collection<DelayedMessage> col : delayedMsgs)
            for (DelayedMessage msg : col)
                commLsnr.onMessage(msg.nodeId(), msg.message(), msg.callback());
    }

    // 2. Process messages sets.
    for (Map.Entry<Object, ConcurrentMap<UUID, GridCommunicationMessageSet>> e : msgSetMap.entrySet()) {
        ConcurrentMap<UUID, GridCommunicationMessageSet> map = e.getValue();

        // Discard sets whose sender node already left while we were starting.
        for (GridCommunicationMessageSet set : map.values()) {
            if (ctx.discovery().node(set.nodeId()) == null) {
                // All map modifications should be synced for consistency.
                boolean rmv;

                synchronized (map) {
                    rmv = map.remove(set.nodeId(), set);
                }

                if (rmv) {
                    if (log.isDebugEnabled())
                        log.debug("Removed message set due to node leaving grid: " + set);

                    // Unregister timeout listener.
                    ctx.timeout().removeTimeoutObject(set);
                }
            }
        }

        boolean rmv;

        synchronized (map) {
            rmv = map.isEmpty();
        }

        if (rmv) {
            msgSetMap.remove(e.getKey(), map);

            // Node may still send stale messages for this topic
            // even after discovery notification is done.
            closedTopics.add(e.getKey());
        }
    }

    secProcSupported = currentSecurityProcSupport();
}
/**
 * Checks that both local and remote nodes are configured to use paired connections.
 *
 * @param node Remote node.
 * @param tcpCommSpi TcpCommunicationSpi.
 * @return {@code True} if both local and remote nodes are configured to use paired connections.
 */
private boolean isPairedConnection(ClusterNode node, TcpCommunicationSpi tcpCommSpi) {
    // Local side must be configured for paired connections first.
    if (!tcpCommSpi.isUsePairedConnections())
        return false;

    // Remote side advertises its setting via a node attribute; absent attribute means "no".
    Object rmtPaired = node.attribute(U.spiAttribute(tcpCommSpi, ATTR_PAIRED_CONN));

    return Boolean.TRUE.equals(rmtPaired);
}
/**
 * @return Instance of {@link TcpCommunicationSpi}. Will throw {@link AssertionError} or {@link ClassCastException}
 *      if another SPI type is configured. Must be called only if type of SPI has been explicitly asserted earlier.
 */
private TcpCommunicationSpi getTcpCommunicationSpi() {
    CommunicationSpi<?> commSpi = getSpi();

    assert commSpi instanceof TcpCommunicationSpi;

    return (TcpCommunicationSpi)commSpi;
}
/** {@inheritDoc} */
@SuppressWarnings("BusyWait")
@Override public void onKernalStop0(boolean cancel) {
    // No more communication messages.
    getSpi().setListener(null);

    boolean interrupted = false;

    // Busy wait is intentional: acquiring the write lock waits out all in-flight
    // message handlers, which hold the read lock in onMessage0.
    while (true) {
        try {
            if (busyLock.writeLock().tryLock(200, TimeUnit.MILLISECONDS))
                break;
            else
                Thread.sleep(200);
        }
        catch (InterruptedException ignore) {
            // Preserve interrupt status & ignore.
            // Note that interrupted flag is cleared.
            interrupted = true;
        }
    }

    try {
        // Re-assert interrupt status that was cleared while waiting for the lock.
        if (interrupted)
            Thread.currentThread().interrupt();

        GridEventStorageManager evtMgr = ctx.event();

        if (evtMgr != null && discoLsnr != null)
            evtMgr.removeLocalEventListener(discoLsnr);

        // Set under the write lock so onMessage0 observes it consistently.
        stopping = true;
    }
    finally {
        busyLock.writeLock().unlock();
    }
}
/** {@inheritDoc} */
@Override public void stop(boolean cancel) throws IgniteCheckedException {
    // Stop the communication SPI before tearing down the inverse connection handler.
    stopSpi();

    invConnHandler.onStop();

    if (log.isDebugEnabled())
        log.debug(stopInfo());
}
/**
 * Entry point for every message delivered by the communication SPI: restores the
 * topic, queues messages that arrive before kernal start, and dispatches the rest
 * to the thread pool selected by the message's IO policy.
 *
 * @param nodeId Node ID.
 * @param msg Message bytes.
 * @param msgC Closure to call when message processing finished.
 */
private void onMessage0(UUID nodeId, GridIoMessage msg, IgniteRunnable msgC) {
    assert nodeId != null;
    assert msg != null;

    // Read lock allows concurrent handlers; onKernalStop0 takes the write lock to drain them.
    Lock busyLock0 = busyLock.readLock();

    busyLock0.lock();

    try {
        if (stopping) {
            if (log.isDebugEnabled())
                log.debug("Received communication message while stopping (will ignore) [nodeId=" +
                    nodeId + ", msg=" + msg + ']');

            return;
        }

        // Restore the topic: well-known topics by ordinal, custom ones by unmarshalling.
        if (msg.topic() == null) {
            int topicOrd = msg.topicOrdinal();

            msg.topic(topicOrd >= 0 ? GridTopic.fromOrdinal(topicOrd) :
                U.unmarshal(marsh, msg.topicBytes(), U.resolveClassLoader(ctx.config())));
        }

        if (!started) {
            lock.readLock().lock();

            try {
                if (!started) { // Sets to true in write lock, so double checking.
                    // Received message before valid context is set to manager.
                    if (log.isDebugEnabled())
                        log.debug("Adding message to waiting list [senderId=" + nodeId +
                            ", msg=" + msg + ']');

                    Deque<DelayedMessage> list = F.<UUID, Deque<DelayedMessage>>addIfAbsent(
                        waitMap,
                        nodeId,
                        ConcurrentLinkedDeque::new
                    );

                    assert list != null;

                    list.add(new DelayedMessage(nodeId, msg, msgC));

                    return;
                }
            }
            finally {
                lock.readLock().unlock();
            }
        }

        // If message is P2P, then process in P2P service.
        // This is done to avoid extra waiting and potential deadlocks
        // as thread pool may not have any available threads to give.
        byte plc = msg.message().policy();

        // If override policy is not defined use sender defined policy.
        if (plc == GridIoPolicy.UNDEFINED)
            plc = msg.policy();

        switch (plc) {
            case P2P_POOL: {
                processP2PMessage(nodeId, msg, msgC);

                break;
            }

            case PUBLIC_POOL:
            case SYSTEM_POOL:
            case MANAGEMENT_POOL:
            case AFFINITY_POOL:
            case UTILITY_CACHE_POOL:
            case IDX_POOL:
            case DATA_STREAMER_POOL:
            case QUERY_POOL:
            case SCHEMA_POOL:
            case SERVICE_POOL:
            case REBALANCE_POOL:
            {
                if (msg.isOrdered())
                    processOrderedMessage(nodeId, msg, plc, msgC);
                else
                    processRegularMessage(nodeId, msg, plc, msgC);

                break;
            }

            default:
                assert plc >= 0 : "Negative policy [plc=" + plc + ", msg=" + msg + ']';

                // Custom executor policies must stay outside the reserved range.
                if (isReservedGridIoPolicy(plc))
                    throw new IgniteCheckedException("Failed to process message with policy of reserved range. " +
                        "[policy=" + plc + ']');

                if (msg.isOrdered())
                    processOrderedMessage(nodeId, msg, plc, msgC);
                else
                    processRegularMessage(nodeId, msg, plc, msgC);
        }
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Failed to process message (will ignore): " + msg, e);
    }
    finally {
        busyLock0.unlock();
    }
}
/**
 * Dispatches a P2P (peer-class-loading) message on the dedicated P2P executor,
 * falling back to the calling thread if the executor rejects the task.
 *
 * @param nodeId Node ID.
 * @param msg Message.
 * @param msgC Closure to call when message processing finished.
 */
private void processP2PMessage(
    final UUID nodeId,
    final GridIoMessage msg,
    final IgniteRunnable msgC
) {
    Runnable job = () -> {
        try {
            threadProcessingMessage(true, msgC);

            GridMessageListener lsnr = listenerGet0(msg.topic());

            // Nobody is listening on this topic - nothing to do.
            if (lsnr == null)
                return;

            Object payload = msg.message();

            assert payload != null;

            invokeListener(msg.policy(), lsnr, nodeId, payload, secCtx(msg));
        }
        finally {
            threadProcessingMessage(false, null);

            // Always signal completion so the connection keeps reading.
            msgC.run();
        }
    };

    try {
        pools.getPeerClassLoadingExecutorService().execute(job);
    }
    catch (RejectedExecutionException e) {
        U.error(log, "Failed to process P2P message due to execution rejection. Increase the upper bound " +
            "on 'ExecutorService' provided by 'IgniteConfiguration.getPeerClassLoadingThreadPoolSize()'. " +
            "Will attempt to process message in the listener thread instead.", e);

        job.run();
    }
}
/**
 * Processes a regular (unordered) message by routing it to the executor that matches
 * its IO policy, with special handling for IO-test, MVCC-coordinator, striped-system
 * and data-streamer messages.
 *
 * @param nodeId Node ID.
 * @param msg Message.
 * @param plc Execution policy.
 * @param msgC Closure to call when message processing finished.
 * @throws IgniteCheckedException If failed.
 */
private void processRegularMessage(
    final UUID nodeId,
    final GridIoMessage msg,
    final byte plc,
    final IgniteRunnable msgC
) throws IgniteCheckedException {
    Runnable c = new TraceRunnable(ctx.tracing(), COMMUNICATION_REGULAR_PROCESS) {
        @Override public void execute() {
            try {
                MTC.span().addTag(SpanTags.MESSAGE, () -> traceName(msg));

                threadProcessingMessage(true, msgC);

                // The classes which use TransientSerializable must set a version of a node to ThreadLocal via
                // MarshallerUtils.jobSenderVersion(node.version()) that created a serializable object.
                // We forgot for communication messages.
                ClusterNode node = ctx.discovery().node(nodeId);

                if (node == null)
                    node = ctx.discovery().historicalNode(nodeId);

                if (node != null)
                    MarshallerUtils.jobSenderVersion(node.version());

                try {
                    processRegularMessage0(msg, nodeId);
                }
                finally {
                    // Always clear the thread-local sender version for the pooled thread.
                    MarshallerUtils.jobSenderVersion(null);
                }
            }
            catch (Throwable e) {
                log.error("An error occurred processing the message [msg=" + msg + ", nodeId=" + nodeId + "].", e);

                throw e;
            }
            finally {
                threadProcessingMessage(false, null);

                msgC.run();
            }
        }

        @Override public String toString() {
            return "Message closure [msg=" + msg + ']';
        }
    };

    MTC.span().addLog(() -> "Regular process queued");

    // IO-test messages may optionally run right in the NIO thread.
    if (msg.topicOrdinal() == TOPIC_IO_TEST.ordinal()) {
        IgniteIoTestMessage msg0 = (IgniteIoTestMessage)msg.message();

        if (msg0.processFromNioThread())
            c.run();
        else
            ctx.pools().getStripedExecutorService().execute(-1, c);

        return;
    }

    if (msg.topicOrdinal() == TOPIC_CACHE_COORDINATOR.ordinal()) {
        // Cast kept as a sanity check that coordinator-topic messages are MVCC messages.
        MvccMessage msg0 = (MvccMessage)msg.message();

        // see IGNITE-8609
        /*if (msg0.processedFromNioThread())
            c.run();
        else*/
        ctx.pools().getStripedExecutorService().execute(-1, c);

        return;
    }

    final int part = msg.partition(); // Store partition to avoid possible recalculation.

    if (plc == GridIoPolicy.SYSTEM_POOL && part != GridIoMessage.STRIPE_DISABLED_PART) {
        ctx.pools().getStripedExecutorService().execute(part, c);

        return;
    }

    if (plc == GridIoPolicy.DATA_STREAMER_POOL && part != GridIoMessage.STRIPE_DISABLED_PART) {
        ctx.pools().getDataStreamerExecutorService().execute(part, c);

        return;
    }

    // NOTE: a second TOPIC_IO_TEST check used to live here; it was unreachable because
    // the identical check above always returns, so it has been removed.

    try {
        String execName = msg.executorName();

        // Messages may name a custom executor; fall back to the policy pool if it is missing.
        if (execName != null) {
            Executor exec = pools.customExecutor(execName);

            if (exec != null) {
                exec.execute(c);

                return;
            }
            else {
                LT.warn(log, "Custom executor doesn't exist (message will be processed in default " +
                    "thread pool): " + execName);
            }
        }

        pools.poolForPolicy(plc).execute(c);
    }
    catch (RejectedExecutionException e) {
        if (!ctx.isStopping()) {
            U.error(log, "Failed to process regular message due to execution rejection. Will attempt to process " +
                "message in the listener thread instead.", e);

            c.run();
        }
        else if (log.isDebugEnabled())
            log.debug("Failed to process regular message due to execution rejection: " + msg);
    }
}
/**
 * Synchronously delivers a regular message to the listener registered for its topic;
 * silently drops the message if no listener is registered.
 *
 * @param msg Message.
 * @param nodeId Node ID.
 */
private void processRegularMessage0(GridIoMessage msg, UUID nodeId) {
    GridMessageListener lsnr = listenerGet0(msg.topic());

    if (lsnr != null) {
        Object payload = msg.message();

        assert payload != null;

        invokeListener(msg.policy(), lsnr, nodeId, payload, secCtx(msg));
    }
}
/**
 * Looks up the listener registered for a topic: system topics index into the
 * {@code sysLsnrs} array by ordinal, custom topics go through the listener map.
 *
 * @param topic Topic.
 * @return Listener, or {@code null} if none is registered.
 */
@Nullable private GridMessageListener listenerGet0(Object topic) {
    return topic instanceof GridTopic ? sysLsnrs[systemListenerIndex(topic)] : lsnrMap.get(topic);
}
/**
 * Registers a listener for a topic unless one is already present.
 *
 * @param topic Topic.
 * @param lsnr Listener.
 * @return Previously registered listener, or {@code null} if the put succeeded.
 */
@Nullable private GridMessageListener listenerPutIfAbsent0(Object topic, GridMessageListener lsnr) {
    if (!(topic instanceof GridTopic))
        return lsnrMap.putIfAbsent(topic, lsnr);

    // System listeners live in a copy-on-write array guarded by sysLsnrsMux.
    synchronized (sysLsnrsMux) {
        int idx = systemListenerIndex(topic);

        GridMessageListener prev = sysLsnrs[idx];

        if (prev == null)
            changeSystemListener(idx, lsnr);

        return prev;
    }
}
/**
 * Unregisters whatever listener is bound to the given topic.
 *
 * @param topic Topic.
 * @return The listener that was removed, or {@code null} if none was registered.
 */
@Nullable private GridMessageListener listenerRemove0(Object topic) {
    if (!(topic instanceof GridTopic))
        return lsnrMap.remove(topic);

    // System listeners live in a copy-on-write array guarded by sysLsnrsMux.
    synchronized (sysLsnrsMux) {
        int idx = systemListenerIndex(topic);

        GridMessageListener prev = sysLsnrs[idx];

        if (prev != null)
            changeSystemListener(idx, null);

        return prev;
    }
}
/**
 * Unregisters a listener only if the currently registered one equals the expected value.
 *
 * @param topic Topic.
 * @param exp Listener.
 * @return {@code True} if the expected listener was found and removed.
 */
private boolean listenerRemove0(Object topic, GridMessageListener exp) {
    if (!(topic instanceof GridTopic))
        return lsnrMap.remove(topic, exp);

    synchronized (sysLsnrsMux) {
        return systemListenerChange(topic, exp, null);
    }
}
/**
 * Atomically replaces the listener for a topic, provided the current one matches the expected value.
 *
 * @param topic Topic.
 * @param exp Old value.
 * @param newVal New value.
 * @return {@code True} if the replacement took place.
 */
private boolean listenerReplace0(Object topic, GridMessageListener exp, GridMessageListener newVal) {
    if (!(topic instanceof GridTopic))
        return lsnrMap.replace(topic, exp, newVal);

    synchronized (sysLsnrsMux) {
        return systemListenerChange(topic, exp, newVal);
    }
}
/**
 * Compare-and-set for a system listener slot: installs {@code newVal} only if the
 * current listener equals {@code exp}. Must be called under {@code sysLsnrsMux}.
 *
 * @param topic Topic.
 * @param exp Expected value.
 * @param newVal New value.
 * @return {@code True} if the slot was updated.
 */
private boolean systemListenerChange(Object topic, GridMessageListener exp, GridMessageListener newVal) {
    assert Thread.holdsLock(sysLsnrsMux);
    assert topic instanceof GridTopic;

    int idx = systemListenerIndex(topic);

    GridMessageListener cur = sysLsnrs[idx];

    boolean matches = cur != null && cur.equals(exp);

    if (matches)
        changeSystemListener(idx, newVal);

    return matches;
}
/**
 * Changes the system listener at the given index using copy-on-write so that readers
 * of {@code sysLsnrs} never see a partially updated array. Must be called under
 * {@code sysLsnrsMux}.
 *
 * @param idx Index.
 * @param lsnr Listener (or {@code null} to clear the slot).
 */
private void changeSystemListener(int idx, @Nullable GridMessageListener lsnr) {
    assert Thread.holdsLock(sysLsnrsMux);

    GridMessageListener[] cp = Arrays.copyOf(sysLsnrs, sysLsnrs.length);

    cp[idx] = lsnr;

    sysLsnrs = cp;
}
/**
 * Maps a system topic to its slot in the {@code sysLsnrs} array.
 *
 * @param topic Topic (must be a {@link GridTopic}).
 * @return Index (the topic's ordinal).
 */
private int systemListenerIndex(Object topic) {
    assert topic instanceof GridTopic;

    GridTopic sysTopic = (GridTopic)topic;

    return sysTopic.ordinal();
}
/**
 * Processes an ordered message: registers it in the per-topic, per-sender
 * {@link GridCommunicationMessageSet} (creating the set if needed) and schedules
 * unwinding of the set on the executor matching the given policy.
 *
 * @param nodeId Node ID.
 * @param msg Ordered message.
 * @param plc Execution policy.
 * @param msgC Closure to call when message processing finished ({@code null} for sync processing).
 * @throws IgniteCheckedException If failed.
 */
@SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
private void processOrderedMessage(
    final UUID nodeId,
    final GridIoMessage msg,
    final byte plc,
    @Nullable final IgniteRunnable msgC
) throws IgniteCheckedException {
    assert msg != null;

    long timeout = msg.timeout();

    boolean skipOnTimeout = msg.skipOnTimeout();

    boolean isNew = false;

    ConcurrentMap<UUID, GridCommunicationMessageSet> map;

    GridCommunicationMessageSet set = null;

    // Retry loop: competes with concurrent removal of emptied per-topic maps.
    while (true) {
        map = msgSetMap.get(msg.topic());

        if (map == null) {
            set = new GridCommunicationMessageSet(plc, msg.topic(), nodeId, timeout, skipOnTimeout, msg, msgC);

            map = new ConcurrentHashMap0<>();

            map.put(nodeId, set);

            ConcurrentMap<UUID, GridCommunicationMessageSet> old = msgSetMap.putIfAbsent(
                msg.topic(), map);

            if (old != null)
                map = old; // Lost the race - fall through and work with the winner's map.
            else {
                isNew = true;

                // Put succeeded.
                break;
            }
        }

        boolean rmv = false;

        // Map-level lock orders additions against the emptiness check in the removal path.
        synchronized (map) {
            if (map.isEmpty())
                rmv = true; // Map is being retired; remove the mapping and retry.
            else {
                set = map.get(nodeId);

                if (set == null) {
                    GridCommunicationMessageSet old = map.putIfAbsent(nodeId,
                        set = new GridCommunicationMessageSet(plc, msg.topic(),
                            nodeId, timeout, skipOnTimeout, msg, msgC));

                    assert old == null;

                    isNew = true;

                    // Put succeeded.
                    break;
                }
            }
        }

        if (rmv)
            msgSetMap.remove(msg.topic(), map);
        else {
            assert set != null;
            assert !isNew;

            // Existing set: just append the message.
            set.add(msg, msgC);

            break;
        }
    }

    // If we just created the set but the sender has already left, undo the registration.
    if (isNew && ctx.discovery().node(nodeId) == null) {
        if (log.isDebugEnabled())
            log.debug("Message is ignored as sender has left the grid: " + msg);

        assert map != null;

        boolean rmv;

        synchronized (map) {
            map.remove(nodeId);

            rmv = map.isEmpty();
        }

        if (rmv)
            msgSetMap.remove(msg.topic(), map);

        return;
    }

    // Only sets that can actually expire need a timeout object.
    if (isNew && set.endTime() != Long.MAX_VALUE)
        ctx.timeout().addTimeoutObject(set);

    final GridMessageListener lsnr = listenerGet0(msg.topic());

    if (lsnr == null) {
        if (closedTopics.contains(msg.topic())) {
            if (log.isDebugEnabled())
                log.debug("Message is ignored as it came for the closed topic: " + msg);

            assert map != null;

            msgSetMap.remove(msg.topic(), map);
        }
        else if (log.isDebugEnabled()) {
            // Note that we simply keep messages if listener is not
            // registered yet, until one will be registered.
            log.debug("Received message for unknown listener (messages will be kept until a " +
                "listener is registered): " + msg);
        }

        // Mark the message as processed, otherwise reading from the connection
        // may stop.
        if (msgC != null)
            msgC.run();

        return;
    }

    if (msgC == null) {
        // Message from local node can be processed in sync manner.
        assert locNodeId.equals(nodeId);

        unwindMessageSet(set, lsnr);

        return;
    }

    final GridCommunicationMessageSet msgSet0 = set;

    Runnable c = new Runnable() {
        @Override public void run() {
            try {
                threadProcessingMessage(true, msgC);

                unwindMessageSet(msgSet0, lsnr);
            }
            finally {
                threadProcessingMessage(false, null);
            }
        }
    };

    try {
        MTC.span().addLog(() -> "Ordered process queued");

        pools.poolForPolicy(plc).execute(c);
    }
    catch (RejectedExecutionException e) {
        U.error(log, "Failed to process ordered message due to execution rejection. " +
            "Increase the upper bound on executor service provided by corresponding " +
            "configuration property. Will attempt to process message in the listener " +
            "thread instead [msgPlc=" + plc + ']', e);

        c.run();
    }
}
/**
 * Drains a message set into the given listener, looping while the set keeps changing.
 * Only one thread may hold the set's reservation at a time; if another thread owns it,
 * this call returns immediately and that thread will observe the new messages.
 *
 * @param msgSet Message set to unwind.
 * @param lsnr Listener to notify.
 */
private void unwindMessageSet(GridCommunicationMessageSet msgSet, GridMessageListener lsnr) {
    for (;;) {
        // Only the reservation owner may unwind; bail out if somebody else has it.
        if (!msgSet.reserve()) {
            if (log.isDebugEnabled())
                log.debug("Another thread owns reservation: " + msgSet);

            return;
        }

        try {
            msgSet.unwind(lsnr);
        }
        finally {
            msgSet.release();
        }

        // Check outside of reservation block: new messages may have arrived while unwinding.
        if (!msgSet.changed()) {
            if (log.isDebugEnabled())
                log.debug("Message set has not been changed: " + msgSet);

            return;
        }
    }
}
/**
 * Invoke message listener under the sender's security context, publishing the
 * current IO policy in a thread-local (readable via {@link #currentPolicy()})
 * for the duration of the call.
 *
 * @param plc Policy.
 * @param lsnr Listener.
 * @param nodeId Node ID.
 * @param msg Message.
 * @param secCtx Optional security context.
 */
private void invokeListener(
    Byte plc,
    GridMessageListener lsnr,
    UUID nodeId,
    Object msg,
    @Nullable T2<UUID, SecurityContext> secCtx
) {
    MTC.span().addLog(() -> "Invoke listener");

    Byte oldPlc = CUR_PLC.get();

    // Skip redundant thread-local writes when the policy is unchanged.
    boolean change = !F.eq(oldPlc, plc);

    if (change)
        CUR_PLC.set(plc);

    SecurityContext secCtx0 = null;

    // Default the subject to the sending node unless an explicit context carries one.
    UUID subjId = nodeId;

    if (secCtx != null) {
        secCtx0 = secCtx.get2();
        subjId = secCtx.get1();
    }

    IgniteSecurity sec = ctx.security();

    // Run the listener with either the explicit context or one resolved from node/subject ids.
    try (OperationSecurityContext ignored = secCtx0 != null ? sec.withContext(secCtx0) : sec.withContext(nodeId, subjId)) {
        lsnr.onMessage(nodeId, msg, plc);
    }
    finally {
        // Restore the previous policy for this (pooled) thread.
        if (change)
            CUR_PLC.set(oldPlc);
    }
}
/**
 * Returns the IO policy of the message currently being processed by this thread,
 * as published by {@code invokeListener}.
 *
 * @return Current IO policy, or {@code null} if this thread is not processing a message.
 */
@Nullable public static Byte currentPolicy() {
    Byte plc = CUR_PLC.get();

    return plc;
}
/**
 * Determines whether a send failure is explained by the destination node having left.
 *
 * @param nodeId Node ID.
 * @param sndErr Send error.
 * @param ping {@code True} if try ping node.
 * @return {@code True} if node left.
 * @throws IgniteClientDisconnectedCheckedException If ping failed.
 */
public boolean checkNodeLeft(UUID nodeId, IgniteCheckedException sndErr, boolean ping)
    throws IgniteClientDisconnectedCheckedException
{
    if (sndErr instanceof ClusterTopologyCheckedException)
        return true;

    if (ctx.discovery().node(nodeId) == null)
        return true;

    // Ping is attempted last - it may throw if the client got disconnected.
    return ping && !ctx.discovery().pingNode(nodeId);
}
/**
 * Core send routine: wraps the message into a {@link GridIoMessage}, processes it
 * locally for loopback sends, and otherwise dispatches it through the communication SPI.
 *
 * @param node Destination node.
 * @param topic Topic to send the message to.
 * @param topicOrd GridTopic enumeration ordinal ({@code -1} for custom topics).
 * @param msg Message to send.
 * @param plc Type of processing.
 * @param ordered Ordered flag.
 * @param timeout Timeout.
 * @param skipOnTimeout Whether message can be skipped on timeout.
 * @param ackC Ack closure.
 * @param async If {@code true} message for local node will be processed in pool, otherwise in current thread.
 * @throws IgniteCheckedException Thrown in case of any errors.
 */
private void send(
    ClusterNode node,
    Object topic,
    int topicOrd,
    Message msg,
    byte plc,
    boolean ordered,
    long timeout,
    boolean skipOnTimeout,
    IgniteInClosure<IgniteException> ackC,
    boolean async
) throws IgniteCheckedException {
    assert node != null;
    assert topic != null;
    assert msg != null;
    assert !async || msg instanceof GridIoUserMessage : msg; // Async execution was added only for IgniteMessaging.
    assert topicOrd >= 0 || !(topic instanceof GridTopic) : msg;

    MTC.span().addLog(() -> "Create communication msg - " + traceName(msg));

    GridIoMessage ioMsg = createGridIoMessage(topic, topicOrd, msg, plc, ordered, timeout, skipOnTimeout);

    // Loopback: process locally without going through the SPI.
    if (locNodeId.equals(node.id())) {
        assert plc != P2P_POOL;

        CommunicationListener commLsnr = this.commLsnr;

        if (commLsnr == null)
            throw new IgniteCheckedException("Trying to send message when grid is not fully started.");

        if (ordered)
            processOrderedMessage(locNodeId, ioMsg, plc, null);
        else if (async)
            processRegularMessage(locNodeId, ioMsg, plc, NOOP);
        else
            processRegularMessage0(ioMsg, locNodeId);

        // Local delivery cannot fail: acknowledge immediately.
        if (ackC != null)
            ackC.apply(null);
    }
    else {
        // Custom topics have no ordinal and must travel in marshalled form.
        if (topicOrd < 0)
            ioMsg.topicBytes(U.marshal(marsh, topic));

        try {
            // Only TcpCommunicationSpi supports per-message ack closures.
            // (A redundant cast of getSpi() before this instanceof check was removed.)
            if (getSpi() instanceof TcpCommunicationSpi)
                getTcpCommunicationSpi().sendMessage(node, ioMsg, ackC);
            else
                getSpi().sendMessage(node, ioMsg);
        }
        catch (IgniteSpiException e) {
            if (e.getCause() instanceof ClusterTopologyCheckedException)
                throw (ClusterTopologyCheckedException)e.getCause();

            if (!ctx.discovery().alive(node))
                throw new ClusterTopologyCheckedException("Failed to send message, node left: " + node.id(), e);

            throw new IgniteCheckedException("Failed to send message (node may have left the grid or " +
                "TCP connection cannot be established due to firewall issues) " +
                "[node=" + node + ", topic=" + topic +
                ", msg=" + msg + ", policy=" + plc + ']', e);
        }
    }
}
/**
 * @return Milliseconds to wait for an inverse connection to be established;
 *      reuses the configured failure detection timeout.
 */
private long getInverseConnectionWaitTimeout() {
    return ctx.config().getFailureDetectionTimeout();
}
/**
 * Remote client node and all server nodes must support {@link IgniteFeatures#INVERSE_TCP_CONNECTION}
 * in order to establish connection from the other side.
 *
 * @param node Remote node to check.
 * @return {@code True} if both the remote node and all server nodes support inverse TCP connections.
 */
private boolean inverseTcpConnectionFeatureIsSupported(ClusterNode node) {
    return IgniteFeatures.nodeSupports(ctx, node, INVERSE_TCP_CONNECTION)
        && IgniteFeatures.allNodesSupport(ctx, INVERSE_TCP_CONNECTION, IgniteDiscoverySpi.SRV_NODES);
}
/**
 * @return One of two message wrappers. The first is {@link GridIoMessage}, the second is secured version {@link
 * GridIoSecurityAwareMessage}.
 */
private @NotNull GridIoMessage createGridIoMessage(
    Object topic,
    int topicOrd,
    Message msg,
    byte plc,
    boolean ordered,
    long timeout,
    boolean skipOnTimeout
) throws IgniteCheckedException {
    // Partition-exchange and job-response messages are never security-wrapped.
    if (ctx.security().enabled() &&
        !(msg instanceof GridDhtPartitionsAbstractMessage) && !(msg instanceof GridJobExecuteResponse)) {
        // Local snapshot of the field to get a consistent value for the checks below.
        IgniteFeatures secProcSupported = this.secProcSupported;

        if (secProcSupported != null) {
            SecurityContext secCtx = ctx.security().securityContext();

            UUID subjId = secCtx.subject().id();

            byte[] serSubj = null;

            // Skip sending serialized subject if all nodes support IGNITE_SECURITY_PROCESSOR_V2 and state=ACTIVE.
            if (secProcSupported != IGNITE_SECURITY_PROCESSOR_V2 || ctx.state().clusterState().state() == INACTIVE)
                serSubj = !locNodeId.equals(subjId) ? U.marshal(marsh, secCtx) : null;

            return new GridIoSecurityAwareMessage(subjId, serSubj, plc, topic, topicOrd, msg, ordered, timeout,
                skipOnTimeout);
        }
    }

    return new GridIoMessage(plc, topic, topicOrd, msg, ordered, timeout, skipOnTimeout);
}
/**
 * Resolves the highest security-processor feature version supported by every node in the cluster.
 *
 * @return {@code IGNITE_SECURITY_PROCESSOR_V2}, {@code IGNITE_SECURITY_PROCESSOR}, or {@code null}
 *      if neither is supported cluster-wide.
 */
private IgniteFeatures currentSecurityProcSupport() {
    Collection<ClusterNode> nodes = ctx.discovery().allNodes();

    if (allNodesSupports(ctx, nodes, IGNITE_SECURITY_PROCESSOR_V2))
        return IGNITE_SECURITY_PROCESSOR_V2;

    return allNodesSupports(ctx, nodes, IGNITE_SECURITY_PROCESSOR) ? IGNITE_SECURITY_PROCESSOR : null;
}
/**
 * Sends a message to a custom topic on the node identified by {@code nodeId}.
 *
 * @param nodeId Id of destination node.
 * @param topic Topic to send the message to.
 * @param msg Message to send.
 * @param plc Type of processing.
 * @throws IgniteCheckedException Thrown in case of any errors.
 */
public void sendToCustomTopic(UUID nodeId, Object topic, Message msg, byte plc)
    throws IgniteCheckedException {
    ClusterNode dest = ctx.discovery().node(nodeId);

    if (dest == null)
        throw new ClusterTopologyCheckedException("Failed to send message to node (has node left grid?): " + nodeId);

    sendToCustomTopic(dest, topic, msg, plc);
}
/**
 * Sends a message to a system topic on the node identified by {@code nodeId}.
 *
 * @param nodeId Id of destination node.
 * @param topic Topic to send the message to.
 * @param msg Message to send.
 * @param plc Type of processing.
 * @throws IgniteCheckedException Thrown in case of any errors.
 */
public void sendToGridTopic(UUID nodeId, GridTopic topic, Message msg, byte plc)
    throws IgniteCheckedException {
    ClusterNode dest = ctx.discovery().node(nodeId);

    if (dest == null)
        throw new ClusterTopologyCheckedException("Failed to send message to node (has node left grid?): " + nodeId);

    send(dest, topic, topic.ordinal(), msg, plc, false, 0, false, null, false);
}
/**
 * Sends an unordered message to a system topic on the given node.
 *
 * @param node Destination node.
 * @param topic Topic to send the message to.
 * @param msg Message to send.
 * @param plc Type of processing.
 * @throws IgniteCheckedException Thrown in case of any errors.
 */
public void sendToGridTopic(ClusterNode node, GridTopic topic, Message msg, byte plc)
    throws IgniteCheckedException {
    int topicOrd = topic.ordinal();

    send(node, topic, topicOrd, msg, plc, false, 0, false, null, false);
}
/**
 * Sends an unordered message to a custom (user-defined) topic on the given node.
 *
 * @param node Destination node.
 * @param topic Topic to send the message to.
 * @param msg Message to send.
 * @param plc Type of processing.
 * @throws IgniteCheckedException Thrown in case of any errors.
 */
public void sendToCustomTopic(ClusterNode node, Object topic, Message msg, byte plc)
    throws IgniteCheckedException {
    // -1 ordinal marks a non-GridTopic topic; send() will marshal it.
    send(node, topic, -1, msg, plc, false, 0, false, null, false);
}
/**
 * @param node Destination node.
 * @param topic Topic to send the message to.
 * @param msg Message to send.
 * @param plc Type of processing.
 * @param span Current span for tracing.
 * @throws IgniteCheckedException Thrown in case of any errors.
 */
public void sendToGridTopic(ClusterNode node, GridTopic topic, Message msg, byte plc, Span span)
    throws IgniteCheckedException {
    // NOTE(review): 'span' is not used in this body - tracing context appears to flow
    // through MTC elsewhere in this class; confirm whether the parameter should be
    // propagated here or removed from the signature.
    send(node, topic, topic.ordinal(), msg, plc, false, 0, false, null, false);
}
/**
 * Sends an unordered message with an explicitly supplied topic ordinal.
 *
 * @param node Destination node.
 * @param topic Topic to send the message to.
 * @param topicOrd GridTopic enumeration ordinal.
 * @param msg Message to send.
 * @param plc Type of processing.
 * @throws IgniteCheckedException Thrown in case of any errors.
 */
public void sendGeneric(ClusterNode node, Object topic, int topicOrd, Message msg, byte plc)
    throws IgniteCheckedException {
    boolean ordered = false;

    send(node, topic, topicOrd, msg, plc, ordered, 0, false, null, false);
}
/**
 * Sends an ordered message to a custom topic on the given node.
 *
 * @param node Destination node.
 * @param topic Topic to send the message to.
 * @param msg Message to send.
 * @param plc Type of processing.
 * @param timeout Timeout to keep a message on receiving queue.
 * @param skipOnTimeout Whether message can be skipped on timeout.
 * @throws IgniteCheckedException Thrown in case of any errors.
 */
public void sendOrderedMessage(
    ClusterNode node,
    Object topic,
    Message msg,
    byte plc,
    long timeout,
    boolean skipOnTimeout
) throws IgniteCheckedException {
    assert timeout > 0 || skipOnTimeout;

    // -1 ordinal marks a non-GridTopic topic; send() will marshal it.
    send(node, topic, -1, msg, plc, true, timeout, skipOnTimeout, null, false);
}
/**
 * Sends an unordered message to a system topic with a delivery acknowledgment closure.
 *
 * @param node Destination nodes.
 * @param topic Topic to send the message to.
 * @param msg Message to send.
 * @param plc Type of processing.
 * @param ackC Ack closure.
 * @throws IgniteCheckedException Thrown in case of any errors.
 */
public void sendToGridTopic(ClusterNode node,
    GridTopic topic,
    Message msg,
    byte plc,
    IgniteInClosure<IgniteException> ackC) throws IgniteCheckedException
{
    int topicOrd = topic.ordinal();

    send(node, topic, topicOrd, msg, plc, false, 0, false, ackC, false);
}
/**
 * Sends an ordered message to every node in the collection, attempting all nodes even
 * when some sends fail; the first failure is rethrown with later ones suppressed.
 *
 * @param nodes Destination node.
 * @param topic Topic to send the message to.
 * @param msg Message to send.
 * @param plc Type of processing.
 * @param timeout Timeout to keep a message on receiving queue.
 * @param skipOnTimeout Whether message can be skipped on timeout.
 * @throws IgniteCheckedException Thrown in case of any errors.
 */
void sendOrderedMessageToGridTopic(
    Collection<? extends ClusterNode> nodes,
    GridTopic topic,
    Message msg,
    byte plc,
    long timeout,
    boolean skipOnTimeout
)
    throws IgniteCheckedException {
    assert timeout > 0 || skipOnTimeout;

    IgniteCheckedException firstErr = null;

    for (ClusterNode n : nodes) {
        try {
            send(n, topic, topic.ordinal(), msg, plc, true, timeout, skipOnTimeout, null, false);
        }
        catch (IgniteCheckedException e) {
            if (firstErr == null)
                firstErr = e;
            else
                firstErr.addSuppressed(e);
        }
    }

    if (firstErr != null)
        throw firstErr;
}
/**
 * Sends an unordered message to every node in the collection, attempting all nodes even
 * when some sends fail; the first failure is rethrown with later ones suppressed.
 *
 * @param nodes Destination nodes.
 * @param topic Topic to send the message to.
 * @param msg Message to send.
 * @param plc Type of processing.
 * @throws IgniteCheckedException Thrown in case of any errors.
 */
public void sendToGridTopic(
    Collection<? extends ClusterNode> nodes,
    GridTopic topic,
    Message msg,
    byte plc
) throws IgniteCheckedException {
    IgniteCheckedException firstErr = null;

    for (ClusterNode n : nodes) {
        try {
            send(n, topic, topic.ordinal(), msg, plc, false, 0, false, null, false);
        }
        catch (IgniteCheckedException e) {
            if (firstErr == null)
                firstErr = e;
            else
                firstErr.addSuppressed(e);
        }
    }

    if (firstErr != null)
        throw firstErr;
}
/**
 * Sends an ordered message to a custom topic with a delivery acknowledgment closure.
 *
 * @param node Destination node.
 * @param topic Topic to send the message to.
 * @param msg Message to send.
 * @param plc Type of processing.
 * @param timeout Timeout to keep a message on receiving queue.
 * @param skipOnTimeout Whether message can be skipped on timeout.
 * @param ackC Ack closure.
 * @throws IgniteCheckedException Thrown in case of any errors.
 */
public void sendOrderedMessage(
    ClusterNode node,
    Object topic,
    Message msg,
    byte plc,
    long timeout,
    boolean skipOnTimeout,
    IgniteInClosure<IgniteException> ackC
) throws IgniteCheckedException {
    assert timeout > 0 || skipOnTimeout;

    // -1 ordinal marks a non-GridTopic topic; send() will marshal it.
    send(node, topic, -1, msg, plc, true, timeout, skipOnTimeout, ackC, false);
}
/**
 * Sends a peer deployable user message.
 *
 * @param nodes Destination nodes.
 * @param msg Message to send.
 * @param topic Message topic to use.
 * @param ordered Is message ordered?
 * @param timeout Message timeout in milliseconds for ordered messages.
 * @param async Async flag.
 * @throws IgniteCheckedException Thrown in case of any errors.
 */
@SuppressWarnings("ConstantConditions")
public void sendUserMessage(Collection<? extends ClusterNode> nodes,
    Object msg,
    @Nullable Object topic,
    boolean ordered,
    long timeout,
    boolean async) throws IgniteCheckedException
{
    // Pure-local send: the only destination is this node.
    boolean loc = nodes.size() == 1 && F.first(nodes).id().equals(locNodeId);

    byte[] serMsg = null;

    byte[] serTopic = null;

    // Marshal payload and topic only when the message leaves this node.
    if (!loc) {
        serMsg = U.marshal(marsh, msg);

        if (topic != null)
            serTopic = U.marshal(marsh, topic);
    }

    GridDeployment dep = null;

    String depClsName = null;

    if (ctx.config().isPeerClassLoadingEnabled()) {
        Class<?> cls0 = U.detectClass(msg);

        // A JDK payload class cannot anchor deployment; fall back to the topic class.
        if (U.isJdk(cls0) && topic != null)
            cls0 = U.detectClass(topic);

        dep = ctx.deploy().deploy(cls0, U.detectClassLoader(cls0));

        if (dep == null)
            throw new IgniteDeploymentCheckedException("Failed to deploy user message: " + msg);

        depClsName = cls0.getName();
    }

    Message ioMsg = new GridIoUserMessage(
        msg,
        serMsg,
        depClsName,
        topic,
        serTopic,
        dep != null ? dep.classLoaderId() : null,
        dep != null ? dep.deployMode() : null,
        dep != null ? dep.userVersion() : null,
        dep != null ? dep.participants() : null);

    if (ordered)
        sendOrderedMessageToGridTopic(nodes, TOPIC_COMM_USER, ioMsg, PUBLIC_POOL, timeout, true);
    else if (loc) {
        send(F.first(nodes),
            TOPIC_COMM_USER,
            TOPIC_COMM_USER.ordinal(),
            ioMsg,
            PUBLIC_POOL,
            false,
            0,
            false,
            null,
            async
        );
    }
    else {
        // Mixed destination set: split into the local node and the remote nodes.
        ClusterNode locNode = F.find(nodes, null, F.localNode(locNodeId));

        Collection<? extends ClusterNode> rmtNodes = F.view(nodes, F.remoteNodes(locNodeId));

        if (!rmtNodes.isEmpty())
            sendToGridTopic(rmtNodes, TOPIC_COMM_USER, ioMsg, PUBLIC_POOL);

        // Will call local listeners in current thread synchronously or through pool,
        // depending async flag, so must go the last
        // to allow remote nodes execute the requested operation in parallel.
        if (locNode != null) {
            send(locNode,
                TOPIC_COMM_USER,
                TOPIC_COMM_USER.ordinal(),
                ioMsg,
                PUBLIC_POOL,
                false,
                0,
                false,
                null,
                async
            );
        }
    }
}
/**
 * Subscribe at messages from a topic, registering the listener on behalf of the local node.
 *
 * @param topic Topic to subscribe to.
 * @param p Message predicate.
 */
public void addUserMessageListener(final @Nullable Object topic, final @Nullable IgniteBiPredicate<UUID, ?> p) {
    UUID locId = ctx.localNodeId();

    addUserMessageListener(topic, p, locId);
}
/**
 * Subscribe at messages from a topic on behalf of the given node.
 *
 * @param topic Topic to subscribe to.
 * @param p Message predicate; a no-op if {@code null}.
 * @param nodeId Id of the node the listener is registered for.
 */
public void addUserMessageListener(
    final @Nullable Object topic,
    final @Nullable IgniteBiPredicate<UUID, ?> p,
    final UUID nodeId
) {
    if (p == null)
        return;

    try {
        // Platform filters get platform-specific initialization; plain predicates get resource injection.
        if (p instanceof PlatformMessageFilter)
            ((PlatformMessageFilter)p).initialize(ctx);
        else
            ctx.resource().injectGeneric(p);

        addMessageListener(TOPIC_COMM_USER,
            new GridUserMessageListener(topic, (IgniteBiPredicate<UUID, Object>)p, nodeId));
    }
    catch (IgniteCheckedException e) {
        throw new IgniteException(e);
    }
}
/**
 * Unsubscribes a previously registered user-message listener.
 *
 * @param topic Topic to unsubscribe from.
 * @param p Message predicate.
 */
public void removeUserMessageListener(@Nullable Object topic, IgniteBiPredicate<UUID, ?> p) {
    GridUserMessageListener lsnr = new GridUserMessageListener(topic, (IgniteBiPredicate<UUID, Object>)p);

    removeMessageListener(TOPIC_COMM_USER, lsnr);
}
/**
 * Registers a listener for a system topic.
 *
 * @param topic Listener's topic.
 * @param lsnr Listener to add.
 */
public void addMessageListener(GridTopic topic, GridMessageListener lsnr) {
    Object topicObj = topic;

    addMessageListener(topicObj, lsnr);
}
/**
 * Registers a listener to be notified on client disconnect.
 *
 * @param lsnr Listener to add.
 */
public void addDisconnectListener(GridDisconnectListener lsnr) {
    disconnectLsnrs.add(lsnr);
}
/**
 * Unregisters a previously added disconnect listener.
 *
 * @param lsnr Listener to remove.
 */
public void removeDisconnectListener(GridDisconnectListener lsnr) {
    disconnectLsnrs.remove(lsnr);
}
/**
 * Registers a listener for a topic, packing multiple listeners for the same topic
 * into an {@link ArrayListener}, and drains any ordered messages that were buffered
 * for the topic before a listener existed.
 *
 * @param topic Listener's topic.
 * @param lsnr Listener to add.
 */
public void addMessageListener(Object topic, final GridMessageListener lsnr) {
    assert lsnr != null;
    assert topic != null;

    // Make sure that new topic is not in the list of closed topics.
    closedTopics.remove(topic);

    GridMessageListener lsnrs;

    // Lock-free retry loop: races with concurrent add/remove on the same topic.
    for (;;) {
        lsnrs = listenerPutIfAbsent0(topic, lsnr);

        if (lsnrs == null) {
            lsnrs = lsnr;

            break;
        }

        assert lsnrs != null;

        if (!(lsnrs instanceof ArrayListener)) { // We are putting the second listener, creating array.
            GridMessageListener arrLsnr = new ArrayListener(lsnrs, lsnr);

            if (listenerReplace0(topic, lsnrs, arrLsnr)) {
                lsnrs = arrLsnr;

                break;
            }
        }
        else {
            if (((ArrayListener)lsnrs).add(lsnr))
                break;

            // Add operation failed because array is already empty and is about to be removed, helping and retrying.
            listenerRemove0(topic, lsnrs);
        }
    }

    // Deliver any ordered messages buffered for this topic before a listener was registered.
    Map<UUID, GridCommunicationMessageSet> map = msgSetMap.get(topic);

    Collection<GridCommunicationMessageSet> msgSets = map != null ? map.values() : null;

    if (msgSets != null) {
        final GridMessageListener lsnrs0 = lsnrs;

        try {
            for (final GridCommunicationMessageSet msgSet : msgSets) {
                pools.poolForPolicy(msgSet.policy()).execute(
                    new Runnable() {
                        @Override public void run() {
                            unwindMessageSet(msgSet, lsnrs0);
                        }
                    });
            }
        }
        catch (RejectedExecutionException e) {
            U.error(log, "Failed to process delayed message due to execution rejection. Increase the upper bound " +
                "on executor service provided in 'IgniteConfiguration.getPublicThreadPoolSize()'). Will attempt to " +
                "process message in the listener thread instead.", e);

            for (GridCommunicationMessageSet msgSet : msgSets)
                unwindMessageSet(msgSet, lsnr);
        }
        catch (IgniteCheckedException ice) {
            throw new IgniteException(ice);
        }
    }
}
/**
 * Removes all listeners for a system topic.
 *
 * @param topic Message topic.
 * @return Whether or not listener was indeed removed.
 */
public boolean removeMessageListener(GridTopic topic) {
    Object topicObj = topic;

    return removeMessageListener(topicObj);
}
/**
 * Removes all listeners for a topic.
 *
 * @param topic Message topic.
 * @return Whether or not listener was indeed removed.
 */
public boolean removeMessageListener(Object topic) {
    // Null listener means "remove everything registered for the topic".
    return removeMessageListener(topic, null);
}
/**
 * Removes a specific listener from a system topic.
 *
 * @param topic Listener's topic.
 * @param lsnr Listener to remove.
 * @return Whether or not the lsnr was removed.
 */
public boolean removeMessageListener(GridTopic topic, @Nullable GridMessageListener lsnr) {
    Object topicObj = topic;

    return removeMessageListener(topicObj, lsnr);
}
/**
 * Removes a specific listener (or, if {@code lsnr} is {@code null}, all listeners) for
 * a topic, closing the topic and discarding buffered ordered-message sets when the
 * last listener goes away.
 *
 * @param topic Listener's topic.
 * @param lsnr Listener to remove.
 * @return Whether or not the lsnr was removed.
 */
public boolean removeMessageListener(Object topic, @Nullable GridMessageListener lsnr) {
    assert topic != null;

    boolean rmv = true;

    Collection<GridCommunicationMessageSet> msgSets = null;

    // If listener is null, then remove all listeners.
    if (lsnr == null) {
        closedTopics.add(topic);

        lsnr = listenerRemove0(topic);

        rmv = lsnr != null;

        Map<UUID, GridCommunicationMessageSet> map = msgSetMap.remove(topic);

        if (map != null)
            msgSets = map.values();
    }
    else {
        // Retry loop: races with concurrent addMessageListener packing listeners into an array.
        for (;;) {
            GridMessageListener lsnrs = listenerGet0(topic);

            // If removing listener before subscription happened.
            if (lsnrs == null) {
                closedTopics.add(topic);

                Map<UUID, GridCommunicationMessageSet> map = msgSetMap.remove(topic);

                if (map != null)
                    msgSets = map.values();

                rmv = false;

                break;
            }
            else {
                boolean empty = false;

                if (!(lsnrs instanceof ArrayListener)) {
                    if (lsnrs.equals(lsnr)) {
                        if (!listenerRemove0(topic, lsnrs))
                            continue; // Retry because it can be packed to array listener.

                        empty = true;
                    }
                    else
                        rmv = false;
                }
                else {
                    ArrayListener arrLsnr = (ArrayListener)lsnrs;

                    if (arrLsnr.remove(lsnr))
                        empty = arrLsnr.isEmpty();
                    else
                        // Listener was not found.
                        rmv = false;

                    // Array drained to empty: unregister it from the topic.
                    if (empty)
                        listenerRemove0(topic, lsnrs);
                }

                // If removing last subscribed listener.
                if (empty) {
                    closedTopics.add(topic);

                    Map<UUID, GridCommunicationMessageSet> map = msgSetMap.remove(topic);

                    if (map != null)
                        msgSets = map.values();
                }

                break;
            }
        }
    }

    // Discarded message sets no longer need timeout tracking.
    if (msgSets != null)
        for (GridCommunicationMessageSet msgSet : msgSets)
            ctx.timeout().removeTimeoutObject(msgSet);

    if (rmv && log.isDebugEnabled())
        log.debug("Removed message listener [topic=" + topic + ", lsnr=" + lsnr + ']');

    // Give platform filters a chance to release resources.
    if (lsnr instanceof ArrayListener) {
        for (GridMessageListener childLsnr : ((ArrayListener)lsnr).arr)
            closeListener(childLsnr);
    }
    else
        closeListener(lsnr);

    return rmv;
}
/**
 * Closes a listener, if applicable.
 *
 * Only {@link GridUserMessageListener} instances whose predicate is a
 * {@link PlatformMessageFilter} need an explicit close callback; for any other
 * listener (including {@code null}) this is a no-op.
 *
 * @param lsnr Listener.
 */
private void closeListener(GridMessageListener lsnr) {
    if (!(lsnr instanceof GridUserMessageListener))
        return;

    IgniteBiPredicate<UUID, Object> pred = ((GridUserMessageListener)lsnr).predLsnr;

    if (pred instanceof PlatformMessageFilter)
        ((PlatformMessageFilter)pred).onClose();
}
/**
 * Gets sent messages count.
 *
 * @return Sent messages count as reported by the underlying communication SPI.
 */
public int getSentMessagesCount() {
    return getSpi().getSentMessagesCount();
}
/**
 * Gets sent bytes count.
 *
 * @return Sent bytes count as reported by the underlying communication SPI.
 */
public long getSentBytesCount() {
    return getSpi().getSentBytesCount();
}
/**
 * Gets received messages count.
 *
 * @return Received messages count as reported by the underlying communication SPI.
 */
public int getReceivedMessagesCount() {
    return getSpi().getReceivedMessagesCount();
}
/**
 * Gets received bytes count.
 *
 * @return Received bytes count as reported by the underlying communication SPI.
 */
public long getReceivedBytesCount() {
    return getSpi().getReceivedBytesCount();
}
/**
 * Gets outbound messages queue size.
 *
 * @return Outbound messages queue size as reported by the underlying communication SPI.
 */
public int getOutboundMessagesQueueSize() {
    return getSpi().getOutboundMessagesQueueSize();
}
/**
 * Dumps SPI stats to diagnostic logs in case TcpCommunicationSpi is used, no-op otherwise.
 */
public void dumpStats() {
    CommunicationSpi spi = getSpi();

    // Only the TCP implementation exposes a diagnostic stats dump.
    if (spi instanceof TcpCommunicationSpi)
        ((TcpCommunicationSpi)spi).dumpStats();
}
/** {@inheritDoc} */
@Override public void printMemoryStats() {
    // Sizes of the manager's internal bookkeeping maps — useful for spotting
    // listener/message-set leaks in diagnostics output.
    X.println(">>>");
    X.println(">>> IO manager memory stats [igniteInstanceName=" + ctx.igniteInstanceName() + ']');
    X.println(">>> lsnrMapSize: " + lsnrMap.size());
    X.println(">>> msgSetMapSize: " + msgSetMap.size());
    X.println(">>> closedTopicsSize: " + closedTopics.sizex());
    X.println(">>> discoWaitMapSize: " + waitMap.size());
}
/**
 * Copy-on-write chain of message listeners sharing a single topic.
 *
 * Mutations ({@link #add}, {@link #remove}) run under {@code synchronized} and
 * replace the {@code volatile} array wholesale, so {@link #onMessage} can read
 * a consistent snapshot without locking. A {@code null} array marks the chain
 * as empty and about to be unmapped; after that, {@link #add} refuses new
 * listeners so the caller can retry with a fresh instance.
 */
private static class ArrayListener implements GridMessageListener {
    /** Current listener chain; {@code null} once the chain has been emptied. */
    private volatile GridMessageListener[] arr;

    /**
     * @param arr Array of listeners.
     */
    ArrayListener(GridMessageListener... arr) {
        this.arr = arr;
    }

    /**
     * Passes message to the whole chain.
     *
     * @param nodeId Node ID.
     * @param msg Message.
     */
    @Override public void onMessage(UUID nodeId, Object msg, byte plc) {
        GridMessageListener[] snapshot = arr;

        if (snapshot == null)
            return;

        for (GridMessageListener target : snapshot)
            target.onMessage(nodeId, msg, plc);
    }

    /**
     * @return {@code true} If this instance is empty.
     */
    boolean isEmpty() {
        return arr == null;
    }

    /**
     * @param l Listener.
     * @return {@code true} If listener was removed.
     */
    synchronized boolean remove(GridMessageListener l) {
        GridMessageListener[] cur = arr;

        if (cur == null)
            return false;

        // Locate the listener first; bail out if it is not in the chain.
        int idx = -1;

        for (int i = 0; i < cur.length && idx < 0; i++) {
            if (cur[i].equals(l))
                idx = i;
        }

        if (idx < 0)
            return false;

        if (cur.length == 1) {
            // Removing the only listener empties the chain for good.
            arr = null;

            return true;
        }

        GridMessageListener[] next = new GridMessageListener[cur.length - 1];

        System.arraycopy(cur, 0, next, 0, idx);
        System.arraycopy(cur, idx + 1, next, idx, cur.length - 1 - idx);

        arr = next;

        return true;
    }

    /**
     * @param l Listener.
     * @return {@code true} if listener was added. Add can fail if this instance is empty and is about to be removed
     *      from map.
     */
    synchronized boolean add(GridMessageListener l) {
        GridMessageListener[] cur = arr;

        if (cur == null)
            return false;

        GridMessageListener[] next = Arrays.copyOf(cur, cur.length + 1);

        next[cur.length] = l;

        arr = next;

        return true;
    }
}
/**
 * This class represents a message listener wrapper that knows about peer deployment.
 *
 * Filters incoming {@link GridIoUserMessage}s by topic, lazily unmarshals the
 * topic/body (resolving a peer deployment when needed) and invokes the user's
 * predicate listener; when the predicate returns {@code false} the listener
 * unsubscribes itself.
 */
private class GridUserMessageListener implements GridMessageListener {
    /** Predicate listeners. */
    private final IgniteBiPredicate<UUID, Object> predLsnr;

    /** User message topic. */
    private final Object topic;

    /** Initial node id. */
    private final UUID initNodeId;

    /**
     * @param topic User topic.
     * @param predLsnr Predicate listener.
     * @param initNodeId Node id that registered given listener.
     */
    GridUserMessageListener(@Nullable Object topic, @Nullable IgniteBiPredicate<UUID, Object> predLsnr,
        @Nullable UUID initNodeId) {
        this.topic = topic;
        this.predLsnr = predLsnr;
        this.initNodeId = initNodeId;
    }

    /**
     * @param topic User topic.
     * @param predLsnr Predicate listener.
     */
    GridUserMessageListener(@Nullable Object topic, @Nullable IgniteBiPredicate<UUID, Object> predLsnr) {
        this(topic, predLsnr, null);
    }

    /** {@inheritDoc} */
    @SuppressWarnings({"ConstantConditions"})
    @Override public void onMessage(UUID nodeId, Object msg, byte plc) {
        if (!(msg instanceof GridIoUserMessage)) {
            U.error(log, "Received unknown message (potentially fatal problem): " + msg);

            return;
        }

        GridIoUserMessage ioMsg = (GridIoUserMessage)msg;

        ClusterNode node = ctx.discovery().node(nodeId);

        if (node == null) {
            U.warn(log, "Failed to resolve sender node (did the node left grid?): " + nodeId);

            return;
        }

        // Busy lock guards against processing during manager stop.
        Lock lock = busyLock.readLock();

        lock.lock();

        try {
            if (stopping) {
                if (log.isDebugEnabled())
                    log.debug("Received user message while stopping (will ignore) [nodeId=" +
                        nodeId + ", msg=" + msg + ']');

                return;
            }

            Object msgBody = ioMsg.body();

            assert msgBody != null || ioMsg.bodyBytes() != null;

            try {
                byte[] msgTopicBytes = ioMsg.topicBytes();

                Object msgTopic = ioMsg.topic();

                GridDeployment dep = ioMsg.deployment();

                // Resolve peer deployment lazily, only when the message carries
                // a deployment class and P2P class loading is enabled.
                if (dep == null && ctx.config().isPeerClassLoadingEnabled() &&
                    ioMsg.deploymentClassName() != null) {
                    dep = ctx.deploy().getGlobalDeployment(
                        ioMsg.deploymentMode(),
                        ioMsg.deploymentClassName(),
                        ioMsg.deploymentClassName(),
                        ioMsg.userVersion(),
                        nodeId,
                        ioMsg.classLoaderId(),
                        ioMsg.loaderParticipants(),
                        null);

                    if (dep == null)
                        throw new IgniteDeploymentCheckedException(
                            "Failed to obtain deployment information for user message. " +
                            "If you are using custom message or topic class, try implementing " +
                            "GridPeerDeployAware interface. [msg=" + ioMsg + ']');

                    ioMsg.deployment(dep); // Cache deployment.
                }

                // Unmarshall message topic if needed.
                if (msgTopic == null && msgTopicBytes != null) {
                    msgTopic = U.unmarshal(marsh, msgTopicBytes,
                        U.resolveClassLoader(dep != null ? dep.classLoader() : null, ctx.config()));

                    ioMsg.topic(msgTopic); // Save topic to avoid future unmarshallings.
                }

                // Message is for a different user topic — not an error, just skip.
                if (!F.eq(topic, msgTopic))
                    return;

                if (msgBody == null) {
                    msgBody = U.unmarshal(marsh, ioMsg.bodyBytes(),
                        U.resolveClassLoader(dep != null ? dep.classLoader() : null, ctx.config()));

                    ioMsg.body(msgBody); // Save body to avoid future unmarshallings.
                }

                // Resource injection.
                if (dep != null)
                    ctx.resource().inject(dep, dep.deployedClass(ioMsg.deploymentClassName()), msgBody);
            }
            catch (IgniteCheckedException e) {
                U.error(log, "Failed to unmarshal user message [node=" + nodeId + ", message=" +
                    msg + ']', e);
            }

            if (msgBody != null) {
                if (predLsnr != null) {
                    // Run the predicate under the security context of the node
                    // that registered the listener; false return unsubscribes.
                    try (OperationSecurityContext s = ctx.security().withContext(initNodeId)) {
                        if (!predLsnr.apply(nodeId, msgBody))
                            removeMessageListener(TOPIC_COMM_USER, this);
                    }
                }
            }
        }
        finally {
            lock.unlock();
        }
    }

    /** {@inheritDoc} */
    @Override public boolean equals(Object o) {
        if (this == o)
            return true;

        if (o == null || getClass() != o.getClass())
            return false;

        GridUserMessageListener l = (GridUserMessageListener)o;

        // Equality by predicate + topic so duplicate registrations collapse.
        return F.eq(predLsnr, l.predLsnr) && F.eq(topic, l.topic);
    }

    /** {@inheritDoc} */
    @Override public int hashCode() {
        int res = predLsnr != null ? predLsnr.hashCode() : 0;

        res = 31 * res + (topic != null ? topic.hashCode() : 0);

        return res;
    }

    /** {@inheritDoc} */
    @Override public String toString() {
        return S.toString(GridUserMessageListener.class, this);
    }
}
/**
 * Ordered communication message set.
 *
 * Accumulates ordered messages from a single sender node on a single topic and
 * guarantees single-threaded delivery through the reserve/unwind/release
 * protocol. Also acts as its own timeout object: an idle set is either drained
 * ({@code skipOnTimeout}) or removed after the network timeout elapses.
 */
private class GridCommunicationMessageSet implements GridTimeoutObject {
    /** Sender node ID. */
    private final UUID nodeId;

    /** Absolute expiration time (ms); refreshed while a listener is attached. */
    private long endTime;

    /** Unique id of this timeout object. */
    private final IgniteUuid timeoutId;

    /** Topic the ordered messages belong to. */
    @GridToStringInclude
    private final Object topic;

    /** Thread-pool policy used to process the set. */
    private final byte plc;

    /** Pending messages in arrival order. */
    @GridToStringInclude
    private final Queue<OrderedMessageContainer> msgs = new ConcurrentLinkedDeque<>();

    /** Reservation flag ensuring only one thread unwinds at a time. */
    private final AtomicBoolean reserved = new AtomicBoolean();

    /** Inactivity timeout (ms); 0 at construction means "use network timeout". */
    private final long timeout;

    /** Whether stale messages may be skipped (delivered) on timeout. */
    private final boolean skipOnTimeout;

    /** Timestamp (ms) of the last message added. */
    private long lastTs;

    /**
     * @param plc Communication policy.
     * @param topic Communication topic.
     * @param nodeId Node ID.
     * @param timeout Timeout.
     * @param skipOnTimeout Whether message can be skipped on timeout.
     * @param msg Message to add immediately.
     * @param msgC Message closure (may be {@code null}).
     */
    GridCommunicationMessageSet(
        byte plc,
        Object topic,
        UUID nodeId,
        long timeout,
        boolean skipOnTimeout,
        GridIoMessage msg,
        @Nullable IgniteRunnable msgC
    ) {
        assert nodeId != null;
        assert topic != null;
        assert msg != null;

        this.plc = plc;
        this.nodeId = nodeId;
        this.topic = topic;
        this.timeout = timeout == 0 ? ctx.config().getNetworkTimeout() : timeout;
        this.skipOnTimeout = skipOnTimeout;

        endTime = endTime(timeout);

        timeoutId = IgniteUuid.randomUuid();

        lastTs = U.currentTimeMillis();

        msgs.add(new OrderedMessageContainer(msg, lastTs, msgC, MTC.span()));
    }

    /** {@inheritDoc} */
    @Override public IgniteUuid timeoutId() {
        return timeoutId;
    }

    /** {@inheritDoc} */
    @Override public long endTime() {
        return endTime;
    }

    /** {@inheritDoc} */
    @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
    @Override public void onTimeout() {
        GridMessageListener lsnr = listenerGet0(topic);

        if (lsnr != null) {
            long delta = 0;

            if (skipOnTimeout) {
                // Drain messages that have been sitting longer than the timeout;
                // loop because unwinding may race with new arrivals.
                while (true) {
                    delta = 0;

                    boolean unwind = false;

                    synchronized (this) {
                        if (!msgs.isEmpty()) {
                            delta = U.currentTimeMillis() - lastTs;

                            if (delta >= timeout)
                                unwind = true;
                        }
                    }

                    if (unwind)
                        unwindMessageSet(this, lsnr);
                    else
                        break;
                }
            }

            // Someone is still listening to messages, so delay set removal.
            endTime = endTime(timeout - delta);

            ctx.timeout().addTimeoutObject(this);

            return;
        }

        if (log.isDebugEnabled())
            log.debug("Removing message set due to timeout: " + this);

        ConcurrentMap<UUID, GridCommunicationMessageSet> map = msgSetMap.get(topic);

        if (map != null) {
            boolean rmv;

            synchronized (map) {
                rmv = map.remove(nodeId, this) && map.isEmpty();
            }

            // Drop the whole per-topic map only when this was its last entry.
            if (rmv)
                msgSetMap.remove(topic, map);
        }
    }

    /**
     * @return ID of node that sent the messages in the set.
     */
    UUID nodeId() {
        return nodeId;
    }

    /**
     * @return Communication policy.
     */
    byte policy() {
        return plc;
    }

    /**
     * @return Message topic.
     */
    Object topic() {
        return topic;
    }

    /**
     * Attempts to reserve the set for exclusive unwinding.
     *
     * @return {@code True} if successful.
     */
    boolean reserve() {
        return reserved.compareAndSet(false, true);
    }

    /**
     * @return {@code True} if set is reserved.
     */
    boolean reserved() {
        return reserved.get();
    }

    /**
     * Releases reservation.
     */
    void release() {
        assert reserved.get() : "Message set was not reserved: " + this;

        reserved.set(false);
    }

    /**
     * Delivers all queued messages to the listener in order. Must only be
     * called while the set is reserved by the calling thread.
     *
     * @param lsnr Listener to notify.
     */
    void unwind(GridMessageListener lsnr) {
        assert reserved.get();

        for (OrderedMessageContainer mc = msgs.poll(); mc != null; mc = msgs.poll()) {
            try (TraceSurroundings ignore = support(ctx.tracing().create(
                COMMUNICATION_ORDERED_PROCESS, mc.parentSpan))) {
                try {
                    OrderedMessageContainer fmc = mc;

                    MTC.span().addTag(SpanTags.MESSAGE, () -> traceName(fmc.message));

                    invokeListener(plc, lsnr, nodeId, mc.message.message(), secCtx(mc.message));
                }
                finally {
                    // Completion closure runs even if the listener throws.
                    if (mc.closure != null)
                        mc.closure.run();
                }
            }
        }
    }

    /**
     * @param msg Message to add.
     * @param msgC Message closure (may be {@code null}).
     */
    void add(
        GridIoMessage msg,
        @Nullable IgniteRunnable msgC
    ) {
        msgs.add(new OrderedMessageContainer(msg, U.currentTimeMillis(), msgC, MTC.span()));
    }

    /**
     * @return {@code True} if set has messages to unwind.
     */
    boolean changed() {
        return !msgs.isEmpty();
    }

    /**
     * Calculates end time with overflow check.
     *
     * @param timeout Timeout in milliseconds.
     * @return End time in milliseconds.
     */
    private long endTime(long timeout) {
        long endTime = U.currentTimeMillis() + timeout;

        // Account for overflow.
        if (endTime < 0)
            endTime = Long.MAX_VALUE;

        return endTime;
    }

    /** {@inheritDoc} */
    @Override public String toString() {
        return S.toString(GridCommunicationMessageSet.class, this);
    }
}
/**
 * DTO for handling of communication message.
 *
 * Bundles a received ordered message with the time it was enqueued, an
 * optional completion closure, and the tracing span of the producer so the
 * unwind path can process, acknowledge and trace it as one unit.
 */
private static class OrderedMessageContainer {
    /** Received ordered message. */
    GridIoMessage message;

    /** Timestamp (ms) when the message was added to the set's queue. */
    long addedTime;

    /** Completion closure to run after processing; may be {@code null}. */
    IgniteRunnable closure;

    /** Tracing span of the process which added this message. */
    Span parentSpan;

    /**
     * @param msg Received message.
     * @param addedTime Time of added to queue.
     * @param c Message closure.
     * @param parentSpan Span of process which added this message.
     */
    private OrderedMessageContainer(GridIoMessage msg, long addedTime, IgniteRunnable c, Span parentSpan) {
        // Parameter is a primitive long: the previous boxed Long forced an
        // unboxing on every construction (all call sites pass primitives)
        // and would have thrown NPE on a null argument.
        this.message = msg;
        this.addedTime = addedTime;
        this.closure = c;
        this.parentSpan = parentSpan;
    }
}
/**
 * {@link ConcurrentHashMap} with identity-based equality and hashing, so two
 * distinct map instances never compare equal even when their contents match.
 * Useful where the map instance itself serves as a key or marker.
 */
private static class ConcurrentHashMap0<K, V> extends ConcurrentHashMap<K, V> {
    /** */
    private static final long serialVersionUID = 0L;

    /** Cached identity hash; 0 means "not computed yet" (racy but idempotent caching). */
    private int hash;

    /**
     * @param o Object to be compared for equality with this map.
     * @return {@code True} only for {@code this}.
     */
    @Override public boolean equals(Object o) {
        return o == this;
    }

    /**
     * @return Identity hash code.
     */
    @Override public int hashCode() {
        int h = hash;

        if (h == 0) {
            h = System.identityHashCode(this);

            // Reserve 0 as the "not cached yet" marker.
            if (h == 0)
                h = -1;

            hash = h;
        }

        return h;
    }
}
/**
 * Immutable holder for a message whose processing was deferred (e.g. received
 * before the manager finished starting), keeping everything needed to replay
 * it later: sender, message and acknowledgement callback.
 */
private static class DelayedMessage {
    /** Sender node ID. */
    private final UUID nodeId;

    /** Deferred message. */
    private final GridIoMessage msg;

    /** Callback to run once the message is processed; may be {@code null}. */
    private final IgniteRunnable msgC;

    /**
     * @param nodeId Node ID.
     * @param msg Message.
     * @param msgC Callback.
     */
    private DelayedMessage(UUID nodeId, GridIoMessage msg, IgniteRunnable msgC) {
        this.nodeId = nodeId;
        this.msg = msg;
        this.msgC = msgC;
    }

    /**
     * @return Message callback (may be {@code null}).
     */
    public IgniteRunnable callback() {
        return msgC;
    }

    /**
     * @return Message.
     */
    public GridIoMessage message() {
        return msg;
    }

    /**
     * @return Node id.
     */
    public UUID nodeId() {
        return nodeId;
    }

    /** {@inheritDoc} */
    @Override public String toString() {
        return S.toString(DelayedMessage.class, this, super.toString());
    }
}
/**
 * Future completed when all {@code cntr} expected IO-test responses for a
 * given request id have arrived; removes itself from the io-test map on
 * completion.
 */
private class IoTestFuture extends GridFutureAdapter<List<IgniteIoTestMessage>> {
    /** Request id this future is registered under. */
    private final long id;

    /** Number of responses expected before completion. */
    private final int cntr;

    /** Accumulated responses; guarded by {@code synchronized (this)}. */
    private final List<IgniteIoTestMessage> ress;

    /**
     * @param id ID.
     * @param cntr Counter.
     */
    IoTestFuture(long id, int cntr) {
        assert cntr > 0 : cntr;

        this.id = id;
        this.cntr = cntr;

        ress = new ArrayList<>(cntr);
    }

    /**
     * Records one response; completes the future when the expected count is reached.
     */
    void onResponse(IgniteIoTestMessage res) {
        boolean complete;

        synchronized (this) {
            ress.add(res);

            complete = cntr == ress.size();
        }

        // Complete outside the lock to avoid running listeners while holding it.
        if (complete)
            onDone(ress);
    }

    /** {@inheritDoc} */
    @Override public boolean onDone(List<IgniteIoTestMessage> res, @Nullable Throwable err) {
        if (super.onDone(res, err)) {
            // Deregister so the map does not leak completed futures.
            ioTestMap().remove(id);

            return true;
        }

        return false;
    }

    /** {@inheritDoc} */
    @Override public String toString() {
        return S.toString(IoTestFuture.class, this);
    }
}
/**
 * Per-thread accumulator of IO-test latency statistics for a single node:
 * a latency histogram plus the maximum observed value (and its wall-clock
 * timestamp) for each stage of the request/response round trip.
 * Not thread-safe by design — one instance per worker thread.
 */
private static class IoTestThreadLocalNodeResults {
    /** Latency histogram; last bucket collects samples at or above the limit. */
    private final long[] resLatency;

    /** Number of histogram ranges (buckets minus the overflow bucket). */
    private final int rangesCnt;

    /** Sum of all observed latencies. */
    private long totalLatency;

    /** Max total round-trip latency. */
    private long maxLatency;

    /** Wall-clock time (ms) when {@link #maxLatency} was observed. */
    private long maxLatencyTs;

    /** Max time a request spent in the sender's send queue. */
    private long maxReqSendQueueTime;

    /** Observation timestamp of the above. */
    private long maxReqSendQueueTimeTs;

    /** Max time a request spent in the receiver's queue before processing. */
    private long maxReqRcvQueueTime;

    /** Observation timestamp of the above. */
    private long maxReqRcvQueueTimeTs;

    /** Max time a response spent queued before being sent. */
    private long maxResSendQueueTime;

    /** Observation timestamp of the above. */
    private long maxResSendQueueTimeTs;

    /** Max time a response spent queued on the receiver before processing. */
    private long maxResRcvQueueTime;

    /** Observation timestamp of the above. */
    private long maxResRcvQueueTimeTs;

    /** Max request wire time (ms). */
    private long maxReqWireTimeMillis;

    /** Observation timestamp of the above. */
    private long maxReqWireTimeTs;

    /** Max response wire time (ms) — but see NOTE(review) in onResult. */
    private long maxResWireTimeMillis;

    /** Observation timestamp of the above. */
    private long maxResWireTimeTs;

    /** Upper latency bound; samples at or above it land in the overflow bucket. */
    private final long latencyLimit;

    /**
     * @param rangesCnt Ranges count.
     * @param latencyLimit Upper latency bound for the histogram.
     */
    public IoTestThreadLocalNodeResults(int rangesCnt, long latencyLimit) {
        this.rangesCnt = rangesCnt;
        this.latencyLimit = latencyLimit;

        // +1 for the overflow ("timed out") bucket.
        resLatency = new long[rangesCnt + 1];
    }

    /**
     * Folds one completed IO-test exchange into the statistics.
     *
     * @param msg Completed IO-test message with all stage timestamps filled in.
     */
    public void onResult(IgniteIoTestMessage msg) {
        long now = System.currentTimeMillis();

        long latency = msg.responseProcessedTs() - msg.requestCreateTs();

        // Map the latency onto a histogram bucket of width latencyLimit/rangesCnt.
        int idx = latency >= latencyLimit ?
            rangesCnt /* Timed out. */ :
            (int)Math.floor((1.0 * latency) / ((1.0 * latencyLimit) / rangesCnt));

        resLatency[idx]++;

        totalLatency += latency;

        if (maxLatency < latency) {
            maxLatency = latency;
            maxLatencyTs = now;
        }

        long reqSndQueueTime = msg.requestSendTs() - msg.requestCreateTs();

        if (maxReqSendQueueTime < reqSndQueueTime) {
            maxReqSendQueueTime = reqSndQueueTime;
            maxReqSendQueueTimeTs = now;
        }

        long reqRcvQueueTime = msg.requestProcessTs() - msg.requestReceiveTs();

        if (maxReqRcvQueueTime < reqRcvQueueTime) {
            maxReqRcvQueueTime = reqRcvQueueTime;
            maxReqRcvQueueTimeTs = now;
        }

        long resSndQueueTime = msg.responseSendTs() - msg.requestProcessTs();

        if (maxResSendQueueTime < resSndQueueTime) {
            maxResSendQueueTime = resSndQueueTime;
            maxResSendQueueTimeTs = now;
        }

        long resRcvQueueTime = msg.responseProcessedTs() - msg.responseReceiveTs();

        if (maxResRcvQueueTime < resRcvQueueTime) {
            maxResRcvQueueTime = resRcvQueueTime;
            maxResRcvQueueTimeTs = now;
        }

        long reqWireTimeMillis = msg.requestReceivedTsMillis() - msg.requestSendTsMillis();

        if (maxReqWireTimeMillis < reqWireTimeMillis) {
            maxReqWireTimeMillis = reqWireTimeMillis;
            maxReqWireTimeTs = now;
        }

        // NOTE(review): this subtracts requestSendTsMillis(), so the "response
        // wire time" also includes request wire + server processing time. If a
        // pure response wire time was intended, a responseSendTsMillis()-based
        // delta would be expected — confirm against IgniteIoTestMessage.
        long resWireTimeMillis = msg.responseReceivedTsMillis() - msg.requestSendTsMillis();

        if (maxResWireTimeMillis < resWireTimeMillis) {
            maxResWireTimeMillis = resWireTimeMillis;
            maxResWireTimeTs = now;
        }
    }
}
/**
 * Aggregated IO-test results for one node, merged from several
 * {@link IoTestThreadLocalNodeResults} (one per worker thread): summed
 * histogram/total latency plus the per-thread maxima as (value, timestamp)
 * pairs.
 */
private static class IoTestNodeResults {
    /** Upper latency bound shared by all merged thread-local results. */
    private long latencyLimit;

    /** Summed latency histogram; lazily initialized on first {@link #add}. */
    private long[] resLatency;

    /** Sum of all observed latencies across threads. */
    private long totalLatency;

    /** Per-thread (max latency, timestamp) pairs. */
    private Collection<IgnitePair<Long>> maxLatency = new ArrayList<>();

    /** Per-thread (max request send-queue time, timestamp) pairs. */
    private Collection<IgnitePair<Long>> maxReqSendQueueTime = new ArrayList<>();

    /** Per-thread (max request receive-queue time, timestamp) pairs. */
    private Collection<IgnitePair<Long>> maxReqRcvQueueTime = new ArrayList<>();

    /** Per-thread (max response send-queue time, timestamp) pairs. */
    private Collection<IgnitePair<Long>> maxResSendQueueTime = new ArrayList<>();

    /** Per-thread (max response receive-queue time, timestamp) pairs. */
    private Collection<IgnitePair<Long>> maxResRcvQueueTime = new ArrayList<>();

    /** Per-thread (max request wire time, timestamp) pairs. */
    private Collection<IgnitePair<Long>> maxReqWireTimeMillis = new ArrayList<>();

    /** Per-thread (max response wire time, timestamp) pairs. */
    private Collection<IgnitePair<Long>> maxResWireTimeMillis = new ArrayList<>();

    /**
     * @param res Node results to add.
     */
    public void add(IoTestThreadLocalNodeResults res) {
        if (resLatency == null) {
            resLatency = res.resLatency.clone();
            latencyLimit = res.latencyLimit;
        }
        else {
            // All threads must have used the same histogram configuration.
            assert latencyLimit == res.latencyLimit;
            assert resLatency.length == res.resLatency.length;

            for (int i = 0; i < resLatency.length; i++)
                resLatency[i] += res.resLatency[i];
        }

        totalLatency += res.totalLatency;

        maxLatency.add(F.pair(res.maxLatency, res.maxLatencyTs));
        maxReqSendQueueTime.add(F.pair(res.maxReqSendQueueTime, res.maxReqSendQueueTimeTs));
        maxReqRcvQueueTime.add(F.pair(res.maxReqRcvQueueTime, res.maxReqRcvQueueTimeTs));
        maxResSendQueueTime.add(F.pair(res.maxResSendQueueTime, res.maxResSendQueueTimeTs));
        maxResRcvQueueTime.add(F.pair(res.maxResRcvQueueTime, res.maxResRcvQueueTimeTs));
        maxReqWireTimeMillis.add(F.pair(res.maxReqWireTimeMillis, res.maxReqWireTimeTs));
        maxResWireTimeMillis.add(F.pair(res.maxResWireTimeMillis, res.maxResWireTimeTs));
    }

    /**
     * @return Bin latency in microseconds.
     */
    public long binLatencyMcs() {
        if (resLatency == null)
            throw new IllegalStateException();

        // Last bucket is the overflow bucket, hence length - 1 real ranges.
        return latencyLimit / (1000 * (resLatency.length - 1));
    }
}
/**
 * @param msg Communication message.
 * @return A pair that represents a security subject id and security context. The returned value is {@code null}
 *      if security is not enabled or the message carries no security data.
 */
private @Nullable T2<UUID, SecurityContext> secCtx(GridIoMessage msg) {
    if (ctx.security().enabled() && msg instanceof GridIoSecurityAwareMessage) {
        GridIoSecurityAwareMessage secMsg = (GridIoSecurityAwareMessage)msg;

        SecurityContext secCtx = null;

        try {
            // Context travels marshalled; absence is legal (subject id alone may suffice).
            secCtx = secMsg.secCtx() != null ?
                U.unmarshal(marsh, secMsg.secCtx(), U.resolveClassLoader(ctx.config())) : null;
        }
        catch (IgniteCheckedException e) {
            // Deliberately non-fatal: proceed with the subject id and a null context.
            log.error("Security context unmarshaled with error.", e);
        }

        return new T2<>(
            secMsg.secSubjId(),
            secCtx
        );
    }

    return null;
}
/**
 * Responsible for handling network situation where server cannot open connection to client and
 * has to ask client to establish a connection to specific server.
 *
 * This includes the following steps:
 * <ol>
 *     <li>
 *         Server tries to send regular communication message to unreachable client,
 *         detects that client is unreachable and directs special discovery message to it.
 *         After that it waits for client to reply.
 *     </li>
 *     <li>
 *         Client receives discovery message and sends special communication message in response.
 *         This action opens communication channel between client and server that can be used by both sides.
 *     </li>
 *     <li>
 *         Server on receiving comm message sends original communication message to the client.
 *     </li>
 * </ol>
 */
private final class TcpCommunicationInverseConnectionHandler implements ConnectionRequestor {
    /**
     * Executor service to send special communication message.
     */
    private ExecutorService responseSendService = Executors
        .newCachedThreadPool(new IgniteThreadFactory(ctx.igniteInstanceName(), "io-send-service"));

    /**
     * Discovery event listener (works only on client nodes for now) notified when
     * inverse connection request arrives.
     */
    private CustomEventListener<TcpConnectionRequestDiscoveryMessage> discoConnReqLsnr = (topVer, snd, msg) -> {
        // Request is addressed to a specific node; everyone else ignores it.
        if (!locNodeId.equals(msg.receiverNodeId()))
            return;

        int connIdx = msg.connectionIndex();

        if (log.isInfoEnabled())
            log.info("Received inverse communication request from " + snd + " for connection index " + connIdx);

        TcpCommunicationSpi tcpCommSpi = getTcpCommunicationSpi();

        assert !isPairedConnection(snd, tcpCommSpi);

        // Respond asynchronously: sending from the discovery thread could block it.
        responseSendService.submit(() -> {
            try {
                send(snd,
                    TOPIC_COMM_SYSTEM,
                    TOPIC_COMM_SYSTEM.ordinal(),
                    new TcpInverseConnectionResponseMessage(connIdx),
                    SYSTEM_POOL,
                    false,
                    0,
                    false,
                    null,
                    false
                );
            }
            catch (IgniteCheckedException e) {
                log.error("Failed to send response to inverse communication connection request from node: " + snd.id(), e);
            }
        });
    };

    /**
     * Subscribes the discovery listener (clients only) and a diagnostic
     * listener for inverse-connection responses.
     */
    public void onStart() {
        // NOTE(review): references the listener via the outer manager's
        // invConnHandler field rather than directly — presumably the same
        // instance as this; confirm.
        if (ctx.clientNode())
            ctx.discovery().setCustomEventListener(TcpConnectionRequestDiscoveryMessage.class, invConnHandler.discoConnReqLsnr);

        addMessageListener(TOPIC_COMM_SYSTEM, (nodeId, msg, plc) -> {
            if (msg instanceof TcpInverseConnectionResponseMessage) {
                if (log.isInfoEnabled())
                    log.info("Response for inverse connection received from node " + nodeId +
                        ", connection index is " + ((TcpInverseConnectionResponseMessage)msg).connectionIndex());
            }
        });
    }

    /**
     * Executes inverse connection protocol by sending discovery request and then waiting on future
     * completed when response arrives or timeout is reached.
     *
     * @param node Unreachable node.
     * @param connIdx Connection index.
     */
    @Override public void request(ClusterNode node, int connIdx) {
        TcpCommunicationSpi tcpCommSpi = getTcpCommunicationSpi();

        // Paired connections are not supported by the inverse protocol.
        if (isPairedConnection(node, tcpCommSpi))
            throw new IgniteSpiException("Inverse connection protocol doesn't support paired connections");

        try {
            if (log.isInfoEnabled())
                log.info("TCP connection failed, node " + node.id() + " is unreachable," +
                    " will attempt to request inverse connection via discovery SPI.");

            TcpConnectionRequestDiscoveryMessage msg = new TcpConnectionRequestDiscoveryMessage(
                node.id(), connIdx
            );

            ctx.discovery().sendCustomEvent(msg);
        }
        catch (IgniteCheckedException ex) {
            throw new IgniteSpiException(ex);
        }
    }

    /**
     * Shuts the response-sending executor down.
     */
    public void onStop() {
        U.shutdownNow(
            TcpCommunicationInverseConnectionHandler.class,
            responseSendService,
            log
        );
    }
}
}
|
<filename>core/src/main/java/org/brunel/action/ActionStep.java
/*
* Copyright (c) 2015 IBM Corporation and others.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* You may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.brunel.action;
import org.brunel.action.parse.ParseGrammar;
import org.brunel.data.Data;
import org.brunel.model.VisException;
import org.brunel.model.VisSingle;
import org.brunel.model.VisTypes;
import java.util.Arrays;
import java.util.Set;
/**
 * A single named step of a Brunel action (e.g. {@code x(field)}, {@code bar},
 * {@code bin(a)}): a lower-cased name plus its parameters. Knows how to apply
 * itself to a {@link VisSingle}.
 */
class ActionStep {
    // Use the parser to retrieve the list of summary and transform methods
    private static final Set<String> SUMMARY_METHODS = ParseGrammar.instance().getSummaryMethods();
    private static final Set<String> TRANSFORM_METHODS = ParseGrammar.instance().getTransformMethods();

    /** Lower-cased action name. */
    final String name;

    /** Parameters of the action; may be empty, never null. */
    final Param[] parameters;

    /**
     * @param actionName Name of the action (case-insensitive).
     * @param params Parameters, if any.
     */
    ActionStep(String actionName, Param... params) {
        // Locale.ROOT keeps name matching locale-independent (avoids e.g. the
        // Turkish dotless-i problem breaking "FILTER" -> "filter").
        name = actionName.toLowerCase(Locale.ROOT);
        this.parameters = params;
    }

    @Override
    public int hashCode() {
        return 31 * name.hashCode() + Arrays.hashCode(parameters);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) return true;
        if (obj == null) return false;
        if (!(obj instanceof ActionStep)) return false;
        ActionStep other = (ActionStep) obj;
        return name.equals(other.name) && Arrays.equals(parameters, other.parameters);
    }

    @Override
    public String toString() {
        if (parameters.length == 0) return name;
        return name + '(' + Data.join(parameters) + ')';
    }

    /**
     * Applies this step to the given item and returns it. Order of checks is
     * significant: literal names are matched first, then transform/summary
     * methods from the grammar, then the remaining literal names, then element
     * and diagram types.
     *
     * @param item Visualization element to modify.
     * @return The same item, for chaining.
     * @throws VisException wrapping any failure while applying the step.
     */
    VisSingle apply(VisSingle item) {
        try {
            if (name.equals("x")) {
                item.x(parameters);
                return item;
            } else if (name.equals("y")) {
                item.y(parameters);
                return item;
            } else if (name.equals("yrange")) {
                if (parameters.length < 1 || parameters.length > 2)
                    throw new IllegalArgumentException("yrange requires 1-2 fields");
                if (parameters.length == 1)
                    item.yrange(parameters[0], parameters[0]);   // single field: degenerate range
                else
                    item.yrange(parameters[0], parameters[1]);
                return item;
            } else if (name.equals("color")) {
                item.color(parameters);
                return item;
            } else if (name.equals("opacity")) {
                item.opacity(parameters);
                return item;
            } else if (name.equals("size")) {
                item.size(parameters);
                return item;
            } else if (name.equals("split")) {
                item.split(parameters);
                return item;
            } else if (name.equals("label")) {
                item.label(parameters);
                return item;
            } else if (name.equals("tooltip")) {
                item.tooltip(parameters);
                return item;
            } else if (TRANSFORM_METHODS.contains(name)) {
                // Data transforms (as declared by the parse grammar).
                item.transform(name, parameters);
                return item;
            } else if (SUMMARY_METHODS.contains(name)) {
                // Aggregations (as declared by the parse grammar).
                item.summarize(name, parameters);
                return item;
            } else if (name.equals("sort")) {
                item.sort(parameters);
                return item;
            } else if (name.equals("at")) {
                item.at(parameters);
                return item;
            } else if (name.equals("filter")) {
                item.filter(parameters);
                return item;
            } else if (name.equals("key")) {
                item.key(parameters);
                return item;
            } else if (name.equals("style")) {
                item.style(oneParam());
                return item;
            } else if (name.equals("axes")) {
                item.axes(oneParam());
                return item;
            } else if (name.equals("legends")) {
                item.legends(oneParam());
                return item;
            } else if (name.equals("interaction")) {
                item.interaction(parameters);
                return item;
            } else if (name.equals("transpose")) {
                item.transpose();
                return item;
            } else if (name.equals("polar")) {
                item.polar();
                return item;
            } else if (name.equals("stack")) {
                item.stack();
                return item;
            } else if (name.equals("using")) {
                item.using(oneParam());
                return item;
            } else if (name.equals("flipx")) {
                item.flipx();
                return item;
            } else if (name.equals("flip")) {
                item.flip();
                return item;
            } else if (name.equals("data")) {
                item.data(parameters);
                return item;
            }

            // Fall back to element types (bar, line, point, ...) ...
            for (VisTypes.Element t : VisTypes.Element.values())
                if (t.name().equals(name)) {
                    item.element(t);
                    return item;
                }

            // ... and diagram types (chord, treemap, ...).
            for (VisTypes.Diagram t : VisTypes.Diagram.values())
                if (t.name().equals(name)) {
                    item.diagram(t);
                    return item;
                }

            throw new IllegalArgumentException("Cannot apply '" + name + "' to this item");
        } catch (Exception ex) {
            throw VisException.makeApplying(ex, this);
        }
    }

    /** @return The last parameter, or {@code null} when there are none. */
    private Param oneParam() {
        return parameters.length > 0 ? parameters[parameters.length - 1] : null;
    }
}
|
#!/usr/bin/env bash
# Build and deploy the nginx container for the project.
#
# Prompts for optional HTTPS support (with a self-signed certificate) and for
# adding the project URL to /etc/hosts. Relies on helpers (replaceAllInFile,
# color variables) sourced from .docker/functions.sh and on configuration
# variables (PROJECT_NAME, PROJECT_URL, *_PORTS_LIST) from src/.env.
set -e

# include global vars and functions repository
source .docker/functions.sh
source src/.env # get configuration file

# NOTE(review): projectUrl is composed below but never printed in this script —
# presumably consumed by a sourcing caller; confirm.
projectUrl=""

# build and deploy nginx
echo "${BLU}Build the ${BLD}nginx${RST} ${BLU}container${RST}"
replaceAllInFile .docker/deploy/docker-compose.yml project "$PROJECT_NAME"

httpProtocol='http'

# HTTP: pick the first free port from the configured candidates (default 80).
nginxHttpHostPort=80
for port in $HTTP_PORTS_LIST
do
    # nc -z succeeds when something already listens on the port -> "USE".
    if [ "$(nc -z 127.0.0.1 "$port" && echo "USE" || echo "FREE")" == 'FREE' ]
    then
        nginxHttpHostPort=$port
        break
    fi
done
replaceAllInFile .docker/deploy/docker-compose.yml "host80" "$nginxHttpHostPort:80"
echo "${GRN}HTTP settings have been made successfully.${RST}"

# HTTPS (optional)
while true; do
    read -rp "Do you want to implement HTTPS access? ${RED}[y/N]${RST}: " yn
    case $yn in
        [Yy]* )
            httpProtocol='https'

            # Pick the first free HTTPS candidate port; fall back to the
            # standard HTTPS port 443 (was mistyped as 463 before).
            nginxHttpsHostPort=443
            for port in $HTTPS_PORTS_LIST
            do
                if [ "$(nc -z 127.0.0.1 "$port" && echo "USE" || echo "FREE")" == 'FREE' ]
                then
                    nginxHttpsHostPort=$port
                    break
                fi
            done
            replaceAllInFile .docker/deploy/docker-compose.yml "host443" "$nginxHttpsHostPort:443"
            replaceAllInFile .docker/deploy/docker-compose.yml nginxConf apps.conf
            projectUrl="Project URL: https://$PROJECT_URL"

            echo "${BLU}Generate self-signed SSL Certificate for 365days${RST}"
            replaceAllInFile .docker/deploy/docker-compose.yml localhost "$PROJECT_URL"
            # Self-signed cert with SAN entries for the bare and www domains.
            openssl req -subj "/O=$PROJECT_NAME/CN=$PROJECT_URL" -addext "subjectAltName=DNS:$PROJECT_URL,DNS:www.$PROJECT_URL" -x509 -newkey rsa:4096 -nodes -keyout ".docker/build/cert/$PROJECT_URL.key" -out ".docker/build/cert/$PROJECT_URL.pem" -days 365
            echo "${GRN}HTTPS settings have been made successfully.${RST}"
            break;;
        * )
            # Any non-"y" answer (including plain Enter) means HTTP only;
            # strip the HTTPS-specific lines from the compose file.
            sed -i '/"host443"/d' .docker/deploy/docker-compose.yml
            sed -i '/localhost./d' .docker/deploy/docker-compose.yml
            replaceAllInFile .docker/deploy/docker-compose.yml nginxConf app.conf
            projectUrl="Project URL: http://$PROJECT_URL"
            if [ "$nginxHttpHostPort" -ne 80 ]
            then
                projectUrl+=":$nginxHttpHostPort"
            fi
            break;;
    esac
done

# /etc/hosts: offer to map the project URL to loopback when not present yet.
if ! grep -Fxq "127.0.0.1 $PROJECT_URL" /etc/hosts
then
    while true; do
        read -rp "Do you want to add the project URL($PROJECT_URL) in etc/hosts? ${RED}[y/N]${RST}: " yn
        case $yn in
            [Yy]* )
                echo ${GRN}
                printf '\n%s\n%s' "# Project $PROJECT_NAME" "127.0.0.1 $PROJECT_URL" | sudo tee -a /etc/hosts
                echo ${RST}
                replaceAllInFile .docker/build/nginx/conf.d/app.conf localhost "$PROJECT_URL"
                if [ "${httpProtocol}" == 'https' ]
                then
                    replaceAllInFile .docker/build/nginx/conf.d/apps.conf localhost "$PROJECT_URL"
                fi
                echo "${GRN}Project URL($PROJECT_URL) have been add successfully.${RST}"
                break;;
            * ) break;;
        esac
    done
fi

printf '\n%s\n' "${GRN}Nginx build and deploy have been made successfully.${RST}"
<reponame>Jarvie8176/valheim-ec2-dashboard
import { Fragment, FunctionComponent } from "react";
import { ServerStatusDto } from "./serverStatus.dto";
import { Grid } from "@material-ui/core";
import { InfoCard } from "./infoCard.ui";
import { isNil } from "lodash";
type Props = {
  serverStatus: ServerStatusDto;
};

/**
 * Dashboard row of info cards summarizing the Valheim server state:
 * host status, game-server status, online player count and last sync time.
 * All fields degrade to "N/A" when the corresponding data is absent.
 *
 * Fix: removed a leftover `console.log(props.serverStatus)` debug statement
 * that logged the full status payload on every render.
 */
export const ServerStatusUi: FunctionComponent<Props> = (props) => {
  // Player counts come from the game-server query; absent when it is down.
  const activePlayerCount = props.serverStatus.server?.players;
  const maxPlayerCount = props.serverStatus.server?.max_players;
  const playerCountMessage = !isNil(activePlayerCount) ? `${activePlayerCount}/${maxPlayerCount}` : "N/A";
  const syncTimestamp = props.serverStatus.syncTimestamp?.toString() || "N/A";
  const hostStatusMessage = props.serverStatus.host?.status || "N/A";
  // A present `server` object means the game server answered => "Running".
  const serverStatusMessage = props.serverStatus.server ? "Running" : "Stopped";
  return (
    <Fragment>
      <Grid item>
        <InfoCard title={"Host Status"} content={hostStatusMessage} />
      </Grid>
      <Grid item>
        <InfoCard title={"Server Status"} content={serverStatusMessage} />
      </Grid>
      <Grid item>
        <InfoCard title={"Players Online"} content={playerCountMessage} />
      </Grid>
      <Grid item>
        <InfoCard title={"Synced At"} content={syncTimestamp} />
      </Grid>
    </Fragment>
  );
};
|
/*
* Copyright (C) 2013 Parallel Universe Software Co.
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package co.paralleluniverse.spaceships.render;
import com.jogamp.common.nio.Buffers;
import com.jogamp.opengl.util.GLBuffers;
import com.jogamp.opengl.util.glsl.ShaderProgram;
import java.nio.ByteBuffer;
import java.nio.DoubleBuffer;
import java.nio.FloatBuffer;
import java.nio.IntBuffer;
import java.util.BitSet;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import javax.media.opengl.GL3;
import javax.media.opengl.GLException;
/**
* See:
* http://www.lighthouse3d.com/tutorials/glsl-core-tutorial/3490-2/
* http://www.arcsynthesis.org/gltut/Positioning/Tut07%20Shared%20Uniforms.html
* http://www.jotschi.de/?p=427
*
* @author pron
*/
public class UBO {
    /**
     * Generator for unique uniform-buffer binding points.
     *
     * <p>Fix: this counter must be {@code static}. As an instance field every
     * {@code UBO} started its own counter at zero, so each instance was assigned
     * binding point 0 and successive UBOs silently overwrote one another's
     * {@code glBindBufferBase} slot.
     */
    private static final AtomicInteger BINDING_POINT_GEN = new AtomicInteger(0);
    private final ShaderProgram shader;
    private final String blockName;
    /** GL name of the buffer object backing the uniform block. */
    private final int ubo;
    /** Binding point this buffer is attached to via glBindBufferBase. */
    private final int bindingPoint;
    /** CPU-side staging buffer, sized to GL_UNIFORM_BLOCK_DATA_SIZE. */
    private final ByteBuffer buffer;
    /** Lazily populated cache of per-uniform offset/type/size information. */
    private final Map<String, Info> attributes = Collections.synchronizedMap(new HashMap<String, Info>());

    /**
     * Creates a buffer object sized to the named uniform block of the given
     * program, assigns it a fresh binding point and attaches the program.
     *
     * @throws GLException if the program has no uniform block with that name
     */
    public UBO(GL3 gl, ShaderProgram shader, String blockName) {
        this.shader = shader;
        int[] tmp = new int[1];
        gl.glGenBuffers(1, tmp, 0);
        this.ubo = tmp[0];
        this.blockName = blockName;
        // GL_INVALID_INDEX (0xFFFFFFFF) is -1 as a signed int, so < 0 detects it.
        final int blockIndex = gl.glGetUniformBlockIndex(shader.program(), blockName);
        if (blockIndex < 0)
            throw new GLException("Uniform block " + blockName + " not found in program " + shader.program());
        gl.glGetActiveUniformBlockiv(shader.program(), blockIndex, gl.GL_UNIFORM_BLOCK_DATA_SIZE, tmp, 0);
        final int blockSize = tmp[0];
        gl.glBindBuffer(gl.GL_UNIFORM_BUFFER, ubo);
        gl.glBufferData(gl.GL_UNIFORM_BUFFER, blockSize, null, gl.GL_DYNAMIC_DRAW);
        this.buffer = GLBuffers.newDirectByteBuffer(blockSize);
        buffer.limit(blockSize);
        this.bindingPoint = BINDING_POINT_GEN.getAndIncrement();
        gl.glBindBufferBase(gl.GL_UNIFORM_BUFFER, bindingPoint, ubo);
        attachProgram(gl, shader, blockName);
    }

    /** @return the binding point assigned to this UBO. */
    public int getBindingPoint() {
        return bindingPoint;
    }

    /**
     * Binds the named uniform block of the given program to this UBO's
     * binding point. Called from the constructor for the owning program; may
     * also be used to share this UBO with additional programs.
     */
    public final void attachProgram(GL3 gl, ShaderProgram shader, String blockName) {
        final int blockIndex = gl.glGetUniformBlockIndex(shader.program(), blockName);
        gl.glUniformBlockBinding(shader.program(), blockIndex, bindingPoint);
    }

    /** Binds this buffer to GL_UNIFORM_BUFFER; required before any set(...). */
    public UBO bind(GL3 gl) {
        gl.glBindBuffer(gl.GL_UNIFORM_BUFFER, ubo);
        return this;
    }

    /** Unbinds whatever buffer is bound to GL_UNIFORM_BUFFER. */
    public void unbind(GL3 gl) {
        gl.glBindBuffer(gl.GL_UNIFORM_BUFFER, 0);
    }

    /**
     * Queries (once) and caches the offset, GL type and array size of the
     * named uniform within this block.
     *
     * @throws GLException if the uniform does not exist in the block
     */
    private Info getInfo(GL3 gl, String uniformName) {
        Info info = attributes.get(uniformName);
        if (info == null) {
            int[] tmp = new int[1];
            gl.glGetUniformIndices(shader.program(), 1, new String[]{blockName + "." + uniformName}, tmp, 0);
            final int index = tmp[0];
            if (index < 0)
                throw new GLException("Uniform " + blockName + "." + uniformName + " not found");
            final int[] indices = new int[]{index};
            gl.glGetActiveUniformsiv(shader.program(), 1, indices, 0, gl.GL_UNIFORM_OFFSET, tmp, 0);
            final int offset = tmp[0];
            gl.glGetActiveUniformsiv(shader.program(), 1, indices, 0, gl.GL_UNIFORM_TYPE, tmp, 0);
            final int type = tmp[0];
            gl.glGetActiveUniformsiv(shader.program(), 1, indices, 0, gl.GL_UNIFORM_SIZE, tmp, 0);
            final int size = tmp[0];
            info = new Info(offset, type, size);
            attributes.put(uniformName, info);
        }
        return info;
    }

    /**
     * Uploads the staged bytes of a single uniform to the GL buffer.
     * NOTE(review): assumes this UBO is currently bound to GL_UNIFORM_BUFFER
     * (see {@link #bind}) — glBufferSubData targets the bound buffer.
     */
    private void write(GL3 gl, Info info) {
        final int size = info.size * BO.sizeOfGLType(info.type);
        gl.glBufferSubData(gl.GL_UNIFORM_BUFFER, info.offset, size, Buffers.slice(buffer, info.offset, size));
    }

    /**
     * Looks up the uniform and checks that the caller-supplied byte count
     * matches the uniform's actual size.
     *
     * @throws GLException on a size mismatch
     */
    private Info verifySize(GL3 gl, String uniformName, int size) {
        final Info info = getInfo(gl, uniformName);
        final int requiredSize = info.size * BO.sizeOfGLType(info.type);
        if (requiredSize != size)
            throw new GLException("Trying to write " + size + " bytes into uniform " + uniformName + " which is of size " + requiredSize);
        return info;
    }

    /** Writes a single int uniform. */
    public void set(GL3 gl, String attributeName, int value) {
        final Info info = verifySize(gl, attributeName, Buffers.SIZEOF_INT);
        buffer.position(info.offset);
        buffer.putInt(value);
        buffer.rewind();
        write(gl, info);
    }

    /** Writes an ivec2 uniform. */
    public void set(GL3 gl, String attributeName, int v0, int v1) {
        final Info info = verifySize(gl, attributeName, 2 * Buffers.SIZEOF_INT);
        buffer.position(info.offset);
        buffer.putInt(v0);
        buffer.putInt(v1);
        buffer.rewind();
        write(gl, info);
    }

    /** Writes an ivec3 uniform. */
    public void set(GL3 gl, String attributeName, int v0, int v1, int v2) {
        final Info info = verifySize(gl, attributeName, 3 * Buffers.SIZEOF_INT);
        buffer.position(info.offset);
        buffer.putInt(v0);
        buffer.putInt(v1);
        buffer.putInt(v2);
        buffer.rewind();
        write(gl, info);
    }

    /** Writes an ivec4 uniform. */
    public void set(GL3 gl, String attributeName, int v0, int v1, int v2, int v3) {
        final Info info = verifySize(gl, attributeName, 4 * Buffers.SIZEOF_INT);
        buffer.position(info.offset);
        buffer.putInt(v0);
        buffer.putInt(v1);
        buffer.putInt(v2);
        buffer.putInt(v3);
        buffer.rewind();
        write(gl, info);
    }

    /** Writes a single float uniform. */
    public void set(GL3 gl, String attributeName, float value) {
        final Info info = verifySize(gl, attributeName, Buffers.SIZEOF_FLOAT);
        buffer.position(info.offset);
        buffer.putFloat(value);
        buffer.rewind();
        write(gl, info);
    }

    /** Writes a vec2 uniform. */
    public void set(GL3 gl, String attributeName, float v0, float v1) {
        final Info info = verifySize(gl, attributeName, 2 * Buffers.SIZEOF_FLOAT);
        buffer.position(info.offset);
        buffer.putFloat(v0);
        buffer.putFloat(v1);
        buffer.rewind();
        write(gl, info);
    }

    /** Writes a vec3 uniform. */
    public void set(GL3 gl, String attributeName, float v0, float v1, float v2) {
        final Info info = verifySize(gl, attributeName, 3 * Buffers.SIZEOF_FLOAT);
        buffer.position(info.offset);
        buffer.putFloat(v0);
        buffer.putFloat(v1);
        buffer.putFloat(v2);
        buffer.rewind();
        write(gl, info);
    }

    /** Writes a vec4 uniform. */
    public void set(GL3 gl, String attributeName, float v0, float v1, float v2, float v3) {
        final Info info = verifySize(gl, attributeName, 4 * Buffers.SIZEOF_FLOAT);
        buffer.position(info.offset);
        buffer.putFloat(v0);
        buffer.putFloat(v1);
        buffer.putFloat(v2);
        buffer.putFloat(v3);
        buffer.rewind();
        write(gl, info);
    }

    /** Writes an int array/vector uniform from the remaining ints of {@code buffer}. */
    public void set(GL3 gl, String attributeName, int components, IntBuffer buffer) {
        final Info info = verifySize(gl, attributeName, buffer.remaining() * Buffers.SIZEOF_INT);
        this.buffer.position(info.offset);
        final int pos = buffer.position();
        this.buffer.asIntBuffer().put(buffer);
        buffer.position(pos); // restore caller's position after the bulk put
        this.buffer.rewind();
        write(gl, info);
    }

    /** Writes a float array/vector uniform from the remaining floats of {@code buffer}. */
    public void set(GL3 gl, String attributeName, int components, FloatBuffer buffer) {
        // Fix: verify against SIZEOF_FLOAT (was SIZEOF_INT; numerically equal,
        // but the wrong constant obscured intent).
        final Info info = verifySize(gl, attributeName, buffer.remaining() * Buffers.SIZEOF_FLOAT);
        this.buffer.position(info.offset);
        final int pos = buffer.position();
        this.buffer.asFloatBuffer().put(buffer);
        buffer.position(pos); // restore caller's position after the bulk put
        this.buffer.rewind();
        write(gl, info);
    }

    /** Writes a matrix uniform from the remaining floats of {@code buffer}. */
    public void set(GL3 gl, String attributeName, int rows, int columns, FloatBuffer buffer) {
        // Fix: verify against SIZEOF_FLOAT (was SIZEOF_INT; numerically equal,
        // but the wrong constant obscured intent).
        final Info info = verifySize(gl, attributeName, buffer.remaining() * Buffers.SIZEOF_FLOAT);
        this.buffer.position(info.offset);
        final int pos = buffer.position();
        this.buffer.asFloatBuffer().put(buffer);
        buffer.position(pos); // restore caller's position after the bulk put
        this.buffer.rewind();
        write(gl, info);
    }

    /** Detaches the binding point and deletes the GL buffer object. */
    public void destroy(GL3 gl) {
        gl.glBindBufferBase(gl.GL_UNIFORM_BUFFER, bindingPoint, 0);
        gl.glBindBuffer(gl.GL_UNIFORM_BUFFER, ubo);
        gl.glDeleteBuffers(1, new int[]{ubo}, 0);
        gl.glBindBuffer(gl.GL_UNIFORM_BUFFER, 0);
    }

    /** Immutable per-uniform layout info: byte offset, GL type enum, array size. */
    private static class Info {
        final int offset;
        final int type;
        final int size;

        public Info(int offset, int type, int size) {
            this.offset = offset;
            this.type = type;
            this.size = size;
        }
    }
}
|
def average(a, b, c, d):
    """Return the arithmetic mean of the four given numbers."""
    values = (a, b, c, d)
    return sum(values) / len(values)


print(average(10, 3, 12, 4))
import re
from ecs_logging import ECS_VERSION
def test_ecs_version_format():
    """Check ECS_VERSION is digits separated by dots (e.g. "1", "1.6.0").

    Fix: use re.fullmatch instead of re.match — re.match only anchors at the
    start, so a malformed version like "1.6.0-beta" would still have passed.
    """
    assert re.fullmatch(r"[0-9](?:[.0-9]*[0-9])?", ECS_VERSION)
|
<reponame>forci/menu-builder-client-bundle
/*
* Witter for jQuery
*
* Copyright (c) 2014 <NAME>
* Dual licensed under the MIT and GPL licenses.
*
* A Gritter for jQuery ripoff - http://www.boedesign.com/
*
* Copyright (c) 2012 <NAME>
* Dual licensed under the MIT and GPL licenses.
*/
;(function ($) {
    /**
     * Factory entry point: create and show a notification.
     * Accepts an options object or nothing; construction errors are caught and
     * reported via console.error when available, falling back to alert().
     * @returns {Witter|undefined} the new instance, or undefined on error
     */
    $.witter = function (options) {
        try {
            return new Witter(options || {});
        } catch (e) {
            var err = 'Witter Error: ' + e;
            (typeof(console) != 'undefined' && console.error) ? console.error(err, options) : alert(err);
        }
    };
$.witter.defaults = {
title: '',
position: 'top-right',
theme: 'dark',
fade: {
in: {
speed: 'medium',
easing: ''
},
out: {
speed: 1000,
easing: ''
}
},
close_selector: '.witter-close',
time: 6000,
image: '',
sticky: false,
restore: true,
callbacks: {
before_open: function () {
//
},
after_open: function () {
//
},
/**
*
* @param options - An object of options passed to the fade method
* @param is_forced - Indicates whether it was closed by clicking on the close button
*/
fade: function (is_forced, options) {
//
},
before_close: function () {
//
},
after_close: function () {
//
},
html: function () {
//
}
},
templates: {
close: '<a class="witter-close" href="#" tabindex="1"><i class="fa fa-times"></i></a>',
simple: '<div class="witter-item-simple">{close}<div class="image">{image}</div><div class="witter-title">{title}</div><p class="witter-text">{text}</p></div>',
item: '<div id="witter-item-{id}" class="witter-item-wrapper {theme} html" style="display:none" role="alert"><div class="witter-item">{html}<div style="clear:both"></div></div></div>',
wrapper: '<div id="witter-wrappers"></div>'
}
};
$.witter.parse = function (template, data) {
return template.replace(/\{([\w\.]*)\}/g, function (str, key) {
var keys = key.split("."), v = data[keys.shift()];
for (var i = 0, l = keys.length; i < l; i++) v = v[keys[i]];
return (typeof v !== "undefined" && v !== null) ? v : "";
});
};
$.witter.registry = {
count: 0,
instances: {},
active: [],
/**
* Increments instances count, then uses it as the instance ID and returns it
* @param instance
* @returns number
*/
add: function (instance) {
var id = ++this.count;
this.instances[id] = instance;
this.setActive(id);
return id;
},
get: function (instance) {
if (instance instanceof Witter) {
return instance;
}
return this.instances[instance];
},
setActive: function (id) {
this.active.push(id);
},
setInactive: function (id) {
this.active = $.grep(this.active, function (value) {
return value != id;
});
},
getActiveIds: function () {
return this.active;
}
};
/**
* Remove a witter notification from the screen
*/
$.witter.remove = function (instance, options) {
$.witter.registry.get(instance).remove(options);
};
/**
* Remove a witter notification from the screen instantly
*/
$.witter.removeNow = function (instance) {
$.witter.registry.get(instance).removeElement();
};
/**
* Remove all notifications
*/
$.witter.removeAll = function (options) {
var ids = $.witter.registry.getActiveIds();
$.each(ids, function (index, instanceId) {
$.witter.registry.get(instanceId).fade(true, options);
});
};
/**
* Remove all notifications instantly
*/
$.witter.removeAllNow = function () {
var ids = $.witter.registry.getActiveIds();
$.each(ids, function (index, instanceId) {
$.witter.registry.get(instanceId).removeElement();
});
};
$(function () {
$.witter.wrappers = $('#witter-wrappers');
if ($.witter.wrappers.length == 0) {
$('body').append($.witter.defaults.templates.wrapper);
$.witter.wrappers = $($.witter.wrappers.selector);
$(['top-right', 'bottom-right', 'bottom-left', 'top-left', 'top', 'bottom']).each(function (index, className) {
var div = $('<div/>').addClass('wrapper').addClass(className).appendTo($.witter.wrappers);
});
}
});
var Witter = function (options) {
if (!(this instanceof arguments.callee)) {
return new arguments.callee(options);
}
if (typeof(options) == 'string') {
options = {text: options};
}
if (options.text === null) {
throw 'You must supply "text" parameter.';
}
options = $.extend(true, {}, $.witter.defaults, options);
this.options = options;
var instance = this;
var id = $.witter.registry.add(this);
this.id = id;
this.callbacks = options.callbacks;
var html;
if (options.html) {
html = options.html;
} else {
var image_str = options.image ? '<img src="' + options.image + '" class="witter-image" />' : '';
html = $.witter.parse(options.templates.simple, {
close: options.templates.close,
image: image_str,
title: options.title,
text: options.text
});
}
var itemTemplate = $.witter.parse(options.templates.item, {
html: html,
id: id,
theme: options.theme
});
if (this.callbacks.before_open.apply(this) === false) {
// if 'before_open' callback returns false - do not show at all
return this;
}
$.witter.wrappers.find('.' + options.position).append(itemTemplate);
this.element = $('#witter-item-' + id);
this.element.fadeIn({
duration: options.fade.in.speed,
easing: options.fade.in.easing,
complete: function () {
instance.callbacks.after_open.apply(instance);
}
});
if (!options.sticky) {
this.setFadeTimer();
}
$(this.element).on('mouseenter', function () {
if (!options.sticky && options.restore) {
instance.restoreItemIfFading();
}
$(this).addClass('hover');
}).on('mouseleave', function () {
if (!options.sticky && options.restore) {
instance.setFadeTimer(instance, options);
}
$(this).removeClass('hover');
});
$(this.element).on('click', options.close_selector, function (event) {
event.preventDefault();
instance.fade(options, true);
});
};
    /**
     * Get or set the notification title (simple template only).
     * With no argument returns the current title text; otherwise sets it and
     * returns this instance for chaining.
     */
    Witter.prototype.title = function (title) {
        if (!title) {
            return this.element.find('.witter-item-simple .witter-title').text();
        }
        this.element.find('.witter-item-simple .witter-title').text(title);
        return this;
    };
    /**
     * Get or set the notification body text (simple template only).
     */
    Witter.prototype.text = function (text) {
        if (!text) {
            return this.element.find('.witter-item-simple .witter-text').text();
        }
        this.element.find('.witter-item-simple .witter-text').text(text);
        return this;
    };
    /**
     * Get or set the raw inner HTML of the notification item.
     * The 'html' callback may veto the change (return false) or transform the
     * new markup (return a replacement value).
     */
    Witter.prototype.html = function (html) {
        if (!html) {
            return this.element.find('.witter-item').html();
        }
        var callback = this.callbacks.html.apply(this, [html]);
        if (false === callback) {
            // if 'html' callback returns false, do not change html
            return;
        }
        if (callback) {
            html = callback;
        }
        this.element.find('.witter-item').html(html);
        return this;
    };
    /**
     * Remove this notification (fades it out as a forced close).
     * @param options - fade overrides merged on top of this.options
     */
    Witter.prototype.remove = function (options) {
        this.fade(true, options);
        return this;
    };
    /**
     * Set the notification to fade out after a certain amount of time
     * (this.options.time milliseconds).
     */
    Witter.prototype.setFadeTimer = function () {
        var that = this;
        this.fadeTimer = setTimeout(function () {
            that.fade();
        }, this.options.time);
        return this;
    };
    /**
     * Fade out an element after it's been on the screen for x amount of time
     * @param isForced - true when triggered by the close button or explicit removal
     * @param params - fade option overrides merged on top of this.options
     */
    Witter.prototype.fade = function (isForced, params) {
        var opts = $.extend(true, {}, this.options, params || {});
        this.callbacks.fade.apply(this, [isForced, opts]);
        if (isForced) {
            // A forced close must not be restored by hover handlers or the timer.
            this.element.off('mouseenter mouseleave');
            clearTimeout(this.fadeTimer);
        }
        var instance = this;
        if (opts.fade.out.speed) {
            // Animate opacity to 0, slide the item shut, then remove it.
            this.element.animate({
                opacity: 0
            }, opts.fade.out.speed, function () {
                $(this).slideUp(300, function () {
                    instance.removeElement();
                });
            });
            return;
        }
        // No fade speed configured: remove immediately.
        this.removeElement();
        return this;
    };
    /**
     * Remove the DOM element and unregister the instance, running the
     * before_close/after_close callbacks around the removal.
     */
    Witter.prototype.removeElement = function () {
        this.callbacks.before_close.apply(this);
        $(this.element).remove();
        $.witter.registry.setInactive(this.id);
        this.callbacks.after_close.apply(this);
        return this;
    };
    /**
     * Cancel a pending/ongoing fade (e.g. on mouseenter) and restore the
     * element's opacity and height to their stylesheet values.
     */
    Witter.prototype.restoreItemIfFading = function () {
        clearTimeout(this.fadeTimer);
        this.element.stop().css({opacity: '', height: ''});
        return this;
    };
})(jQuery);
package mekanism.common.recipe.lookup.cache;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.BiPredicate;
import java.util.function.Function;
import java.util.function.Predicate;
import javax.annotation.Nullable;
import mekanism.api.recipes.MekanismRecipe;
import mekanism.api.recipes.inputs.InputIngredient;
import mekanism.common.recipe.MekanismRecipeType;
import mekanism.common.recipe.lookup.cache.type.IInputCache;
import net.minecraft.world.World;
/**
* Similar in concept to {@link DoubleInputRecipeCache} except that it requires both input types to be the same and also allows for them to be in any order.
*/
public abstract class EitherSideInputRecipeCache<INPUT, INGREDIENT extends InputIngredient<INPUT>, RECIPE extends MekanismRecipe & BiPredicate<INPUT, INPUT>,
      CACHE extends IInputCache<INPUT, INGREDIENT, RECIPE>> extends AbstractInputRecipeCache<RECIPE> {

    /** Recipes whose ingredients are too complex for the quick-lookup cache; matched by linear scan. */
    private final Set<RECIPE> complexRecipes = new HashSet<>();
    /** Extracts a recipe's first ingredient. */
    private final Function<RECIPE, INGREDIENT> inputAExtractor;
    /** Extracts a recipe's second ingredient. */
    private final Function<RECIPE, INGREDIENT> inputBExtractor;
    /** Quick lookup cache mapping concrete inputs to candidate recipes. */
    private final CACHE cache;

    /**
     * @param recipeType      recipe type this cache serves
     * @param inputAExtractor getter for a recipe's first ingredient
     * @param inputBExtractor getter for a recipe's second ingredient
     * @param cache           backing per-input cache implementation
     */
    protected EitherSideInputRecipeCache(MekanismRecipeType<RECIPE, ?> recipeType, Function<RECIPE, INGREDIENT> inputAExtractor,
          Function<RECIPE, INGREDIENT> inputBExtractor, CACHE cache) {
        super(recipeType);
        this.inputAExtractor = inputAExtractor;
        this.inputBExtractor = inputBExtractor;
        this.cache = cache;
    }

    @Override
    public void clear() {
        super.clear();
        cache.clear();
        complexRecipes.clear();
    }

    /**
     * Checks if there is a matching recipe that has the given input.
     *
     * @param world World.
     * @param input Recipe input.
     *
     * @return {@code true} if there is a match, {@code false} if there isn't.
     */
    public boolean containsInput(@Nullable World world, INPUT input) {
        if (cache.isEmpty(input)) {
            //Don't allow empty inputs
            return false;
        }
        initCacheIfNeeded(world);
        // Quick-lookup first; fall back to scanning the complex recipes, where the
        // input may match either side of the recipe.
        return cache.contains(input) || complexRecipes.stream().anyMatch(recipe -> inputAExtractor.apply(recipe).testType(input) ||
                                                                                  inputBExtractor.apply(recipe).testType(input));
    }

    /**
     * Checks is there is a matching recipe with the given inputs. This method exists as a helper for insertion predicates and will return true if inputA is not empty and
     * inputB is empty without doing any extra validation on inputA. This is similar to {@link DoubleInputRecipeCache#containsInputAB(World, Object, Object)} and {@link
     * DoubleInputRecipeCache#containsInputBA(World, Object, Object)} except that because {@link EitherSideInputRecipeCache} assumes both inputs are the same type and
     * that the order doesn't matter we just have one method and require the inputs to be passed in the corresponding order instead.
     *
     * @param world  World.
     * @param inputA Recipe input A.
     * @param inputB Recipe input B.
     *
     * @return {@code true} if there is a match or if inputA is not empty and inputB is empty.
     *
     * @apiNote Pass the input you are trying to insert as inputA and the input you already have as inputB.
     */
    public boolean containsInput(@Nullable World world, INPUT inputA, INPUT inputB) {
        if (cache.isEmpty(inputA)) {
            //Note: We don't bother checking if b is empty here as it will be verified in containsInputB
            return containsInput(world, inputB);
        } else if (cache.isEmpty(inputB)) {
            return true;
        }
        initCacheIfNeeded(world);
        //Note: Even though we know the cache contains input A we need to check both input A and input B
        // This is because we want to ensure that we allow the inputs being in either order, but in our
        // secondary validation we check inputB first as we know the recipe contains inputA as one of the
        // inputs, but we want to make sure that we only mark it as valid if the same input is on both sides
        // if the recipe combines two of the same type of ingredient
        if (cache.contains(inputA, recipe -> {
            INGREDIENT ingredientA = inputAExtractor.apply(recipe);
            INGREDIENT ingredientB = inputBExtractor.apply(recipe);
            return ingredientB.testType(inputB) && ingredientA.testType(inputA) || ingredientA.testType(inputB) && ingredientB.testType(inputA);
        })) {
            return true;
        }
        //Our quick lookup cache does not contain it, check any recipes where the ingredients are complex
        return complexRecipes.stream().anyMatch(recipe -> {
            INGREDIENT ingredientA = inputAExtractor.apply(recipe);
            INGREDIENT ingredientB = inputBExtractor.apply(recipe);
            // Accept the pair in either order (A/B or B/A).
            return ingredientA.testType(inputA) && ingredientB.testType(inputB) || ingredientB.testType(inputA) && ingredientA.testType(inputB);
        });
    }

    /**
     * Finds the first recipe that matches the given inputs.
     *
     * @param world  World.
     * @param inputA Recipe input A.
     * @param inputB Recipe input B.
     *
     * @return Recipe matching the given inputs, or {@code null} if no recipe matches.
     */
    @Nullable
    public RECIPE findFirstRecipe(@Nullable World world, INPUT inputA, INPUT inputB) {
        if (cache.isEmpty(inputA) || cache.isEmpty(inputB)) {
            //Don't allow empty inputs
            return null;
        }
        initCacheIfNeeded(world);
        //Note: The recipe's test method checks both directions
        Predicate<RECIPE> matchPredicate = r -> r.test(inputA, inputB);
        //Lookup a recipe from the input map
        RECIPE recipe = cache.findFirstRecipe(inputA, matchPredicate);
        // if there is no recipe, then check if any of our complex recipes match
        return recipe == null ? findFirstRecipe(complexRecipes, matchPredicate) : recipe;
    }

    @Override
    protected void initCache(List<RECIPE> recipes) {
        // Map both ingredients of every recipe into the quick-lookup cache; if
        // either side is too complex to index, remember the recipe for the
        // linear-scan fallback.
        for (RECIPE recipe : recipes) {
            boolean complexA = cache.mapInputs(recipe, inputAExtractor.apply(recipe));
            boolean complexB = cache.mapInputs(recipe, inputBExtractor.apply(recipe));
            if (complexA || complexB) {
                complexRecipes.add(recipe);
            }
        }
    }
}
#!/bin/bash
# Extract the configured target regions from a whole-genome BAM into a
# temporary BAM and index it, timing both steps with /usr/bin/time.

# Comma-separated region list -> space-separated, so each region becomes its
# own argument to samtools view.
region=$(cat /genomics/regions_varcall.conf | tr ',' ' ')
# NOTE(review): $region is deliberately left unquoted so multiple regions
# expand into separate arguments — do not quote it.
/usr/bin/time samtools view -h -b -o /genomics/scratch/tmp.bam /mnt/Platypus-NA12878/CEUTrio.HiSeq.WGS.b37_decoy.NA12878.clean.dedup.recal.20120117.bam $region
/usr/bin/time samtools index /genomics/scratch/tmp.bam
#!/bin/bash
# Wrapper around the configured C compiler that makes enclave builds more
# reproducible by remapping absolute source/build paths in debug info.
set -e

# All of these must be set by the build system before this wrapper runs.
REQUIRED_ENVS=("MESATEE_PROJECT_ROOT" "MESATEE_BUILD_ROOT" "CMAKE_C_COMPILER")
for var in "${REQUIRED_ENVS[@]}"; do
    # ${!var} is indirect expansion: the value of the variable named by $var.
    if [ -z "${!var}" ]; then
        # Fix: report to stderr and use a valid exit status (exit takes 0-255;
        # `exit -1` is not portable).
        echo "Please set ${var}" >&2
        exit 1
    fi
done

# Tell gcc/clang to remap absolute src paths to make enclaves' signature more reproducible
exec "${CMAKE_C_COMPILER}" "$@" -fdebug-prefix-map=${MESATEE_PROJECT_ROOT}=/mesatee_src -fdebug-prefix-map=${MESATEE_BUILD_ROOT}=/mesatee_build
def sum_digits(n):
    """Return the sum of the decimal digits of n (sign is ignored).

    Fix: the original recursion never terminated for n < 0, because Python's
    floor division drives negative values toward -1 rather than 0
    (e.g. -5 // 10 == -1, -1 // 10 == -1, ...). Normalizing with abs() makes
    negative inputs terminate and sum the digits of |n|.
    """
    n = abs(n)
    if n == 0:
        return 0
    return n % 10 + sum_digits(n // 10)


n = 834
# Renamed from `sum`, which shadowed the builtin of the same name.
total = sum_digits(n)
print(total)
<gh_stars>0
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
require 'uri'
require 'logger'
# rubocop:disable Lint/UnneededCopDisableDirective, Metrics/LineLength
module OCI
# API for managing certificates.
class CertificatesManagement::CertificatesManagementClient
# Client used to make HTTP requests.
# @return [OCI::ApiClient]
attr_reader :api_client
# Fully qualified endpoint URL
# @return [String]
attr_reader :endpoint
# The default retry configuration to apply to all operations in this service client. This can be overridden
# on a per-operation basis. The default retry configuration value is `nil`, which means that an operation
# will not perform any retries
# @return [OCI::Retry::RetryConfig]
attr_reader :retry_config
# The region, which will usually correspond to a value in {OCI::Regions::REGION_ENUM}.
# @return [String]
attr_reader :region
# rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Layout/EmptyLines, Metrics/PerceivedComplexity
# Creates a new CertificatesManagementClient.
# Notes:
# If a config is not specified, then the global OCI.config will be used.
#
# This client is not thread-safe
#
# Either a region or an endpoint must be specified. If an endpoint is specified, it will be used instead of the
# region. A region may be specified in the config or via or the region parameter. If specified in both, then the
# region parameter will be used.
# @param [Config] config A Config object.
# @param [String] region A region used to determine the service endpoint. This will usually
# correspond to a value in {OCI::Regions::REGION_ENUM}, but may be an arbitrary string.
# @param [String] endpoint The fully qualified endpoint URL
# @param [OCI::BaseSigner] signer A signer implementation which can be used by this client. If this is not provided then
# a signer will be constructed via the provided config. One use case of this parameter is instance principals authentication,
# so that the instance principals signer can be provided to the client
# @param [OCI::ApiClientProxySettings] proxy_settings If your environment requires you to use a proxy server for outgoing HTTP requests
# the details for the proxy can be provided in this parameter
# @param [OCI::Retry::RetryConfig] retry_config The retry configuration for this service client. This represents the default retry configuration to
# apply across all operations. This can be overridden on a per-operation basis. The default retry configuration value is `nil`, which means that an operation
# will not perform any retries
def initialize(config: nil, region: nil, endpoint: nil, signer: nil, proxy_settings: nil, retry_config: nil)
# If the signer is an InstancePrincipalsSecurityTokenSigner or SecurityTokenSigner and no config was supplied (they are self-sufficient signers)
# then create a dummy config to pass to the ApiClient constructor. If customers wish to create a client which uses instance principals
# and has config (either populated programmatically or loaded from a file), they must construct that config themselves and then
# pass it to this constructor.
#
# If there is no signer (or the signer is not an instance principals signer) and no config was supplied, this is not valid
# so try and load the config from the default file.
config = OCI::Config.validate_and_build_config_with_signer(config, signer)
signer = OCI::Signer.config_file_auth_builder(config) if signer.nil?
@api_client = OCI::ApiClient.new(config, signer, proxy_settings: proxy_settings)
@retry_config = retry_config
if endpoint
@endpoint = endpoint + '/20210224'
else
region ||= config.region
region ||= signer.region if signer.respond_to?(:region)
self.region = region
end
logger.info "CertificatesManagementClient endpoint set to '#{@endpoint}'." if logger
end
# rubocop:enable Metrics/AbcSize, Metrics/CyclomaticComplexity, Layout/EmptyLines, Metrics/PerceivedComplexity
# Set the region that will be used to determine the service endpoint.
# This will usually correspond to a value in {OCI::Regions::REGION_ENUM},
# but may be an arbitrary string.
def region=(new_region)
@region = new_region
raise 'A region must be specified.' unless @region
@endpoint = OCI::Regions.get_service_endpoint_for_template(@region, 'https://certificatesmanagement.{region}.oci.{secondLevelDomain}') + '/20210224'
logger.info "CertificatesManagementClient endpoint set to '#{@endpoint} from region #{@region}'." if logger
end
# @return [Logger] The logger for this client. May be nil.
def logger
@api_client.config.logger
end
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Cancels the scheduled deletion of the specified certificate authority (CA).
# @param [String] certificate_authority_id The OCID of the certificate authority (CA).
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
# retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
# will include this value. Otherwise, a random request ID will be
# generated by the service.
#
# @option opts [String] :if_match For optimistic concurrency control. In the PUT or DELETE call for a
# resource, set the `if-match` parameter to the value of the etag from a
# previous GET or POST response for that resource. The resource will be
# updated or deleted only if the etag you provide matches the resource's
# current etag value.
#
# @return [Response] A Response object with data of type nil
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/cancel_certificate_authority_deletion.rb.html) to see an example of how to use cancel_certificate_authority_deletion API.
def cancel_certificate_authority_deletion(certificate_authority_id, opts = {})
  logger.debug 'Calling operation CertificatesManagementClient#cancel_certificate_authority_deletion.' if logger

  # Validate the required path parameter before building the request.
  if certificate_authority_id.nil?
    raise "Missing the required parameter 'certificate_authority_id' when calling cancel_certificate_authority_deletion."
  end
  if OCI::Internal::Util.blank_string?(certificate_authority_id)
    raise "Parameter value for 'certificate_authority_id' must not be blank"
  end

  path = '/certificateAuthorities/{certificateAuthorityId}/actions/cancelDeletion'
         .sub('{certificateAuthorityId}', certificate_authority_id.to_s)
  signing_strategy = :standard

  query_params = {}
  header_params = {
    accept: 'application/json',
    'content-type': 'application/json'
  }
  # Copy optional request headers from opts when they are present.
  { 'opc-request-id': :opc_request_id, 'if-match': :if_match }.each do |header, opt_key|
    header_params[header] = opts[opt_key] if opts[opt_key]
  end

  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#cancel_certificate_authority_deletion') do
    @api_client.call_api(
      :POST,
      path,
      endpoint,
      header_params: header_params,
      query_params: query_params,
      operation_signing_strategy: signing_strategy,
      body: nil
    )
  end
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Cancels the scheduled deletion of the specified certificate authority (CA) version. Canceling
# a scheduled deletion restores the CA version's lifecycle state to what
# it was before its scheduled deletion.
#
# @param [String] certificate_authority_id The OCID of the certificate authority (CA).
# @param [Integer] certificate_authority_version_number The version number of the certificate authority (CA).
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
# retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
# will include this value. Otherwise, a random request ID will be
# generated by the service.
#
# @option opts [String] :if_match For optimistic concurrency control. In the PUT or DELETE call for a
# resource, set the `if-match` parameter to the value of the etag from a
# previous GET or POST response for that resource. The resource will be
# updated or deleted only if the etag you provide matches the resource's
# current etag value.
#
# @return [Response] A Response object with data of type nil
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/cancel_certificate_authority_version_deletion.rb.html) to see an example of how to use cancel_certificate_authority_version_deletion API.
def cancel_certificate_authority_version_deletion(certificate_authority_id, certificate_authority_version_number, opts = {})
  logger.debug 'Calling operation CertificatesManagementClient#cancel_certificate_authority_version_deletion.' if logger

  # Both path parameters are mandatory; fail fast with the generated messages.
  if certificate_authority_id.nil?
    raise "Missing the required parameter 'certificate_authority_id' when calling cancel_certificate_authority_version_deletion."
  end
  if certificate_authority_version_number.nil?
    raise "Missing the required parameter 'certificate_authority_version_number' when calling cancel_certificate_authority_version_deletion."
  end
  if OCI::Internal::Util.blank_string?(certificate_authority_id)
    raise "Parameter value for 'certificate_authority_id' must not be blank"
  end
  if OCI::Internal::Util.blank_string?(certificate_authority_version_number)
    raise "Parameter value for 'certificate_authority_version_number' must not be blank"
  end

  path = '/certificateAuthorities/{certificateAuthorityId}/version/{certificateAuthorityVersionNumber}/actions/cancelDeletion'
         .sub('{certificateAuthorityId}', certificate_authority_id.to_s)
         .sub('{certificateAuthorityVersionNumber}', certificate_authority_version_number.to_s)
  signing_strategy = :standard

  query_params = {}
  header_params = {
    accept: 'application/json',
    'content-type': 'application/json'
  }
  # Copy optional request headers from opts when they are present.
  { 'opc-request-id': :opc_request_id, 'if-match': :if_match }.each do |header, opt_key|
    header_params[header] = opts[opt_key] if opts[opt_key]
  end

  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#cancel_certificate_authority_version_deletion') do
    @api_client.call_api(
      :POST,
      path,
      endpoint,
      header_params: header_params,
      query_params: query_params,
      operation_signing_strategy: signing_strategy,
      body: nil
    )
  end
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Cancels the pending deletion of the specified certificate. Canceling
# a scheduled deletion restores the certificate's lifecycle state to what
# it was before you scheduled the certificate for deletion.
#
# @param [String] certificate_id The OCID of the certificate.
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
# retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
# will include this value. Otherwise, a random request ID will be
# generated by the service.
#
# @option opts [String] :if_match For optimistic concurrency control. In the PUT or DELETE call for a
# resource, set the `if-match` parameter to the value of the etag from a
# previous GET or POST response for that resource. The resource will be
# updated or deleted only if the etag you provide matches the resource's
# current etag value.
#
# @return [Response] A Response object with data of type nil
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/cancel_certificate_deletion.rb.html) to see an example of how to use cancel_certificate_deletion API.
def cancel_certificate_deletion(certificate_id, opts = {})
  logger.debug 'Calling operation CertificatesManagementClient#cancel_certificate_deletion.' if logger

  # Validate the required path parameter before building the request.
  if certificate_id.nil?
    raise "Missing the required parameter 'certificate_id' when calling cancel_certificate_deletion."
  end
  if OCI::Internal::Util.blank_string?(certificate_id)
    raise "Parameter value for 'certificate_id' must not be blank"
  end

  path = '/certificates/{certificateId}/actions/cancelDeletion'
         .sub('{certificateId}', certificate_id.to_s)
  signing_strategy = :standard

  query_params = {}
  header_params = {
    accept: 'application/json',
    'content-type': 'application/json'
  }
  # Copy optional request headers from opts when they are present.
  { 'opc-request-id': :opc_request_id, 'if-match': :if_match }.each do |header, opt_key|
    header_params[header] = opts[opt_key] if opts[opt_key]
  end

  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#cancel_certificate_deletion') do
    @api_client.call_api(
      :POST,
      path,
      endpoint,
      header_params: header_params,
      query_params: query_params,
      operation_signing_strategy: signing_strategy,
      body: nil
    )
  end
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Cancels the scheduled deletion of the specified certificate version.
# @param [String] certificate_id The OCID of the certificate.
# @param [Integer] certificate_version_number The version number of the certificate.
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
# retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
# will include this value. Otherwise, a random request ID will be
# generated by the service.
#
# @option opts [String] :if_match For optimistic concurrency control. In the PUT or DELETE call for a
# resource, set the `if-match` parameter to the value of the etag from a
# previous GET or POST response for that resource. The resource will be
# updated or deleted only if the etag you provide matches the resource's
# current etag value.
#
# @return [Response] A Response object with data of type nil
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/cancel_certificate_version_deletion.rb.html) to see an example of how to use cancel_certificate_version_deletion API.
def cancel_certificate_version_deletion(certificate_id, certificate_version_number, opts = {})
  logger.debug 'Calling operation CertificatesManagementClient#cancel_certificate_version_deletion.' if logger

  # Both path parameters are mandatory; fail fast with the generated messages.
  if certificate_id.nil?
    raise "Missing the required parameter 'certificate_id' when calling cancel_certificate_version_deletion."
  end
  if certificate_version_number.nil?
    raise "Missing the required parameter 'certificate_version_number' when calling cancel_certificate_version_deletion."
  end
  if OCI::Internal::Util.blank_string?(certificate_id)
    raise "Parameter value for 'certificate_id' must not be blank"
  end
  if OCI::Internal::Util.blank_string?(certificate_version_number)
    raise "Parameter value for 'certificate_version_number' must not be blank"
  end

  path = '/certificates/{certificateId}/version/{certificateVersionNumber}/actions/cancelDeletion'
         .sub('{certificateId}', certificate_id.to_s)
         .sub('{certificateVersionNumber}', certificate_version_number.to_s)
  signing_strategy = :standard

  query_params = {}
  header_params = {
    accept: 'application/json',
    'content-type': 'application/json'
  }
  # Copy optional request headers from opts when they are present.
  { 'opc-request-id': :opc_request_id, 'if-match': :if_match }.each do |header, opt_key|
    header_params[header] = opts[opt_key] if opts[opt_key]
  end

  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#cancel_certificate_version_deletion') do
    @api_client.call_api(
      :POST,
      path,
      endpoint,
      header_params: header_params,
      query_params: query_params,
      operation_signing_strategy: signing_strategy,
      body: nil
    )
  end
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Moves a CA bundle to a different compartment in the same tenancy. For information about
# moving resources between compartments, see [Moving Resources to a Different Compartment](https://docs.cloud.oracle.com/iaas/Content/Identity/Tasks/managingcompartments.htm#moveRes).
#
# When provided, if-match is checked against the ETag values of the CA bundle.
#
# @param [String] ca_bundle_id The OCID of the CA bundle.
# @param [OCI::CertificatesManagement::Models::ChangeCaBundleCompartmentDetails] change_ca_bundle_compartment_details The updated compartment details.
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
# retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :if_match For optimistic concurrency control. In the PUT or DELETE call for a
# resource, set the `if-match` parameter to the value of the etag from a
# previous GET or POST response for that resource. The resource will be
# updated or deleted only if the etag you provide matches the resource's
# current etag value.
#
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
# will include this value. Otherwise, a random request ID will be
# generated by the service.
#
# @option opts [String] :opc_retry_token A token that uniquely identifies a request so it can be retried in case
# of a timeout or server error without risk of executing that same action
# again. Retry tokens expire after 24 hours, but can be invalidated
# before then due to conflicting operations (for example, if a resource has been
# deleted and purged from the system, then a retry of the original
# creation request may be rejected).
#
# @return [Response] A Response object with data of type nil
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/change_ca_bundle_compartment.rb.html) to see an example of how to use change_ca_bundle_compartment API.
def change_ca_bundle_compartment(ca_bundle_id, change_ca_bundle_compartment_details, opts = {})
  logger.debug 'Calling operation CertificatesManagementClient#change_ca_bundle_compartment.' if logger

  # Validate required parameters before building the request.
  if ca_bundle_id.nil?
    raise "Missing the required parameter 'ca_bundle_id' when calling change_ca_bundle_compartment."
  end
  if change_ca_bundle_compartment_details.nil?
    raise "Missing the required parameter 'change_ca_bundle_compartment_details' when calling change_ca_bundle_compartment."
  end
  if OCI::Internal::Util.blank_string?(ca_bundle_id)
    raise "Parameter value for 'ca_bundle_id' must not be blank"
  end

  path = '/caBundles/{caBundleId}/actions/changeCompartment'
         .sub('{caBundleId}', ca_bundle_id.to_s)
  signing_strategy = :standard

  query_params = {}
  header_params = {
    accept: 'application/json',
    'content-type': 'application/json'
  }
  # Copy optional request headers from opts when they are present.
  { 'if-match': :if_match, 'opc-request-id': :opc_request_id, 'opc-retry-token': :opc_retry_token }.each do |header, opt_key|
    header_params[header] = opts[opt_key] if opts[opt_key]
  end
  # A retry token is always sent; generate one if the caller did not supply it.
  header_params[:'opc-retry-token'] ||= OCI::Retry.generate_opc_retry_token

  post_body = @api_client.object_to_http_body(change_ca_bundle_compartment_details)

  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#change_ca_bundle_compartment') do
    @api_client.call_api(
      :POST,
      path,
      endpoint,
      header_params: header_params,
      query_params: query_params,
      operation_signing_strategy: signing_strategy,
      body: post_body
    )
  end
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Moves a certificate authority (CA) to a different compartment within the same tenancy. For information about
# moving resources between compartments, see [Moving Resources to a Different Compartment](https://docs.cloud.oracle.com/iaas/Content/Identity/Tasks/managingcompartments.htm#moveRes).
#
# When provided, If-Match is checked against the ETag values of the certificate authority.
#
# @param [String] certificate_authority_id The OCID of the certificate authority (CA).
# @param [OCI::CertificatesManagement::Models::ChangeCertificateAuthorityCompartmentDetails] change_certificate_authority_compartment_details The updated compartment details
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
# retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :if_match For optimistic concurrency control. In the PUT or DELETE call for a
# resource, set the `if-match` parameter to the value of the etag from a
# previous GET or POST response for that resource. The resource will be
# updated or deleted only if the etag you provide matches the resource's
# current etag value.
#
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
# will include this value. Otherwise, a random request ID will be
# generated by the service.
#
# @option opts [String] :opc_retry_token A token that uniquely identifies a request so it can be retried in case
# of a timeout or server error without risk of executing that same action
# again. Retry tokens expire after 24 hours, but can be invalidated
# before then due to conflicting operations (for example, if a resource has been
# deleted and purged from the system, then a retry of the original
# creation request may be rejected).
#
# @return [Response] A Response object with data of type nil
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/change_certificate_authority_compartment.rb.html) to see an example of how to use change_certificate_authority_compartment API.
def change_certificate_authority_compartment(certificate_authority_id, change_certificate_authority_compartment_details, opts = {})
  logger.debug 'Calling operation CertificatesManagementClient#change_certificate_authority_compartment.' if logger

  # Validate required parameters before building the request.
  if certificate_authority_id.nil?
    raise "Missing the required parameter 'certificate_authority_id' when calling change_certificate_authority_compartment."
  end
  if change_certificate_authority_compartment_details.nil?
    raise "Missing the required parameter 'change_certificate_authority_compartment_details' when calling change_certificate_authority_compartment."
  end
  if OCI::Internal::Util.blank_string?(certificate_authority_id)
    raise "Parameter value for 'certificate_authority_id' must not be blank"
  end

  path = '/certificateAuthorities/{certificateAuthorityId}/actions/changeCompartment'
         .sub('{certificateAuthorityId}', certificate_authority_id.to_s)
  signing_strategy = :standard

  query_params = {}
  header_params = {
    accept: 'application/json',
    'content-type': 'application/json'
  }
  # Copy optional request headers from opts when they are present.
  { 'if-match': :if_match, 'opc-request-id': :opc_request_id, 'opc-retry-token': :opc_retry_token }.each do |header, opt_key|
    header_params[header] = opts[opt_key] if opts[opt_key]
  end
  # A retry token is always sent; generate one if the caller did not supply it.
  header_params[:'opc-retry-token'] ||= OCI::Retry.generate_opc_retry_token

  post_body = @api_client.object_to_http_body(change_certificate_authority_compartment_details)

  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#change_certificate_authority_compartment') do
    @api_client.call_api(
      :POST,
      path,
      endpoint,
      header_params: header_params,
      query_params: query_params,
      operation_signing_strategy: signing_strategy,
      body: post_body
    )
  end
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Moves a certificate to a different compartment within the same tenancy. For information about
# moving resources between compartments, see [Moving Resources to a Different Compartment](https://docs.cloud.oracle.com/iaas/Content/Identity/Tasks/managingcompartments.htm#moveRes).
#
# When provided, if-match is checked against the ETag values of the certificate.
#
# @param [String] certificate_id The OCID of the certificate.
# @param [OCI::CertificatesManagement::Models::ChangeCertificateCompartmentDetails] change_certificate_compartment_details The updated compartment details.
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
# retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :if_match For optimistic concurrency control. In the PUT or DELETE call for a
# resource, set the `if-match` parameter to the value of the etag from a
# previous GET or POST response for that resource. The resource will be
# updated or deleted only if the etag you provide matches the resource's
# current etag value.
#
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
# will include this value. Otherwise, a random request ID will be
# generated by the service.
#
# @option opts [String] :opc_retry_token A token that uniquely identifies a request so it can be retried in case
# of a timeout or server error without risk of executing that same action
# again. Retry tokens expire after 24 hours, but can be invalidated
# before then due to conflicting operations (for example, if a resource has been
# deleted and purged from the system, then a retry of the original
# creation request may be rejected).
#
# @return [Response] A Response object with data of type nil
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/change_certificate_compartment.rb.html) to see an example of how to use change_certificate_compartment API.
def change_certificate_compartment(certificate_id, change_certificate_compartment_details, opts = {})
  logger.debug 'Calling operation CertificatesManagementClient#change_certificate_compartment.' if logger

  # Validate required parameters before building the request.
  if certificate_id.nil?
    raise "Missing the required parameter 'certificate_id' when calling change_certificate_compartment."
  end
  if change_certificate_compartment_details.nil?
    raise "Missing the required parameter 'change_certificate_compartment_details' when calling change_certificate_compartment."
  end
  if OCI::Internal::Util.blank_string?(certificate_id)
    raise "Parameter value for 'certificate_id' must not be blank"
  end

  path = '/certificates/{certificateId}/actions/changeCompartment'
         .sub('{certificateId}', certificate_id.to_s)
  signing_strategy = :standard

  query_params = {}
  header_params = {
    accept: 'application/json',
    'content-type': 'application/json'
  }
  # Copy optional request headers from opts when they are present.
  { 'if-match': :if_match, 'opc-request-id': :opc_request_id, 'opc-retry-token': :opc_retry_token }.each do |header, opt_key|
    header_params[header] = opts[opt_key] if opts[opt_key]
  end
  # A retry token is always sent; generate one if the caller did not supply it.
  header_params[:'opc-retry-token'] ||= OCI::Retry.generate_opc_retry_token

  post_body = @api_client.object_to_http_body(change_certificate_compartment_details)

  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#change_certificate_compartment') do
    @api_client.call_api(
      :POST,
      path,
      endpoint,
      header_params: header_params,
      query_params: query_params,
      operation_signing_strategy: signing_strategy,
      body: post_body
    )
  end
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Creates a new CA bundle according to the details of the request.
# @param [OCI::CertificatesManagement::Models::CreateCaBundleDetails] create_ca_bundle_details The details of the request to create a new CA bundle.
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
# retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
# will include this value. Otherwise, a random request ID will be
# generated by the service.
#
# @option opts [String] :opc_retry_token A token that uniquely identifies a request so it can be retried in case
# of a timeout or server error without risk of executing that same action
# again. Retry tokens expire after 24 hours, but can be invalidated
# before then due to conflicting operations (for example, if a resource has been
# deleted and purged from the system, then a retry of the original
# creation request may be rejected).
#
# @return [Response] A Response object with data of type {OCI::CertificatesManagement::Models::CaBundle CaBundle}
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/create_ca_bundle.rb.html) to see an example of how to use create_ca_bundle API.
def create_ca_bundle(create_ca_bundle_details, opts = {})
  logger.debug 'Calling operation CertificatesManagementClient#create_ca_bundle.' if logger

  if create_ca_bundle_details.nil?
    raise "Missing the required parameter 'create_ca_bundle_details' when calling create_ca_bundle."
  end

  path = '/caBundles'
  signing_strategy = :standard

  query_params = {}
  header_params = {
    accept: 'application/json',
    'content-type': 'application/json'
  }
  # Copy optional request headers from opts when they are present.
  { 'opc-request-id': :opc_request_id, 'opc-retry-token': :opc_retry_token }.each do |header, opt_key|
    header_params[header] = opts[opt_key] if opts[opt_key]
  end
  # A retry token is always sent; generate one if the caller did not supply it.
  header_params[:'opc-retry-token'] ||= OCI::Retry.generate_opc_retry_token

  post_body = @api_client.object_to_http_body(create_ca_bundle_details)

  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#create_ca_bundle') do
    @api_client.call_api(
      :POST,
      path,
      endpoint,
      header_params: header_params,
      query_params: query_params,
      operation_signing_strategy: signing_strategy,
      body: post_body,
      return_type: 'OCI::CertificatesManagement::Models::CaBundle'
    )
  end
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Creates a new certificate according to the details of the request.
# @param [OCI::CertificatesManagement::Models::CreateCertificateDetails] create_certificate_details The details of the request to create a new certificate.
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
# retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
# will include this value. Otherwise, a random request ID will be
# generated by the service.
#
# @option opts [String] :opc_retry_token A token that uniquely identifies a request so it can be retried in case
# of a timeout or server error without risk of executing that same action
# again. Retry tokens expire after 24 hours, but can be invalidated
# before then due to conflicting operations (for example, if a resource has been
# deleted and purged from the system, then a retry of the original
# creation request may be rejected).
#
# @return [Response] A Response object with data of type {OCI::CertificatesManagement::Models::Certificate Certificate}
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/create_certificate.rb.html) to see an example of how to use create_certificate API.
def create_certificate(create_certificate_details, opts = {})
  logger.debug 'Calling operation CertificatesManagementClient#create_certificate.' if logger

  if create_certificate_details.nil?
    raise "Missing the required parameter 'create_certificate_details' when calling create_certificate."
  end

  path = '/certificates'
  signing_strategy = :standard

  query_params = {}
  header_params = {
    accept: 'application/json',
    'content-type': 'application/json'
  }
  # Copy optional request headers from opts when they are present.
  { 'opc-request-id': :opc_request_id, 'opc-retry-token': :opc_retry_token }.each do |header, opt_key|
    header_params[header] = opts[opt_key] if opts[opt_key]
  end
  # A retry token is always sent; generate one if the caller did not supply it.
  header_params[:'opc-retry-token'] ||= OCI::Retry.generate_opc_retry_token

  post_body = @api_client.object_to_http_body(create_certificate_details)

  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#create_certificate') do
    @api_client.call_api(
      :POST,
      path,
      endpoint,
      header_params: header_params,
      query_params: query_params,
      operation_signing_strategy: signing_strategy,
      body: post_body,
      return_type: 'OCI::CertificatesManagement::Models::Certificate'
    )
  end
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Creates a new certificate authority (CA) according to the details of the request.
# @param [OCI::CertificatesManagement::Models::CreateCertificateAuthorityDetails] create_certificate_authority_details The details of the request to create a new CA.
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
#   retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
#   will include this value. Otherwise, a random request ID will be
#   generated by the service.
#
# @option opts [String] :opc_retry_token A token that uniquely identifies a request so it can be retried in case
#   of a timeout or server error without risk of executing that same action
#   again. Retry tokens expire after 24 hours, but can be invalidated
#   before then due to conflicting operations (for example, if a resource has been
#   deleted and purged from the system, then a retry of the original
#   creation request may be rejected).
#
# @return [Response] A Response object with data of type {OCI::CertificatesManagement::Models::CertificateAuthority CertificateAuthority}
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/create_certificate_authority.rb.html) to see an example of how to use create_certificate_authority API.
def create_certificate_authority(create_certificate_authority_details, opts = {})
  logger.debug('Calling operation CertificatesManagementClient#create_certificate_authority.') if logger

  # The request body is mandatory for this POST operation.
  raise "Missing the required parameter 'create_certificate_authority_details' when calling create_certificate_authority." if create_certificate_authority_details.nil?

  resolved_path = '/certificateAuthorities'
  signing_strategy = :standard

  # This operation takes no query parameters.
  query = {}

  # Assemble the request headers.
  headers = {
    accept: 'application/json',
    'content-type': 'application/json'
  }
  headers[:'opc-request-id'] = opts[:opc_request_id] if opts[:opc_request_id]
  headers[:'opc-retry-token'] = opts[:opc_retry_token] if opts[:opc_retry_token]
  # Idempotency: generate a retry token when the caller did not supply one.
  headers[:'opc-retry-token'] ||= OCI::Retry.generate_opc_retry_token

  body = @api_client.object_to_http_body(create_certificate_authority_details)

  # rubocop:disable Metrics/BlockLength
  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#create_certificate_authority') do
    @api_client.call_api(
      :POST,
      resolved_path,
      endpoint,
      header_params: headers,
      query_params: query,
      operation_signing_strategy: signing_strategy,
      body: body,
      return_type: 'OCI::CertificatesManagement::Models::CertificateAuthority'
    )
  end
  # rubocop:enable Metrics/BlockLength
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Deletes the specified CA bundle.
# @param [String] ca_bundle_id The OCID of the CA bundle.
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
#   retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
#   will include this value. Otherwise, a random request ID will be
#   generated by the service.
#
# @option opts [String] :if_match For optimistic concurrency control. In the PUT or DELETE call for a
#   resource, set the `if-match` parameter to the value of the etag from a
#   previous GET or POST response for that resource. The resource will be
#   updated or deleted only if the etag you provide matches the resource's
#   current etag value.
#
# @return [Response] A Response object with data of type nil
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/delete_ca_bundle.rb.html) to see an example of how to use delete_ca_bundle API.
def delete_ca_bundle(ca_bundle_id, opts = {})
  logger.debug('Calling operation CertificatesManagementClient#delete_ca_bundle.') if logger

  # The path parameter must be present and non-blank before it is interpolated.
  raise "Missing the required parameter 'ca_bundle_id' when calling delete_ca_bundle." if ca_bundle_id.nil?
  raise "Parameter value for 'ca_bundle_id' must not be blank" if OCI::Internal::Util.blank_string?(ca_bundle_id)

  resolved_path = '/caBundles/{caBundleId}'.sub('{caBundleId}', ca_bundle_id.to_s)
  signing_strategy = :standard

  # This operation takes no query parameters.
  query = {}

  # Assemble the request headers.
  headers = {
    accept: 'application/json',
    'content-type': 'application/json'
  }
  headers[:'opc-request-id'] = opts[:opc_request_id] if opts[:opc_request_id]
  headers[:'if-match'] = opts[:if_match] if opts[:if_match]

  # DELETE carries no body.
  body = nil

  # rubocop:disable Metrics/BlockLength
  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#delete_ca_bundle') do
    @api_client.call_api(
      :DELETE,
      resolved_path,
      endpoint,
      header_params: headers,
      query_params: query,
      operation_signing_strategy: signing_strategy,
      body: body
    )
  end
  # rubocop:enable Metrics/BlockLength
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Gets details about the specified association.
# @param [String] association_id The OCID of an association between a certificate-related resource and another Oracle Cloud Infrastructure resource.
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
#   retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
#   will include this value. Otherwise, a random request ID will be
#   generated by the service.
#
# @return [Response] A Response object with data of type {OCI::CertificatesManagement::Models::Association Association}
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/get_association.rb.html) to see an example of how to use get_association API.
def get_association(association_id, opts = {})
  logger.debug('Calling operation CertificatesManagementClient#get_association.') if logger

  # The path parameter must be present and non-blank before it is interpolated.
  raise "Missing the required parameter 'association_id' when calling get_association." if association_id.nil?
  raise "Parameter value for 'association_id' must not be blank" if OCI::Internal::Util.blank_string?(association_id)

  resolved_path = '/associations/{associationId}'.sub('{associationId}', association_id.to_s)
  signing_strategy = :standard

  # This operation takes no query parameters.
  query = {}

  # Assemble the request headers.
  headers = {
    accept: 'application/json',
    'content-type': 'application/json'
  }
  headers[:'opc-request-id'] = opts[:opc_request_id] if opts[:opc_request_id]

  # GET carries no body.
  body = nil

  # rubocop:disable Metrics/BlockLength
  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#get_association') do
    @api_client.call_api(
      :GET,
      resolved_path,
      endpoint,
      header_params: headers,
      query_params: query,
      operation_signing_strategy: signing_strategy,
      body: body,
      return_type: 'OCI::CertificatesManagement::Models::Association'
    )
  end
  # rubocop:enable Metrics/BlockLength
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Gets details about the specified CA bundle.
# @param [String] ca_bundle_id The OCID of the CA bundle.
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
#   retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
#   will include this value. Otherwise, a random request ID will be
#   generated by the service.
#
# @return [Response] A Response object with data of type {OCI::CertificatesManagement::Models::CaBundle CaBundle}
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/get_ca_bundle.rb.html) to see an example of how to use get_ca_bundle API.
def get_ca_bundle(ca_bundle_id, opts = {})
  logger.debug('Calling operation CertificatesManagementClient#get_ca_bundle.') if logger

  # The path parameter must be present and non-blank before it is interpolated.
  raise "Missing the required parameter 'ca_bundle_id' when calling get_ca_bundle." if ca_bundle_id.nil?
  raise "Parameter value for 'ca_bundle_id' must not be blank" if OCI::Internal::Util.blank_string?(ca_bundle_id)

  resolved_path = '/caBundles/{caBundleId}'.sub('{caBundleId}', ca_bundle_id.to_s)
  signing_strategy = :standard

  # This operation takes no query parameters.
  query = {}

  # Assemble the request headers.
  headers = {
    accept: 'application/json',
    'content-type': 'application/json'
  }
  headers[:'opc-request-id'] = opts[:opc_request_id] if opts[:opc_request_id]

  # GET carries no body.
  body = nil

  # rubocop:disable Metrics/BlockLength
  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#get_ca_bundle') do
    @api_client.call_api(
      :GET,
      resolved_path,
      endpoint,
      header_params: headers,
      query_params: query,
      operation_signing_strategy: signing_strategy,
      body: body,
      return_type: 'OCI::CertificatesManagement::Models::CaBundle'
    )
  end
  # rubocop:enable Metrics/BlockLength
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Gets details about the specified certificate.
# @param [String] certificate_id The OCID of the certificate.
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
#   retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
#   will include this value. Otherwise, a random request ID will be
#   generated by the service.
#
# @return [Response] A Response object with data of type {OCI::CertificatesManagement::Models::Certificate Certificate}
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/get_certificate.rb.html) to see an example of how to use get_certificate API.
def get_certificate(certificate_id, opts = {})
  logger.debug('Calling operation CertificatesManagementClient#get_certificate.') if logger

  # The path parameter must be present and non-blank before it is interpolated.
  raise "Missing the required parameter 'certificate_id' when calling get_certificate." if certificate_id.nil?
  raise "Parameter value for 'certificate_id' must not be blank" if OCI::Internal::Util.blank_string?(certificate_id)

  resolved_path = '/certificates/{certificateId}'.sub('{certificateId}', certificate_id.to_s)
  signing_strategy = :standard

  # This operation takes no query parameters.
  query = {}

  # Assemble the request headers.
  headers = {
    accept: 'application/json',
    'content-type': 'application/json'
  }
  headers[:'opc-request-id'] = opts[:opc_request_id] if opts[:opc_request_id]

  # GET carries no body.
  body = nil

  # rubocop:disable Metrics/BlockLength
  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#get_certificate') do
    @api_client.call_api(
      :GET,
      resolved_path,
      endpoint,
      header_params: headers,
      query_params: query,
      operation_signing_strategy: signing_strategy,
      body: body,
      return_type: 'OCI::CertificatesManagement::Models::Certificate'
    )
  end
  # rubocop:enable Metrics/BlockLength
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Gets details about the specified certificate authority (CA).
# @param [String] certificate_authority_id The OCID of the certificate authority (CA).
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
#   retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
#   will include this value. Otherwise, a random request ID will be
#   generated by the service.
#
# @return [Response] A Response object with data of type {OCI::CertificatesManagement::Models::CertificateAuthority CertificateAuthority}
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/get_certificate_authority.rb.html) to see an example of how to use get_certificate_authority API.
def get_certificate_authority(certificate_authority_id, opts = {})
  logger.debug('Calling operation CertificatesManagementClient#get_certificate_authority.') if logger

  # The path parameter must be present and non-blank before it is interpolated.
  raise "Missing the required parameter 'certificate_authority_id' when calling get_certificate_authority." if certificate_authority_id.nil?
  raise "Parameter value for 'certificate_authority_id' must not be blank" if OCI::Internal::Util.blank_string?(certificate_authority_id)

  resolved_path = '/certificateAuthorities/{certificateAuthorityId}'.sub('{certificateAuthorityId}', certificate_authority_id.to_s)
  signing_strategy = :standard

  # This operation takes no query parameters.
  query = {}

  # Assemble the request headers.
  headers = {
    accept: 'application/json',
    'content-type': 'application/json'
  }
  headers[:'opc-request-id'] = opts[:opc_request_id] if opts[:opc_request_id]

  # GET carries no body.
  body = nil

  # rubocop:disable Metrics/BlockLength
  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#get_certificate_authority') do
    @api_client.call_api(
      :GET,
      resolved_path,
      endpoint,
      header_params: headers,
      query_params: query,
      operation_signing_strategy: signing_strategy,
      body: body,
      return_type: 'OCI::CertificatesManagement::Models::CertificateAuthority'
    )
  end
  # rubocop:enable Metrics/BlockLength
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Gets details about the specified certificate authority (CA) version.
# @param [String] certificate_authority_id The OCID of the certificate authority (CA).
# @param [Integer] certificate_authority_version_number The version number of the certificate authority (CA).
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
#   retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
#   will include this value. Otherwise, a random request ID will be
#   generated by the service.
#
# @return [Response] A Response object with data of type {OCI::CertificatesManagement::Models::CertificateAuthorityVersion CertificateAuthorityVersion}
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/get_certificate_authority_version.rb.html) to see an example of how to use get_certificate_authority_version API.
def get_certificate_authority_version(certificate_authority_id, certificate_authority_version_number, opts = {})
  logger.debug('Calling operation CertificatesManagementClient#get_certificate_authority_version.') if logger

  # Both path parameters must be present and non-blank before interpolation.
  raise "Missing the required parameter 'certificate_authority_id' when calling get_certificate_authority_version." if certificate_authority_id.nil?
  raise "Missing the required parameter 'certificate_authority_version_number' when calling get_certificate_authority_version." if certificate_authority_version_number.nil?
  raise "Parameter value for 'certificate_authority_id' must not be blank" if OCI::Internal::Util.blank_string?(certificate_authority_id)
  raise "Parameter value for 'certificate_authority_version_number' must not be blank" if OCI::Internal::Util.blank_string?(certificate_authority_version_number)

  resolved_path = '/certificateAuthorities/{certificateAuthorityId}/version/{certificateAuthorityVersionNumber}'.sub('{certificateAuthorityId}', certificate_authority_id.to_s).sub('{certificateAuthorityVersionNumber}', certificate_authority_version_number.to_s)
  signing_strategy = :standard

  # This operation takes no query parameters.
  query = {}

  # Assemble the request headers.
  headers = {
    accept: 'application/json',
    'content-type': 'application/json'
  }
  headers[:'opc-request-id'] = opts[:opc_request_id] if opts[:opc_request_id]

  # GET carries no body.
  body = nil

  # rubocop:disable Metrics/BlockLength
  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#get_certificate_authority_version') do
    @api_client.call_api(
      :GET,
      resolved_path,
      endpoint,
      header_params: headers,
      query_params: query,
      operation_signing_strategy: signing_strategy,
      body: body,
      return_type: 'OCI::CertificatesManagement::Models::CertificateAuthorityVersion'
    )
  end
  # rubocop:enable Metrics/BlockLength
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Gets details about the specified version of a certificate.
# @param [String] certificate_id The OCID of the certificate.
# @param [Integer] certificate_version_number The version number of the certificate.
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
#   retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
#   will include this value. Otherwise, a random request ID will be
#   generated by the service.
#
# @return [Response] A Response object with data of type {OCI::CertificatesManagement::Models::CertificateVersion CertificateVersion}
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/get_certificate_version.rb.html) to see an example of how to use get_certificate_version API.
def get_certificate_version(certificate_id, certificate_version_number, opts = {})
  logger.debug('Calling operation CertificatesManagementClient#get_certificate_version.') if logger

  # Both path parameters must be present and non-blank before interpolation.
  raise "Missing the required parameter 'certificate_id' when calling get_certificate_version." if certificate_id.nil?
  raise "Missing the required parameter 'certificate_version_number' when calling get_certificate_version." if certificate_version_number.nil?
  raise "Parameter value for 'certificate_id' must not be blank" if OCI::Internal::Util.blank_string?(certificate_id)
  raise "Parameter value for 'certificate_version_number' must not be blank" if OCI::Internal::Util.blank_string?(certificate_version_number)

  resolved_path = '/certificates/{certificateId}/version/{certificateVersionNumber}'.sub('{certificateId}', certificate_id.to_s).sub('{certificateVersionNumber}', certificate_version_number.to_s)
  signing_strategy = :standard

  # This operation takes no query parameters.
  query = {}

  # Assemble the request headers.
  headers = {
    accept: 'application/json',
    'content-type': 'application/json'
  }
  headers[:'opc-request-id'] = opts[:opc_request_id] if opts[:opc_request_id]

  # GET carries no body.
  body = nil

  # rubocop:disable Metrics/BlockLength
  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#get_certificate_version') do
    @api_client.call_api(
      :GET,
      resolved_path,
      endpoint,
      header_params: headers,
      query_params: query,
      operation_signing_strategy: signing_strategy,
      body: body,
      return_type: 'OCI::CertificatesManagement::Models::CertificateVersion'
    )
  end
  # rubocop:enable Metrics/BlockLength
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Lists all associations that match the query parameters.
# Optionally, you can use the parameter `FilterByAssociationIdQueryParam` to limit the result set to a single item that matches the specified association.
#
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
#   retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
#   will include this value. Otherwise, a random request ID will be
#   generated by the service.
#
# @option opts [String] :compartment_id A filter that returns only resources that match the given compartment OCID.
# @option opts [String] :certificates_resource_id A filter that returns only resources that match the given OCID of a certificate-related resource.
# @option opts [String] :associated_resource_id A filter that returns only resources that match the given OCID of an associated Oracle Cloud Infrastructure resource.
# @option opts [String] :association_id The OCID of the association. If the parameter is set to null, the service lists all associations.
# @option opts [String] :name A filter that returns only resources that match the specified name.
# @option opts [String] :sort_by The field to sort by. You can specify only one sort order. The default order for `TIMECREATED` is descending.
#   The default order for `NAME` is ascending.
#
#   Allowed values are: NAME, TIMECREATED
# @option opts [String] :sort_order The sort order to use, either ascending (`ASC`) or descending (`DESC`).
#
#   Allowed values are: ASC, DESC
# @option opts [Integer] :limit The maximum number of items to return in a paginated \"List\" call.
#
# @option opts [String] :page The value of the `opc-next-page` response header
#   from the previous \"List\" call.
#
# @option opts [String] :association_type Type of associations to list. If the parameter is set to null, the service lists all types of associations.
#   Allowed values are: CERTIFICATE, CERTIFICATE_AUTHORITY, CA_BUNDLE
# @return [Response] A Response object with data of type {OCI::CertificatesManagement::Models::AssociationCollection AssociationCollection}
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/list_associations.rb.html) to see an example of how to use list_associations API.
def list_associations(opts = {})
  logger.debug('Calling operation CertificatesManagementClient#list_associations.') if logger

  # Validate enum-constrained options before building the request.
  raise 'Invalid value for "sort_by", must be one of NAME, TIMECREATED.' if opts[:sort_by] && !%w[NAME TIMECREATED].include?(opts[:sort_by])
  raise 'Invalid value for "sort_order", must be one of ASC, DESC.' if opts[:sort_order] && !%w[ASC DESC].include?(opts[:sort_order])
  raise 'Invalid value for "association_type", must be one of CERTIFICATE, CERTIFICATE_AUTHORITY, CA_BUNDLE.' if opts[:association_type] && !%w[CERTIFICATE CERTIFICATE_AUTHORITY CA_BUNDLE].include?(opts[:association_type])

  resolved_path = '/associations'
  signing_strategy = :standard

  # Copy each supplied filter/pagination option onto the query string,
  # mapping the snake_case option key to its camelCase query parameter.
  query = {}
  {
    compartmentId: :compartment_id,
    certificatesResourceId: :certificates_resource_id,
    associatedResourceId: :associated_resource_id,
    associationId: :association_id,
    name: :name,
    sortBy: :sort_by,
    sortOrder: :sort_order,
    limit: :limit,
    page: :page,
    associationType: :association_type
  }.each do |param_name, opt_key|
    query[param_name] = opts[opt_key] if opts[opt_key]
  end

  # Assemble the request headers.
  headers = {
    accept: 'application/json',
    'content-type': 'application/json'
  }
  headers[:'opc-request-id'] = opts[:opc_request_id] if opts[:opc_request_id]

  # GET carries no body.
  body = nil

  # rubocop:disable Metrics/BlockLength
  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#list_associations') do
    @api_client.call_api(
      :GET,
      resolved_path,
      endpoint,
      header_params: headers,
      query_params: query,
      operation_signing_strategy: signing_strategy,
      body: body,
      return_type: 'OCI::CertificatesManagement::Models::AssociationCollection'
    )
  end
  # rubocop:enable Metrics/BlockLength
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Lists all CA bundles that match the query parameters.
# Optionally, you can use the parameter `FilterByCaBundleIdQueryParam` to limit the result set to a single item that matches the specified CA bundle.
#
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
#   retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
#   will include this value. Otherwise, a random request ID will be
#   generated by the service.
#
# @option opts [String] :compartment_id A filter that returns only resources that match the given compartment OCID.
# @option opts [String] :lifecycle_state A filter that returns only resources that match the given lifecycle state. The state value is case-insensitive.
#   Allowed values are: CREATING, ACTIVE, UPDATING, DELETING, DELETED, FAILED
# @option opts [String] :name A filter that returns only resources that match the specified name.
# @option opts [String] :sort_by The field to sort by. You can specify only one sort order. The default order for `TIMECREATED` is descending.
#   The default order for `NAME` is ascending.
#
#   Allowed values are: NAME, TIMECREATED
# @option opts [String] :sort_order The sort order to use, either ascending (`ASC`) or descending (`DESC`).
#
#   Allowed values are: ASC, DESC
# @option opts [Integer] :limit The maximum number of items to return in a paginated \"List\" call.
#
# @option opts [String] :page The value of the `opc-next-page` response header
#   from the previous \"List\" call.
#
# @option opts [String] :ca_bundle_id The OCID of the CA bundle.
# @return [Response] A Response object with data of type {OCI::CertificatesManagement::Models::CaBundleCollection CaBundleCollection}
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/list_ca_bundles.rb.html) to see an example of how to use list_ca_bundles API.
def list_ca_bundles(opts = {})
  logger.debug('Calling operation CertificatesManagementClient#list_ca_bundles.') if logger

  # Validate enum-constrained options before building the request.
  raise 'Invalid value for "lifecycle_state", must be one of CREATING, ACTIVE, UPDATING, DELETING, DELETED, FAILED.' if opts[:lifecycle_state] && !%w[CREATING ACTIVE UPDATING DELETING DELETED FAILED].include?(opts[:lifecycle_state])
  raise 'Invalid value for "sort_by", must be one of NAME, TIMECREATED.' if opts[:sort_by] && !%w[NAME TIMECREATED].include?(opts[:sort_by])
  raise 'Invalid value for "sort_order", must be one of ASC, DESC.' if opts[:sort_order] && !%w[ASC DESC].include?(opts[:sort_order])

  resolved_path = '/caBundles'
  signing_strategy = :standard

  # Copy each supplied filter/pagination option onto the query string,
  # mapping the snake_case option key to its camelCase query parameter.
  query = {}
  {
    compartmentId: :compartment_id,
    lifecycleState: :lifecycle_state,
    name: :name,
    sortBy: :sort_by,
    sortOrder: :sort_order,
    limit: :limit,
    page: :page,
    caBundleId: :ca_bundle_id
  }.each do |param_name, opt_key|
    query[param_name] = opts[opt_key] if opts[opt_key]
  end

  # Assemble the request headers.
  headers = {
    accept: 'application/json',
    'content-type': 'application/json'
  }
  headers[:'opc-request-id'] = opts[:opc_request_id] if opts[:opc_request_id]

  # GET carries no body.
  body = nil

  # rubocop:disable Metrics/BlockLength
  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#list_ca_bundles') do
    @api_client.call_api(
      :GET,
      resolved_path,
      endpoint,
      header_params: headers,
      query_params: query,
      operation_signing_strategy: signing_strategy,
      body: body,
      return_type: 'OCI::CertificatesManagement::Models::CaBundleCollection'
    )
  end
  # rubocop:enable Metrics/BlockLength
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Lists all certificate authorities (CAs) in the specified compartment.
# Optionally, you can use the parameter `FilterByCertificateAuthorityIdQueryParam` to limit the results to a single item that matches the specified CA.
#
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
# retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
# will include this value. Otherwise, a random request ID will be
# generated by the service.
#
# @option opts [String] :compartment_id A filter that returns only resources that match the given compartment OCID.
# @option opts [String] :lifecycle_state A filter that returns only resources that match the given lifecycle state. The state value is case-insensitive.
# Allowed values are: CREATING, ACTIVE, UPDATING, DELETING, DELETED, SCHEDULING_DELETION, PENDING_DELETION, CANCELLING_DELETION, FAILED
# @option opts [String] :name A filter that returns only resources that match the specified name.
# @option opts [String] :issuer_certificate_authority_id The OCID of the issuer certificate authority (CA). If the parameter is set to null, the service lists all CAs.
# @option opts [String] :certificate_authority_id The OCID of the certificate authority (CA). If the parameter is set to null, the service lists all CAs.
# @option opts [String] :sort_by The field to sort by. You can specify only one sort order. The default
#   order for `EXPIRATIONDATE` and `TIMECREATED` is descending. The default order for `NAME`
# is ascending.
#
# Allowed values are: NAME, EXPIRATIONDATE, TIMECREATED
# @option opts [String] :sort_order The sort order to use, either ascending (`ASC`) or descending (`DESC`).
#
# Allowed values are: ASC, DESC
# @option opts [Integer] :limit The maximum number of items to return in a paginated \"List\" call.
#
# @option opts [String] :page The value of the `opc-next-page` response header
# from the previous \"List\" call.
#
# @return [Response] A Response object with data of type {OCI::CertificatesManagement::Models::CertificateAuthorityCollection CertificateAuthorityCollection}
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/list_certificate_authorities.rb.html) to see an example of how to use list_certificate_authorities API.
def list_certificate_authorities(opts = {})
  logger.debug 'Calling operation CertificatesManagementClient#list_certificate_authorities.' if logger

  # Validate enum-valued filters before building the request.
  lifecycle = opts[:lifecycle_state]
  if lifecycle && !%w[CREATING ACTIVE UPDATING DELETING DELETED SCHEDULING_DELETION PENDING_DELETION CANCELLING_DELETION FAILED].include?(lifecycle)
    raise 'Invalid value for "lifecycle_state", must be one of CREATING, ACTIVE, UPDATING, DELETING, DELETED, SCHEDULING_DELETION, PENDING_DELETION, CANCELLING_DELETION, FAILED.'
  end

  sort_field = opts[:sort_by]
  if sort_field && !%w[NAME EXPIRATIONDATE TIMECREATED].include?(sort_field)
    raise 'Invalid value for "sort_by", must be one of NAME, EXPIRATIONDATE, TIMECREATED.'
  end

  sort_dir = opts[:sort_order]
  if sort_dir && !%w[ASC DESC].include?(sort_dir)
    raise 'Invalid value for "sort_order", must be one of ASC, DESC.'
  end

  resource_path = '/certificateAuthorities'
  signing_strategy = :standard

  # Option key -> query parameter name; unset options are simply not sent.
  query_params = {}
  { compartmentId: :compartment_id,
    lifecycleState: :lifecycle_state,
    name: :name,
    issuerCertificateAuthorityId: :issuer_certificate_authority_id,
    certificateAuthorityId: :certificate_authority_id,
    sortBy: :sort_by,
    sortOrder: :sort_order,
    limit: :limit,
    page: :page }.each do |wire_key, opt_key|
    value = opts[opt_key]
    query_params[wire_key] = value if value
  end

  header_params = { accept: 'application/json', 'content-type': 'application/json' }
  header_params[:'opc-request-id'] = opts[:opc_request_id] if opts[:opc_request_id]

  # GET with no body; retry behavior comes from the applicable retry config.
  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#list_certificate_authorities') do
    @api_client.call_api(
      :GET,
      resource_path,
      endpoint,
      header_params: header_params,
      query_params: query_params,
      operation_signing_strategy: signing_strategy,
      body: nil,
      return_type: 'OCI::CertificatesManagement::Models::CertificateAuthorityCollection'
    )
  end
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Lists all versions for the specified certificate authority (CA).
# Optionally, you can use the parameter `FilterByVersionNumberQueryParam` to limit the results to a single item that matches the specified version number.
#
# @param [String] certificate_authority_id The OCID of the certificate authority (CA).
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
# retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
# will include this value. Otherwise, a random request ID will be
# generated by the service.
#
# @option opts [Integer] :version_number A filter that returns only resources that match the specified version number. The default value is 0, which means that this filter is not applied.
# (default to 0)
# @option opts [Integer] :limit The maximum number of items to return in a paginated \"List\" call.
#
# @option opts [String] :page The value of the `opc-next-page` response header
# from the previous \"List\" call.
#
# @option opts [String] :sort_by The field to sort by. You can specify only one sort order. The default order for 'VERSION_NUMBER' is ascending.
#
# Allowed values are: VERSION_NUMBER
# @option opts [String] :sort_order The sort order to use, either ascending (`ASC`) or descending (`DESC`).
#
# Allowed values are: ASC, DESC
# @return [Response] A Response object with data of type {OCI::CertificatesManagement::Models::CertificateAuthorityVersionCollection CertificateAuthorityVersionCollection}
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/list_certificate_authority_versions.rb.html) to see an example of how to use list_certificate_authority_versions API.
def list_certificate_authority_versions(certificate_authority_id, opts = {})
  logger.debug 'Calling operation CertificatesManagementClient#list_certificate_authority_versions.' if logger

  # Required path parameter must be present and non-blank.
  raise "Missing the required parameter 'certificate_authority_id' when calling list_certificate_authority_versions." if certificate_authority_id.nil?

  sort_field = opts[:sort_by]
  if sort_field && !%w[VERSION_NUMBER].include?(sort_field)
    raise 'Invalid value for "sort_by", must be one of VERSION_NUMBER.'
  end

  sort_dir = opts[:sort_order]
  if sort_dir && !%w[ASC DESC].include?(sort_dir)
    raise 'Invalid value for "sort_order", must be one of ASC, DESC.'
  end

  raise "Parameter value for 'certificate_authority_id' must not be blank" if OCI::Internal::Util.blank_string?(certificate_authority_id)

  resource_path = '/certificateAuthorities/{certificateAuthorityId}/versions'
                  .sub('{certificateAuthorityId}', certificate_authority_id.to_s)
  signing_strategy = :standard

  # Only options the caller supplied become query parameters.
  query_params = {}
  { versionNumber: :version_number,
    limit: :limit,
    page: :page,
    sortBy: :sort_by,
    sortOrder: :sort_order }.each do |wire_key, opt_key|
    value = opts[opt_key]
    query_params[wire_key] = value if value
  end

  header_params = { accept: 'application/json', 'content-type': 'application/json' }
  header_params[:'opc-request-id'] = opts[:opc_request_id] if opts[:opc_request_id]

  # GET with no body; retries follow the applicable retry config.
  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#list_certificate_authority_versions') do
    @api_client.call_api(
      :GET,
      resource_path,
      endpoint,
      header_params: header_params,
      query_params: query_params,
      operation_signing_strategy: signing_strategy,
      body: nil,
      return_type: 'OCI::CertificatesManagement::Models::CertificateAuthorityVersionCollection'
    )
  end
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Lists all certificate versions for the specified certificate.
# Optionally, you can use the parameter `FilterByVersionNumberQueryParam` to limit the result set to a single item that matches the specified version number.
#
# @param [String] certificate_id The OCID of the certificate.
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
# retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
# will include this value. Otherwise, a random request ID will be
# generated by the service.
#
# @option opts [Integer] :version_number A filter that returns only resources that match the specified version number. The default value is 0, which means that this filter is not applied.
# (default to 0)
# @option opts [Integer] :limit The maximum number of items to return in a paginated \"List\" call.
#
# @option opts [String] :page The value of the `opc-next-page` response header
# from the previous \"List\" call.
#
# @option opts [String] :sort_by The field to sort by. You can specify only one sort order. The default order for 'VERSION_NUMBER' is ascending.
#
# Allowed values are: VERSION_NUMBER
# @option opts [String] :sort_order The sort order to use, either ascending (`ASC`) or descending (`DESC`).
#
# Allowed values are: ASC, DESC
# @return [Response] A Response object with data of type {OCI::CertificatesManagement::Models::CertificateVersionCollection CertificateVersionCollection}
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/list_certificate_versions.rb.html) to see an example of how to use list_certificate_versions API.
def list_certificate_versions(certificate_id, opts = {})
  logger.debug 'Calling operation CertificatesManagementClient#list_certificate_versions.' if logger

  # Required path parameter must be present and non-blank.
  raise "Missing the required parameter 'certificate_id' when calling list_certificate_versions." if certificate_id.nil?

  sort_field = opts[:sort_by]
  if sort_field && !%w[VERSION_NUMBER].include?(sort_field)
    raise 'Invalid value for "sort_by", must be one of VERSION_NUMBER.'
  end

  sort_dir = opts[:sort_order]
  if sort_dir && !%w[ASC DESC].include?(sort_dir)
    raise 'Invalid value for "sort_order", must be one of ASC, DESC.'
  end

  raise "Parameter value for 'certificate_id' must not be blank" if OCI::Internal::Util.blank_string?(certificate_id)

  resource_path = '/certificates/{certificateId}/versions'
                  .sub('{certificateId}', certificate_id.to_s)
  signing_strategy = :standard

  # Only options the caller supplied become query parameters.
  query_params = {}
  { versionNumber: :version_number,
    limit: :limit,
    page: :page,
    sortBy: :sort_by,
    sortOrder: :sort_order }.each do |wire_key, opt_key|
    value = opts[opt_key]
    query_params[wire_key] = value if value
  end

  header_params = { accept: 'application/json', 'content-type': 'application/json' }
  header_params[:'opc-request-id'] = opts[:opc_request_id] if opts[:opc_request_id]

  # GET with no body; retries follow the applicable retry config.
  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#list_certificate_versions') do
    @api_client.call_api(
      :GET,
      resource_path,
      endpoint,
      header_params: header_params,
      query_params: query_params,
      operation_signing_strategy: signing_strategy,
      body: nil,
      return_type: 'OCI::CertificatesManagement::Models::CertificateVersionCollection'
    )
  end
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Lists all certificates that match the query parameters.
# Optionally, you can use the parameter `FilterByCertificateIdQueryParam` to limit the result set to a single item that matches the specified certificate.
#
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
# retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
# will include this value. Otherwise, a random request ID will be
# generated by the service.
#
# @option opts [String] :compartment_id A filter that returns only resources that match the given compartment OCID.
# @option opts [String] :lifecycle_state A filter that returns only resources that match the given lifecycle state. The state value is case-insensitive.
# Allowed values are: CREATING, ACTIVE, UPDATING, DELETING, DELETED, SCHEDULING_DELETION, PENDING_DELETION, CANCELLING_DELETION, FAILED
# @option opts [String] :name A filter that returns only resources that match the specified name.
# @option opts [String] :sort_by The field to sort by. You can specify only one sort order. The default
#   order for `EXPIRATIONDATE` and `TIMECREATED` is descending. The default order for `NAME`
# is ascending.
#
# Allowed values are: NAME, EXPIRATIONDATE, TIMECREATED
# @option opts [String] :sort_order The sort order to use, either ascending (`ASC`) or descending (`DESC`).
#
# Allowed values are: ASC, DESC
# @option opts [Integer] :limit The maximum number of items to return in a paginated \"List\" call.
#
# @option opts [String] :page The value of the `opc-next-page` response header
# from the previous \"List\" call.
#
# @option opts [String] :issuer_certificate_authority_id The OCID of the certificate authority (CA). If the parameter is set to null, the service lists all CAs.
# @option opts [String] :certificate_id The OCID of the certificate. If the parameter is set to null, the service lists all certificates.
# @return [Response] A Response object with data of type {OCI::CertificatesManagement::Models::CertificateCollection CertificateCollection}
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/list_certificates.rb.html) to see an example of how to use list_certificates API.
def list_certificates(opts = {})
logger.debug 'Calling operation CertificatesManagementClient#list_certificates.' if logger
if opts[:lifecycle_state] && !%w[CREATING ACTIVE UPDATING DELETING DELETED SCHEDULING_DELETION PENDING_DELETION CANCELLING_DELETION FAILED].include?(opts[:lifecycle_state])
raise 'Invalid value for "lifecycle_state", must be one of CREATING, ACTIVE, UPDATING, DELETING, DELETED, SCHEDULING_DELETION, PENDING_DELETION, CANCELLING_DELETION, FAILED.'
end
if opts[:sort_by] && !%w[NAME EXPIRATIONDATE TIMECREATED].include?(opts[:sort_by])
raise 'Invalid value for "sort_by", must be one of NAME, EXPIRATIONDATE, TIMECREATED.'
end
if opts[:sort_order] && !%w[ASC DESC].include?(opts[:sort_order])
raise 'Invalid value for "sort_order", must be one of ASC, DESC.'
end
path = '/certificates'
operation_signing_strategy = :standard
# rubocop:disable Style/NegatedIf
# Query Params
query_params = {}
query_params[:compartmentId] = opts[:compartment_id] if opts[:compartment_id]
query_params[:lifecycleState] = opts[:lifecycle_state] if opts[:lifecycle_state]
query_params[:name] = opts[:name] if opts[:name]
query_params[:sortBy] = opts[:sort_by] if opts[:sort_by]
query_params[:sortOrder] = opts[:sort_order] if opts[:sort_order]
query_params[:limit] = opts[:limit] if opts[:limit]
query_params[:page] = opts[:page] if opts[:page]
query_params[:issuerCertificateAuthorityId] = opts[:issuer_certificate_authority_id] if opts[:issuer_certificate_authority_id]
query_params[:certificateId] = opts[:certificate_id] if opts[:certificate_id]
# Header Params
header_params = {}
header_params[:accept] = 'application/json'
header_params[:'content-type'] = 'application/json'
header_params[:'opc-request-id'] = opts[:opc_request_id] if opts[:opc_request_id]
# rubocop:enable Style/NegatedIf
post_body = nil
# rubocop:disable Metrics/BlockLength
OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#list_certificates') do
@api_client.call_api(
:GET,
path,
endpoint,
header_params: header_params,
query_params: query_params,
operation_signing_strategy: operation_signing_strategy,
body: post_body,
return_type: 'OCI::CertificatesManagement::Models::CertificateCollection'
)
end
# rubocop:enable Metrics/BlockLength
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Revokes a certificate authority (CA) version.
# @param [String] certificate_authority_id The OCID of the certificate authority (CA).
# @param [Integer] certificate_authority_version_number The version number of the certificate authority (CA).
# @param [OCI::CertificatesManagement::Models::RevokeCertificateAuthorityVersionDetails] revoke_certificate_authority_version_details The details of the request to revoke a CA version.
#
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
# retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
# will include this value. Otherwise, a random request ID will be
# generated by the service.
#
# @option opts [String] :opc_retry_token A token that uniquely identifies a request so it can be retried in case
# of a timeout or server error without risk of executing that same action
# again. Retry tokens expire after 24 hours, but can be invalidated
# before then due to conflicting operations (for example, if a resource has been
# deleted and purged from the system, then a retry of the original
# creation request may be rejected).
#
# @option opts [String] :if_match For optimistic concurrency control. In the PUT or DELETE call for a
# resource, set the `if-match` parameter to the value of the etag from a
# previous GET or POST response for that resource. The resource will be
# updated or deleted only if the etag you provide matches the resource's
# current etag value.
#
# @return [Response] A Response object with data of type nil
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/revoke_certificate_authority_version.rb.html) to see an example of how to use revoke_certificate_authority_version API.
def revoke_certificate_authority_version(certificate_authority_id, certificate_authority_version_number, revoke_certificate_authority_version_details, opts = {})
  logger.debug 'Calling operation CertificatesManagementClient#revoke_certificate_authority_version.' if logger

  # All three positional arguments are mandatory; path pieces must be non-blank.
  raise "Missing the required parameter 'certificate_authority_id' when calling revoke_certificate_authority_version." if certificate_authority_id.nil?
  raise "Missing the required parameter 'certificate_authority_version_number' when calling revoke_certificate_authority_version." if certificate_authority_version_number.nil?
  raise "Missing the required parameter 'revoke_certificate_authority_version_details' when calling revoke_certificate_authority_version." if revoke_certificate_authority_version_details.nil?
  raise "Parameter value for 'certificate_authority_id' must not be blank" if OCI::Internal::Util.blank_string?(certificate_authority_id)
  raise "Parameter value for 'certificate_authority_version_number' must not be blank" if OCI::Internal::Util.blank_string?(certificate_authority_version_number)

  resource_path = '/certificateAuthorities/{certificateAuthorityId}/version/{certificateAuthorityVersionNumber}/actions/revoke'
                  .sub('{certificateAuthorityId}', certificate_authority_id.to_s)
                  .sub('{certificateAuthorityVersionNumber}', certificate_authority_version_number.to_s)
  signing_strategy = :standard

  query_params = {}

  header_params = { accept: 'application/json', 'content-type': 'application/json' }
  header_params[:'opc-request-id'] = opts[:opc_request_id] if opts[:opc_request_id]
  # Idempotency token: reuse the caller's, or mint one so retries are safe.
  header_params[:'opc-retry-token'] = opts[:opc_retry_token] || OCI::Retry.generate_opc_retry_token
  header_params[:'if-match'] = opts[:if_match] if opts[:if_match]

  post_body = @api_client.object_to_http_body(revoke_certificate_authority_version_details)

  # POST the revocation details; no typed response body is expected.
  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#revoke_certificate_authority_version') do
    @api_client.call_api(
      :POST,
      resource_path,
      endpoint,
      header_params: header_params,
      query_params: query_params,
      operation_signing_strategy: signing_strategy,
      body: post_body
    )
  end
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Revokes the specified certificate version.
# @param [String] certificate_id The OCID of the certificate.
# @param [Integer] certificate_version_number The version number of the certificate.
# @param [OCI::CertificatesManagement::Models::RevokeCertificateVersionDetails] revoke_certificate_version_details The details of the request to revoke a certificate version.
#
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
# retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
# will include this value. Otherwise, a random request ID will be
# generated by the service.
#
# @option opts [String] :opc_retry_token A token that uniquely identifies a request so it can be retried in case
# of a timeout or server error without risk of executing that same action
# again. Retry tokens expire after 24 hours, but can be invalidated
# before then due to conflicting operations (for example, if a resource has been
# deleted and purged from the system, then a retry of the original
# creation request may be rejected).
#
# @option opts [String] :if_match For optimistic concurrency control. In the PUT or DELETE call for a
# resource, set the `if-match` parameter to the value of the etag from a
# previous GET or POST response for that resource. The resource will be
# updated or deleted only if the etag you provide matches the resource's
# current etag value.
#
# @return [Response] A Response object with data of type nil
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/revoke_certificate_version.rb.html) to see an example of how to use revoke_certificate_version API.
def revoke_certificate_version(certificate_id, certificate_version_number, revoke_certificate_version_details, opts = {})
  logger.debug 'Calling operation CertificatesManagementClient#revoke_certificate_version.' if logger

  # All three positional arguments are mandatory; path pieces must be non-blank.
  raise "Missing the required parameter 'certificate_id' when calling revoke_certificate_version." if certificate_id.nil?
  raise "Missing the required parameter 'certificate_version_number' when calling revoke_certificate_version." if certificate_version_number.nil?
  raise "Missing the required parameter 'revoke_certificate_version_details' when calling revoke_certificate_version." if revoke_certificate_version_details.nil?
  raise "Parameter value for 'certificate_id' must not be blank" if OCI::Internal::Util.blank_string?(certificate_id)
  raise "Parameter value for 'certificate_version_number' must not be blank" if OCI::Internal::Util.blank_string?(certificate_version_number)

  resource_path = '/certificates/{certificateId}/version/{certificateVersionNumber}/actions/revoke'
                  .sub('{certificateId}', certificate_id.to_s)
                  .sub('{certificateVersionNumber}', certificate_version_number.to_s)
  signing_strategy = :standard

  query_params = {}

  header_params = { accept: 'application/json', 'content-type': 'application/json' }
  header_params[:'opc-request-id'] = opts[:opc_request_id] if opts[:opc_request_id]
  # Idempotency token: reuse the caller's, or mint one so retries are safe.
  header_params[:'opc-retry-token'] = opts[:opc_retry_token] || OCI::Retry.generate_opc_retry_token
  header_params[:'if-match'] = opts[:if_match] if opts[:if_match]

  post_body = @api_client.object_to_http_body(revoke_certificate_version_details)

  # POST the revocation details; no typed response body is expected.
  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#revoke_certificate_version') do
    @api_client.call_api(
      :POST,
      resource_path,
      endpoint,
      header_params: header_params,
      query_params: query_params,
      operation_signing_strategy: signing_strategy,
      body: post_body
    )
  end
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Schedules the deletion of the specified certificate authority (CA). This sets the lifecycle state of the CA to `PENDING_DELETION` and then deletes it after the specified retention period ends. If needed, you can determine the status of the deletion by using `GetCertificateAuthority`.
#
# @param [String] certificate_authority_id The OCID of the certificate authority (CA).
# @param [OCI::CertificatesManagement::Models::ScheduleCertificateAuthorityDeletionDetails] schedule_certificate_authority_deletion_details The details of the request to schedule the deletion of a CA.
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
# retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
# will include this value. Otherwise, a random request ID will be
# generated by the service.
#
# @option opts [String] :if_match For optimistic concurrency control. In the PUT or DELETE call for a
# resource, set the `if-match` parameter to the value of the etag from a
# previous GET or POST response for that resource. The resource will be
# updated or deleted only if the etag you provide matches the resource's
# current etag value.
#
# @return [Response] A Response object with data of type nil
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/schedule_certificate_authority_deletion.rb.html) to see an example of how to use schedule_certificate_authority_deletion API.
def schedule_certificate_authority_deletion(certificate_authority_id, schedule_certificate_authority_deletion_details, opts = {})
  # Issues POST /certificateAuthorities/{certificateAuthorityId}/actions/scheduleDeletion,
  # moving the CA to PENDING_DELETION; it is purged after the retention period.
  logger.debug 'Calling operation CertificatesManagementClient#schedule_certificate_authority_deletion.' if logger

  if certificate_authority_id.nil?
    raise "Missing the required parameter 'certificate_authority_id' when calling schedule_certificate_authority_deletion."
  end
  if schedule_certificate_authority_deletion_details.nil?
    raise "Missing the required parameter 'schedule_certificate_authority_deletion_details' when calling schedule_certificate_authority_deletion."
  end
  if OCI::Internal::Util.blank_string?(certificate_authority_id)
    raise "Parameter value for 'certificate_authority_id' must not be blank"
  end

  resolved_path = '/certificateAuthorities/{certificateAuthorityId}/actions/scheduleDeletion'.sub('{certificateAuthorityId}', certificate_authority_id.to_s)

  # Build headers; optional request headers are only sent when supplied.
  request_headers = {}
  request_headers[:accept] = 'application/json'
  request_headers[:'content-type'] = 'application/json'
  request_headers[:'opc-request-id'] = opts[:opc_request_id] if opts[:opc_request_id]
  request_headers[:'if-match'] = opts[:if_match] if opts[:if_match]

  request_body = @api_client.object_to_http_body(schedule_certificate_authority_deletion_details)

  # Retry per the effective retry config; returns the Response from call_api.
  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#schedule_certificate_authority_deletion') do
    @api_client.call_api(
      :POST,
      resolved_path,
      endpoint,
      header_params: request_headers,
      query_params: {},
      operation_signing_strategy: :standard,
      body: request_body
    )
  end
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Schedules the deletion of the specified certificate authority (CA) version.
# This sets the lifecycle state of the CA version to `PENDING_DELETION`
# and then deletes it after the specified retention period ends. If needed, you can determine the status of the deletion by using `GetCertificateAuthorityVersion`.
#
# @param [String] certificate_authority_id The OCID of the certificate authority (CA).
# @param [Integer] certificate_authority_version_number The version number of the certificate authority (CA).
# @param [OCI::CertificatesManagement::Models::ScheduleCertificateAuthorityVersionDeletionDetails] schedule_certificate_authority_version_deletion_details The details of the request to delete a CA version.
#
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
# retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
# will include this value. Otherwise, a random request ID will be
# generated by the service.
#
# @option opts [String] :if_match For optimistic concurrency control. In the PUT or DELETE call for a
# resource, set the `if-match` parameter to the value of the etag from a
# previous GET or POST response for that resource. The resource will be
# updated or deleted only if the etag you provide matches the resource's
# current etag value.
#
# @return [Response] A Response object with data of type nil
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/schedule_certificate_authority_version_deletion.rb.html) to see an example of how to use schedule_certificate_authority_version_deletion API.
def schedule_certificate_authority_version_deletion(certificate_authority_id, certificate_authority_version_number, schedule_certificate_authority_version_deletion_details, opts = {})
  # Issues POST /certificateAuthorities/{id}/version/{versionNumber}/actions/scheduleDeletion,
  # moving the CA version to PENDING_DELETION; it is purged after the retention period.
  logger.debug 'Calling operation CertificatesManagementClient#schedule_certificate_authority_version_deletion.' if logger

  if certificate_authority_id.nil?
    raise "Missing the required parameter 'certificate_authority_id' when calling schedule_certificate_authority_version_deletion."
  end
  if certificate_authority_version_number.nil?
    raise "Missing the required parameter 'certificate_authority_version_number' when calling schedule_certificate_authority_version_deletion."
  end
  if schedule_certificate_authority_version_deletion_details.nil?
    raise "Missing the required parameter 'schedule_certificate_authority_version_deletion_details' when calling schedule_certificate_authority_version_deletion."
  end
  if OCI::Internal::Util.blank_string?(certificate_authority_id)
    raise "Parameter value for 'certificate_authority_id' must not be blank"
  end
  if OCI::Internal::Util.blank_string?(certificate_authority_version_number)
    raise "Parameter value for 'certificate_authority_version_number' must not be blank"
  end

  resolved_path = '/certificateAuthorities/{certificateAuthorityId}/version/{certificateAuthorityVersionNumber}/actions/scheduleDeletion'.sub('{certificateAuthorityId}', certificate_authority_id.to_s).sub('{certificateAuthorityVersionNumber}', certificate_authority_version_number.to_s)

  # Build headers; optional request headers are only sent when supplied.
  request_headers = {}
  request_headers[:accept] = 'application/json'
  request_headers[:'content-type'] = 'application/json'
  request_headers[:'opc-request-id'] = opts[:opc_request_id] if opts[:opc_request_id]
  request_headers[:'if-match'] = opts[:if_match] if opts[:if_match]

  request_body = @api_client.object_to_http_body(schedule_certificate_authority_version_deletion_details)

  # Retry per the effective retry config; returns the Response from call_api.
  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#schedule_certificate_authority_version_deletion') do
    @api_client.call_api(
      :POST,
      resolved_path,
      endpoint,
      header_params: request_headers,
      query_params: {},
      operation_signing_strategy: :standard,
      body: request_body
    )
  end
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Schedules the deletion of the specified certificate. This sets the lifecycle state of the certificate
# to `PENDING_DELETION` and then deletes it after the specified retention period ends.
# You can subsequently use `GetCertificate` to determine the current deletion status.
#
# @param [String] certificate_id The OCID of the certificate.
# @param [OCI::CertificatesManagement::Models::ScheduleCertificateDeletionDetails] schedule_certificate_deletion_details The details of the request to schedule a certificate deletion.
# This sets the lifecycle state of the certificate to `PENDING_DELETION` and then deletes it
# after the specified retention period ends.
# You can subsequently use `GetCertificate` to determine the current deletion status.
#
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
# retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
# will include this value. Otherwise, a random request ID will be
# generated by the service.
#
# @option opts [String] :if_match For optimistic concurrency control. In the PUT or DELETE call for a
# resource, set the `if-match` parameter to the value of the etag from a
# previous GET or POST response for that resource. The resource will be
# updated or deleted only if the etag you provide matches the resource's
# current etag value.
#
# @return [Response] A Response object with data of type nil
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/schedule_certificate_deletion.rb.html) to see an example of how to use schedule_certificate_deletion API.
def schedule_certificate_deletion(certificate_id, schedule_certificate_deletion_details, opts = {})
  # Issues POST /certificates/{certificateId}/actions/scheduleDeletion, moving the
  # certificate to PENDING_DELETION; it is purged after the retention period.
  logger.debug 'Calling operation CertificatesManagementClient#schedule_certificate_deletion.' if logger

  if certificate_id.nil?
    raise "Missing the required parameter 'certificate_id' when calling schedule_certificate_deletion."
  end
  if schedule_certificate_deletion_details.nil?
    raise "Missing the required parameter 'schedule_certificate_deletion_details' when calling schedule_certificate_deletion."
  end
  if OCI::Internal::Util.blank_string?(certificate_id)
    raise "Parameter value for 'certificate_id' must not be blank"
  end

  resolved_path = '/certificates/{certificateId}/actions/scheduleDeletion'.sub('{certificateId}', certificate_id.to_s)

  # Build headers; optional request headers are only sent when supplied.
  request_headers = {}
  request_headers[:accept] = 'application/json'
  request_headers[:'content-type'] = 'application/json'
  request_headers[:'opc-request-id'] = opts[:opc_request_id] if opts[:opc_request_id]
  request_headers[:'if-match'] = opts[:if_match] if opts[:if_match]

  request_body = @api_client.object_to_http_body(schedule_certificate_deletion_details)

  # Retry per the effective retry config; returns the Response from call_api.
  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#schedule_certificate_deletion') do
    @api_client.call_api(
      :POST,
      resolved_path,
      endpoint,
      header_params: request_headers,
      query_params: {},
      operation_signing_strategy: :standard,
      body: request_body
    )
  end
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Schedules the deletion of the specified certificate version. This sets the lifecycle state of the certificate version to `PENDING_DELETION` and then deletes it after the specified retention period ends. You can only
# delete a certificate version if the certificate version rotation state is marked as `DEPRECATED`.
#
# You can subsequently use `GetCertificateVersion` to determine the current certificate version deletion status.
#
# @param [String] certificate_id The OCID of the certificate.
# @param [Integer] certificate_version_number The version number of the certificate.
# @param [OCI::CertificatesManagement::Models::ScheduleCertificateVersionDeletionDetails] schedule_certificate_version_deletion_details The details of the request to delete a certificate version.
#
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
# retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
# will include this value. Otherwise, a random request ID will be
# generated by the service.
#
# @option opts [String] :if_match For optimistic concurrency control. In the PUT or DELETE call for a
# resource, set the `if-match` parameter to the value of the etag from a
# previous GET or POST response for that resource. The resource will be
# updated or deleted only if the etag you provide matches the resource's
# current etag value.
#
# @return [Response] A Response object with data of type nil
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/schedule_certificate_version_deletion.rb.html) to see an example of how to use schedule_certificate_version_deletion API.
def schedule_certificate_version_deletion(certificate_id, certificate_version_number, schedule_certificate_version_deletion_details, opts = {})
  # Issues POST /certificates/{id}/version/{versionNumber}/actions/scheduleDeletion,
  # moving the certificate version to PENDING_DELETION; deletion only applies to
  # versions whose rotation state is DEPRECATED (service-enforced).
  logger.debug 'Calling operation CertificatesManagementClient#schedule_certificate_version_deletion.' if logger

  if certificate_id.nil?
    raise "Missing the required parameter 'certificate_id' when calling schedule_certificate_version_deletion."
  end
  if certificate_version_number.nil?
    raise "Missing the required parameter 'certificate_version_number' when calling schedule_certificate_version_deletion."
  end
  if schedule_certificate_version_deletion_details.nil?
    raise "Missing the required parameter 'schedule_certificate_version_deletion_details' when calling schedule_certificate_version_deletion."
  end
  if OCI::Internal::Util.blank_string?(certificate_id)
    raise "Parameter value for 'certificate_id' must not be blank"
  end
  if OCI::Internal::Util.blank_string?(certificate_version_number)
    raise "Parameter value for 'certificate_version_number' must not be blank"
  end

  resolved_path = '/certificates/{certificateId}/version/{certificateVersionNumber}/actions/scheduleDeletion'.sub('{certificateId}', certificate_id.to_s).sub('{certificateVersionNumber}', certificate_version_number.to_s)

  # Build headers; optional request headers are only sent when supplied.
  request_headers = {}
  request_headers[:accept] = 'application/json'
  request_headers[:'content-type'] = 'application/json'
  request_headers[:'opc-request-id'] = opts[:opc_request_id] if opts[:opc_request_id]
  request_headers[:'if-match'] = opts[:if_match] if opts[:if_match]

  request_body = @api_client.object_to_http_body(schedule_certificate_version_deletion_details)

  # Retry per the effective retry config; returns the Response from call_api.
  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#schedule_certificate_version_deletion') do
    @api_client.call_api(
      :POST,
      resolved_path,
      endpoint,
      header_params: request_headers,
      query_params: {},
      operation_signing_strategy: :standard,
      body: request_body
    )
  end
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Updates the properties of a CA bundle.
# @param [String] ca_bundle_id The OCID of the CA bundle.
# @param [OCI::CertificatesManagement::Models::UpdateCaBundleDetails] update_ca_bundle_details The details of the request to update a CA bundle.
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
# retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
# will include this value. Otherwise, a random request ID will be
# generated by the service.
#
# @option opts [String] :if_match For optimistic concurrency control. In the PUT or DELETE call for a
# resource, set the `if-match` parameter to the value of the etag from a
# previous GET or POST response for that resource. The resource will be
# updated or deleted only if the etag you provide matches the resource's
# current etag value.
#
# @return [Response] A Response object with data of type {OCI::CertificatesManagement::Models::CaBundle CaBundle}
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/update_ca_bundle.rb.html) to see an example of how to use update_ca_bundle API.
def update_ca_bundle(ca_bundle_id, update_ca_bundle_details, opts = {})
  # Issues PUT /caBundles/{caBundleId}; the response body is deserialized
  # into OCI::CertificatesManagement::Models::CaBundle.
  logger.debug 'Calling operation CertificatesManagementClient#update_ca_bundle.' if logger

  if ca_bundle_id.nil?
    raise "Missing the required parameter 'ca_bundle_id' when calling update_ca_bundle."
  end
  if update_ca_bundle_details.nil?
    raise "Missing the required parameter 'update_ca_bundle_details' when calling update_ca_bundle."
  end
  if OCI::Internal::Util.blank_string?(ca_bundle_id)
    raise "Parameter value for 'ca_bundle_id' must not be blank"
  end

  resolved_path = '/caBundles/{caBundleId}'.sub('{caBundleId}', ca_bundle_id.to_s)

  # Build headers; optional request headers are only sent when supplied.
  request_headers = {}
  request_headers[:accept] = 'application/json'
  request_headers[:'content-type'] = 'application/json'
  request_headers[:'opc-request-id'] = opts[:opc_request_id] if opts[:opc_request_id]
  request_headers[:'if-match'] = opts[:if_match] if opts[:if_match]

  request_body = @api_client.object_to_http_body(update_ca_bundle_details)

  # Retry per the effective retry config; returns the Response from call_api.
  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#update_ca_bundle') do
    @api_client.call_api(
      :PUT,
      resolved_path,
      endpoint,
      header_params: request_headers,
      query_params: {},
      operation_signing_strategy: :standard,
      body: request_body,
      return_type: 'OCI::CertificatesManagement::Models::CaBundle'
    )
  end
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Updates the properties of a certificate.
# @param [String] certificate_id The OCID of the certificate.
# @param [OCI::CertificatesManagement::Models::UpdateCertificateDetails] update_certificate_details The details of the request to update a certificate.
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
# retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
# will include this value. Otherwise, a random request ID will be
# generated by the service.
#
# @option opts [String] :if_match For optimistic concurrency control. In the PUT or DELETE call for a
# resource, set the `if-match` parameter to the value of the etag from a
# previous GET or POST response for that resource. The resource will be
# updated or deleted only if the etag you provide matches the resource's
# current etag value.
#
# @return [Response] A Response object with data of type {OCI::CertificatesManagement::Models::Certificate Certificate}
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/update_certificate.rb.html) to see an example of how to use update_certificate API.
def update_certificate(certificate_id, update_certificate_details, opts = {})
  # Issues PUT /certificates/{certificateId}; the response body is deserialized
  # into OCI::CertificatesManagement::Models::Certificate.
  logger.debug 'Calling operation CertificatesManagementClient#update_certificate.' if logger

  if certificate_id.nil?
    raise "Missing the required parameter 'certificate_id' when calling update_certificate."
  end
  if update_certificate_details.nil?
    raise "Missing the required parameter 'update_certificate_details' when calling update_certificate."
  end
  if OCI::Internal::Util.blank_string?(certificate_id)
    raise "Parameter value for 'certificate_id' must not be blank"
  end

  resolved_path = '/certificates/{certificateId}'.sub('{certificateId}', certificate_id.to_s)

  # Build headers; optional request headers are only sent when supplied.
  request_headers = {}
  request_headers[:accept] = 'application/json'
  request_headers[:'content-type'] = 'application/json'
  request_headers[:'opc-request-id'] = opts[:opc_request_id] if opts[:opc_request_id]
  request_headers[:'if-match'] = opts[:if_match] if opts[:if_match]

  request_body = @api_client.object_to_http_body(update_certificate_details)

  # Retry per the effective retry config; returns the Response from call_api.
  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#update_certificate') do
    @api_client.call_api(
      :PUT,
      resolved_path,
      endpoint,
      header_params: request_headers,
      query_params: {},
      operation_signing_strategy: :standard,
      body: request_body,
      return_type: 'OCI::CertificatesManagement::Models::Certificate'
    )
  end
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines
# Updates the properties of the specified certificate authority (CA).
# @param [String] certificate_authority_id The OCID of the certificate authority (CA).
# @param [OCI::CertificatesManagement::Models::UpdateCertificateAuthorityDetails] update_certificate_authority_details The details of the request to update a CA.
# @param [Hash] opts the optional parameters
# @option opts [OCI::Retry::RetryConfig] :retry_config The retry configuration to apply to this operation. If no key is provided then the service-level
# retry configuration defined by {#retry_config} will be used. If an explicit `nil` value is provided then the operation will not retry
# @option opts [String] :opc_request_id Unique Oracle-assigned identifier for the request. If provided, the returned request ID
# will include this value. Otherwise, a random request ID will be
# generated by the service.
#
# @option opts [String] :if_match For optimistic concurrency control. In the PUT or DELETE call for a
# resource, set the `if-match` parameter to the value of the etag from a
# previous GET or POST response for that resource. The resource will be
# updated or deleted only if the etag you provide matches the resource's
# current etag value.
#
# @return [Response] A Response object with data of type {OCI::CertificatesManagement::Models::CertificateAuthority CertificateAuthority}
# @note Click [here](https://docs.cloud.oracle.com/en-us/iaas/tools/ruby-sdk-examples/latest/certificatesmanagement/update_certificate_authority.rb.html) to see an example of how to use update_certificate_authority API.
def update_certificate_authority(certificate_authority_id, update_certificate_authority_details, opts = {})
  # Issues PUT /certificateAuthorities/{certificateAuthorityId}; the response body
  # is deserialized into OCI::CertificatesManagement::Models::CertificateAuthority.
  logger.debug 'Calling operation CertificatesManagementClient#update_certificate_authority.' if logger

  if certificate_authority_id.nil?
    raise "Missing the required parameter 'certificate_authority_id' when calling update_certificate_authority."
  end
  if update_certificate_authority_details.nil?
    raise "Missing the required parameter 'update_certificate_authority_details' when calling update_certificate_authority."
  end
  if OCI::Internal::Util.blank_string?(certificate_authority_id)
    raise "Parameter value for 'certificate_authority_id' must not be blank"
  end

  resolved_path = '/certificateAuthorities/{certificateAuthorityId}'.sub('{certificateAuthorityId}', certificate_authority_id.to_s)

  # Build headers; optional request headers are only sent when supplied.
  request_headers = {}
  request_headers[:accept] = 'application/json'
  request_headers[:'content-type'] = 'application/json'
  request_headers[:'opc-request-id'] = opts[:opc_request_id] if opts[:opc_request_id]
  request_headers[:'if-match'] = opts[:if_match] if opts[:if_match]

  request_body = @api_client.object_to_http_body(update_certificate_authority_details)

  # Retry per the effective retry config; returns the Response from call_api.
  OCI::Retry.make_retrying_call(applicable_retry_config(opts), call_name: 'CertificatesManagementClient#update_certificate_authority') do
    @api_client.call_api(
      :PUT,
      resolved_path,
      endpoint,
      header_params: request_headers,
      query_params: {},
      operation_signing_strategy: :standard,
      body: request_body,
      return_type: 'OCI::CertificatesManagement::Models::CertificateAuthority'
    )
  end
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Style/IfUnlessModifier, Metrics/ParameterLists
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines
private
def applicable_retry_config(opts = {})
  # An explicitly supplied :retry_config wins even when its value is nil
  # (nil disables retries); otherwise fall back to the client-level config.
  opts.key?(:retry_config) ? opts[:retry_config] : @retry_config
end
end
end
# rubocop:enable Lint/UnneededCopDisableDirective, Metrics/LineLength
|
<reponame>polar-js/docgen
const DocumentationItem = require('./Item.js');
const DocumentationParameter = require('./Parameter.js');
/**
 * A documented method extracted from generated documentation data.
 * Reads the first entry of `data.signatures` for its description, return
 * type, and parameter list; parameters are stored keyed by name.
 */
class DocumentationMethod extends DocumentationItem {
/**
 * @param parent The owning documentation item (passed through to DocumentationItem).
 * @param data   Raw documentation node; must contain a non-empty `signatures` array.
 */
constructor(parent, data) {
super(parent, data);
// name -> DocumentationParameter, in signature order.
this.parameters = new Map();
this.parse(data);
}
// Populate description, return type and parameters from the first signature.
// NOTE(review): only signatures[0] is inspected; overloads beyond the first
// are ignored — confirm that is intentional.
parse(data) {
if (data.signatures[0].comment) {
this.description = data.signatures[0].comment.shortText;
}
// parseType is inherited from DocumentationItem (defined in Item.js).
this.returns = this.parseType(data.signatures[0].type)
if (data.signatures[0].parameters) data.signatures[0].parameters
.forEach(p => this.addParameter(p));
}
// Register one parameter, keyed by its name (later duplicates overwrite).
addParameter(p) {
this.parameters.set(p.name, new DocumentationParameter(this, p));
}
// Plain-object form for JSON output: return type plus serialized parameters.
serialize() {
return {
returns: this.returns,
// NOTE(review): calls p.serializer() — this class's own method is named
// serialize(); verify DocumentationParameter really defines serializer()
// and that this is not a typo.
parameters: this.parameters.size > 0 ? Array.from(this.parameters.values()).map(p => p.serializer()) : []
};
}
}
module.exports = DocumentationMethod;
class StateMachine:
    """Minimal keyed state store mapping state identifiers to arbitrary values.

    Not thread-safe; wrap access in a lock if shared across threads.
    """

    def __init__(self):
        # Maps state_id -> current value for that state.
        self.states = {}

    def set_state(self, state_id, value):
        """Store ``value`` under ``state_id``, overwriting any existing entry."""
        self.states[state_id] = value

    def get_state(self, state_id):
        """Return the value for ``state_id``, or None if it is not set."""
        return self.states.get(state_id)

    def reset_state(self, state_id):
        """Remove ``state_id`` if present; silently do nothing for unknown ids."""
        # pop with a default avoids the membership-test-then-delete race/boilerplate.
        self.states.pop(state_id, None)
#!/bin/sh
#
# Vivado(TM)
# runme.sh: a Vivado-generated Runs Script for UNIX
# Copyright 1986-2018 Xilinx, Inc. All Rights Reserved.
#
# NOTE(review): this script was generated on Windows — the PATH values below use
# C:/ drive paths and mix ';' with ':' as separators, which is wrong for a POSIX
# shell. The early 'exit' below deliberately prevents execution until the user
# rewrites PATH/LD_LIBRARY_PATH (remove the 'exit' only after fixing them).
echo "This script was generated under a different operating system."
echo "Please update the PATH and LD_LIBRARY_PATH variables below, before executing this script"
exit
# --- Everything below is unreachable until the guard above is removed. ---
if [ -z "$PATH" ]; then
PATH=C:/Xilinx/SDK/2018.3/bin;C:/Xilinx/Vivado/2018.3/ids_lite/ISE/bin/nt64;C:/Xilinx/Vivado/2018.3/ids_lite/ISE/lib/nt64:C:/Xilinx/Vivado/2018.3/bin
else
PATH=C:/Xilinx/SDK/2018.3/bin;C:/Xilinx/Vivado/2018.3/ids_lite/ISE/bin/nt64;C:/Xilinx/Vivado/2018.3/ids_lite/ISE/lib/nt64:C:/Xilinx/Vivado/2018.3/bin:$PATH
fi
export PATH
if [ -z "$LD_LIBRARY_PATH" ]; then
LD_LIBRARY_PATH=
else
LD_LIBRARY_PATH=:$LD_LIBRARY_PATH
fi
export LD_LIBRARY_PATH
# Run directory for this synthesis run; cd there before launching Vivado.
HD_PWD='C:/Users/curtn/Documents/GitHub/sandbox-zynq/baremetal-amp-dual/baremetal-amp-dual.runs/synth_1'
cd "$HD_PWD"
HD_LOG=runme.log
/bin/touch $HD_LOG
# EAStep: run one tool step via ISEWrap.sh, appending stdout/stderr to the log;
# abort the whole script if the step fails.
ISEStep="./ISEWrap.sh"
EAStep()
{
$ISEStep $HD_LOG "$@" >> $HD_LOG 2>&1
if [ $? -ne 0 ]
then
exit
fi
}
# Launch batch-mode Vivado synthesis driven by the generated Tcl script.
EAStep vivado -log design_1_wrapper.vds -m64 -product Vivado -mode batch -messageDb vivado.pb -notrace -source design_1_wrapper.tcl
|
# Generated by Powerlevel10k configuration wizard on 2020-05-26 at 21:02 EDT.
# Based on romkatv/powerlevel10k/config/p10k-classic.zsh, checksum 58521.
# Wizard options: nerdfont-complete + powerline, small icons, classic, unicode, dark,
# 24h time, angled separators, blurred heads, flat tails, 2 lines, dotted, right frame,
# compact, many icons, concise, transient_prompt, instant_prompt=verbose.
# Type `p10k configure` to generate another config.
#
# Config for Powerlevel10k with classic powerline prompt style. Type `p10k configure` to generate
# your own config based on it.
#
# Tip: Looking for a nice color? Here's a one-liner to print colormap.
#
# for i in {0..255}; do print -Pn "%K{$i} %k%F{$i}${(l:3::0:)i}%f " ${${(M)$((i%6)):#3}:+$'\n'}; done
# Temporarily change options.
'builtin' 'local' '-a' 'p10k_config_opts'
[[ ! -o 'aliases' ]] || p10k_config_opts+=('aliases')
[[ ! -o 'sh_glob' ]] || p10k_config_opts+=('sh_glob')
[[ ! -o 'no_brace_expand' ]] || p10k_config_opts+=('no_brace_expand')
'builtin' 'setopt' 'no_aliases' 'no_sh_glob' 'brace_expand'
() {
emulate -L zsh -o extended_glob
# Unset all configuration options. This allows you to apply configuration changes without
# restarting zsh. Edit ~/.p10k.zsh and type `source ~/.p10k.zsh`.
unset -m 'POWERLEVEL9K_*|DEFAULT_USER'
# Zsh >= 5.1 is required.
autoload -Uz is-at-least && is-at-least 5.1 || return
# The list of segments shown on the left. Fill it with the most important segments.
typeset -g POWERLEVEL9K_LEFT_PROMPT_ELEMENTS=(
# =========================[ Line #1 ]=========================
os_icon # os identifier
context # user@host
dir # current directory
vcs # git status
# =========================[ Line #2 ]=========================
newline # \n
prompt_char # prompt symbol
)
# The list of segments shown on the right. Fill it with less important segments.
# Right prompt on the last prompt line (where you are typing your commands) gets
# automatically hidden when the input line reaches it. Right prompt above the
# last prompt line gets hidden if it would overlap with left prompt.
typeset -g POWERLEVEL9K_RIGHT_PROMPT_ELEMENTS=(
# =========================[ Line #1 ]=========================
status # exit code of the last command
command_execution_time # duration of the last command
background_jobs # presence of background jobs
direnv # direnv status (https://direnv.net/)
asdf # asdf version manager (https://github.com/asdf-vm/asdf)
virtualenv # python virtual environment (https://docs.python.org/3/library/venv.html)
anaconda # conda environment (https://conda.io/)
pyenv # python environment (https://github.com/pyenv/pyenv)
goenv # go environment (https://github.com/syndbg/goenv)
nodenv # node.js version from nodenv (https://github.com/nodenv/nodenv)
nvm # node.js version from nvm (https://github.com/nvm-sh/nvm)
nodeenv # node.js environment (https://github.com/ekalinin/nodeenv)
# node_version # node.js version
# go_version # go version (https://golang.org)
# rust_version # rustc version (https://www.rust-lang.org)
# dotnet_version # .NET version (https://dotnet.microsoft.com)
# php_version # php version (https://www.php.net/)
# laravel_version # laravel php framework version (https://laravel.com/)
# java_version # java version (https://www.java.com/)
# package # name@version from package.json (https://docs.npmjs.com/files/package.json)
rbenv # ruby version from rbenv (https://github.com/rbenv/rbenv)
rvm # ruby version from rvm (https://rvm.io)
fvm # flutter version management (https://github.com/leoafarias/fvm)
luaenv # lua version from luaenv (https://github.com/cehoffman/luaenv)
jenv # java version from jenv (https://github.com/jenv/jenv)
plenv # perl version from plenv (https://github.com/tokuhirom/plenv)
phpenv # php version from phpenv (https://github.com/phpenv/phpenv)
haskell_stack # haskell version from stack (https://haskellstack.org/)
kubecontext # current kubernetes context (https://kubernetes.io/)
terraform # terraform workspace (https://www.terraform.io)
aws # aws profile (https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html)
aws_eb_env # aws elastic beanstalk environment (https://aws.amazon.com/elasticbeanstalk/)
azure # azure account name (https://docs.microsoft.com/en-us/cli/azure)
gcloud # google cloud cli account and project (https://cloud.google.com/)
google_app_cred # google application credentials (https://cloud.google.com/docs/authentication/production)
nordvpn # nordvpn connection status, linux only (https://nordvpn.com/)
ranger # ranger shell (https://github.com/ranger/ranger)
nnn # nnn shell (https://github.com/jarun/nnn)
vim_shell # vim shell indicator (:sh)
midnight_commander # midnight commander shell (https://midnight-commander.org/)
nix_shell # nix shell (https://nixos.org/nixos/nix-pills/developing-with-nix-shell.html)
# vi_mode # vi mode (you don't need this if you've enabled prompt_char)
# vpn_ip # virtual private network indicator
# load # CPU load
# disk_usage # disk usage
# ram # free RAM
# swap # used swap
todo # todo items (https://github.com/todotxt/todo.txt-cli)
timewarrior # timewarrior tracking status (https://timewarrior.net/)
taskwarrior # taskwarrior task count (https://taskwarrior.org/)
time # current time
# =========================[ Line #2 ]=========================
newline # \n
# ip # ip address and bandwidth usage for a specified network interface
# public_ip # public IP address
# proxy # system-wide http/https/ftp proxy
# battery # internal battery
# wifi # wifi speed
#example # example user-defined segment (see prompt_example function below)
clock_stat
)
# Defines character set used by powerlevel10k. It's best to let `p10k configure` set it for you.
typeset -g POWERLEVEL9K_MODE=nerdfont-complete
# When set to `moderate`, some icons will have an extra space after them. This is meant to avoid
# icon overlap when using non-monospace fonts. When set to `none`, spaces are not added.
typeset -g POWERLEVEL9K_ICON_PADDING=none
# When set to true, icons appear before content on both sides of the prompt. When set
# to false, icons go after content. If empty or not set, icons go before content in the left
# prompt and after content in the right prompt.
#
# You can also override it for a specific segment:
#
# POWERLEVEL9K_STATUS_ICON_BEFORE_CONTENT=false
#
# Or for a specific segment in specific state:
#
# POWERLEVEL9K_DIR_NOT_WRITABLE_ICON_BEFORE_CONTENT=false
typeset -g POWERLEVEL9K_ICON_BEFORE_CONTENT=
# Add an empty line before each prompt.
typeset -g POWERLEVEL9K_PROMPT_ADD_NEWLINE=false
# Connect left prompt lines with these symbols. You'll probably want to use the same color
# as POWERLEVEL9K_MULTILINE_FIRST_PROMPT_GAP_FOREGROUND below.
typeset -g POWERLEVEL9K_MULTILINE_FIRST_PROMPT_PREFIX=
typeset -g POWERLEVEL9K_MULTILINE_NEWLINE_PROMPT_PREFIX=
typeset -g POWERLEVEL9K_MULTILINE_LAST_PROMPT_PREFIX=
# Connect right prompt lines with these symbols.
typeset -g POWERLEVEL9K_MULTILINE_FIRST_PROMPT_SUFFIX='%240F─╮'
typeset -g POWERLEVEL9K_MULTILINE_NEWLINE_PROMPT_SUFFIX='%240F─┤'
typeset -g POWERLEVEL9K_MULTILINE_LAST_PROMPT_SUFFIX='%240F─╯'
# Filler between left and right prompt on the first prompt line. You can set it to ' ', '·' or
# '─'. The last two make it easier to see the alignment between left and right prompt and to
# separate prompt from command output. You might want to set POWERLEVEL9K_PROMPT_ADD_NEWLINE=false
# for a more compact prompt if using this option.
typeset -g POWERLEVEL9K_MULTILINE_FIRST_PROMPT_GAP_CHAR='·'
typeset -g POWERLEVEL9K_MULTILINE_FIRST_PROMPT_GAP_BACKGROUND=
typeset -g POWERLEVEL9K_MULTILINE_NEWLINE_PROMPT_GAP_BACKGROUND=
if [[ $POWERLEVEL9K_MULTILINE_FIRST_PROMPT_GAP_CHAR != ' ' ]]; then
# The color of the filler. You'll probably want to match the color of POWERLEVEL9K_MULTILINE
# ornaments defined above.
typeset -g POWERLEVEL9K_MULTILINE_FIRST_PROMPT_GAP_FOREGROUND=240
# Start filler from the edge of the screen if there are no left segments on the first line.
typeset -g POWERLEVEL9K_EMPTY_LINE_LEFT_PROMPT_FIRST_SEGMENT_END_SYMBOL='%{%}'
# End filler on the edge of the screen if there are no right segments on the first line.
typeset -g POWERLEVEL9K_EMPTY_LINE_RIGHT_PROMPT_FIRST_SEGMENT_START_SYMBOL='%{%}'
fi
# Default background color.
typeset -g POWERLEVEL9K_BACKGROUND=236
# Separator between same-color segments on the left.
typeset -g POWERLEVEL9K_LEFT_SUBSEGMENT_SEPARATOR='%244F\uE0B1'
# Separator between same-color segments on the right.
typeset -g POWERLEVEL9K_RIGHT_SUBSEGMENT_SEPARATOR='%244F\uE0B3'
# Separator between different-color segments on the left.
typeset -g POWERLEVEL9K_LEFT_SEGMENT_SEPARATOR='\uE0B0'
# Separator between different-color segments on the right.
typeset -g POWERLEVEL9K_RIGHT_SEGMENT_SEPARATOR='\uE0B2'
# The right end of left prompt.
typeset -g POWERLEVEL9K_LEFT_PROMPT_LAST_SEGMENT_END_SYMBOL='▓▒░'
# The left end of right prompt.
typeset -g POWERLEVEL9K_RIGHT_PROMPT_FIRST_SEGMENT_START_SYMBOL='░▒▓'
# The left end of left prompt.
typeset -g POWERLEVEL9K_LEFT_PROMPT_FIRST_SEGMENT_START_SYMBOL=''
# The right end of right prompt.
typeset -g POWERLEVEL9K_RIGHT_PROMPT_LAST_SEGMENT_END_SYMBOL=''
# Left prompt terminator for lines without any segments.
typeset -g POWERLEVEL9K_EMPTY_LINE_LEFT_PROMPT_LAST_SEGMENT_END_SYMBOL=
#################################[ os_icon: os identifier ]##################################
# OS identifier color.
typeset -g POWERLEVEL9K_OS_ICON_FOREGROUND=255
# Custom icon.
# typeset -g POWERLEVEL9K_OS_ICON_CONTENT_EXPANSION='⭐'
################################[ prompt_char: prompt symbol ]################################
# Transparent background.
typeset -g POWERLEVEL9K_PROMPT_CHAR_BACKGROUND=
# Green prompt symbol if the last command succeeded.
typeset -g POWERLEVEL9K_PROMPT_CHAR_OK_{VIINS,VICMD,VIVIS,VIOWR}_FOREGROUND=76
# Red prompt symbol if the last command failed.
typeset -g POWERLEVEL9K_PROMPT_CHAR_ERROR_{VIINS,VICMD,VIVIS,VIOWR}_FOREGROUND=196
# Default prompt symbol.
typeset -g POWERLEVEL9K_PROMPT_CHAR_{OK,ERROR}_VIINS_CONTENT_EXPANSION='❯'
# Prompt symbol in command vi mode.
typeset -g POWERLEVEL9K_PROMPT_CHAR_{OK,ERROR}_VICMD_CONTENT_EXPANSION='❮'
# Prompt symbol in visual vi mode.
typeset -g POWERLEVEL9K_PROMPT_CHAR_{OK,ERROR}_VIVIS_CONTENT_EXPANSION='Ⅴ'
# Prompt symbol in overwrite vi mode.
typeset -g POWERLEVEL9K_PROMPT_CHAR_{OK,ERROR}_VIOWR_CONTENT_EXPANSION='▶'
typeset -g POWERLEVEL9K_PROMPT_CHAR_OVERWRITE_STATE=true
# No line terminator if prompt_char is the last segment.
typeset -g POWERLEVEL9K_PROMPT_CHAR_LEFT_PROMPT_LAST_SEGMENT_END_SYMBOL=
# No line introducer if prompt_char is the first segment.
typeset -g POWERLEVEL9K_PROMPT_CHAR_LEFT_PROMPT_FIRST_SEGMENT_START_SYMBOL=
# No surrounding whitespace.
typeset -g POWERLEVEL9K_PROMPT_CHAR_LEFT_{LEFT,RIGHT}_WHITESPACE=
##################################[ dir: current directory ]##################################
# Default current directory color.
typeset -g POWERLEVEL9K_DIR_FOREGROUND=31
# If directory is too long, shorten some of its segments to the shortest possible unique
# prefix. The shortened directory can be tab-completed to the original.
typeset -g POWERLEVEL9K_SHORTEN_STRATEGY=truncate_to_unique
# Replace removed segment suffixes with this symbol.
typeset -g POWERLEVEL9K_SHORTEN_DELIMITER=
# Color of the shortened directory segments.
typeset -g POWERLEVEL9K_DIR_SHORTENED_FOREGROUND=103
# Color of the anchor directory segments. Anchor segments are never shortened. The first
# segment is always an anchor.
typeset -g POWERLEVEL9K_DIR_ANCHOR_FOREGROUND=39
# Display anchor directory segments in bold.
typeset -g POWERLEVEL9K_DIR_ANCHOR_BOLD=true
# Don't shorten directories that contain any of these files. They are anchors.
local anchor_files=(
# Version-control roots.
.bzr
.citc
.git
.hg
# Per-directory tool version files (node, python, go, ruby, lua, java, perl, php).
.node-version
.python-version
.go-version
.ruby-version
.lua-version
.java-version
.perl-version
.php-version
# NOTE(review): asdf's version file is `.tool-versions` (plural) — confirm this entry.
.tool-version
# User-placed marker file for forcing an anchor (no tool attached to it).
.shorten_folder_marker
.svn
.terraform
CVS
# Project manifests.
Cargo.toml
composer.json
go.mod
package.json
stack.yaml
)
# Join the file names into one extended-glob alternation: "(f1|f2|...)".
# ${(j:|:)anchor_files} joins array elements with '|'.
typeset -g POWERLEVEL9K_SHORTEN_FOLDER_MARKER="(${(j:|:)anchor_files})"
# If set to "first" ("last"), remove everything before the first (last) subdirectory that contains
# files matching $POWERLEVEL9K_SHORTEN_FOLDER_MARKER. For example, when the current directory is
# /foo/bar/git_repo/nested_git_repo/baz, prompt will display git_repo/nested_git_repo/baz (first)
# or nested_git_repo/baz (last). This assumes that git_repo and nested_git_repo contain markers
# and other directories don't.
typeset -g POWERLEVEL9K_DIR_TRUNCATE_BEFORE_MARKER=false
# Don't shorten this many last directory segments. They are anchors.
typeset -g POWERLEVEL9K_SHORTEN_DIR_LENGTH=1
# Shorten directory if it's longer than this even if there is space for it. The value can
# be either absolute (e.g., '80') or a percentage of terminal width (e.g., '50%'). If empty,
# directory will be shortened only when prompt doesn't fit or when other parameters demand it
# (see POWERLEVEL9K_DIR_MIN_COMMAND_COLUMNS and POWERLEVEL9K_DIR_MIN_COMMAND_COLUMNS_PCT below).
# If set to `0`, directory will always be shortened to its minimum length.
typeset -g POWERLEVEL9K_DIR_MAX_LENGTH=80
# When `dir` segment is on the last prompt line, try to shorten it enough to leave at least this
# many columns for typing commands.
typeset -g POWERLEVEL9K_DIR_MIN_COMMAND_COLUMNS=40
# When `dir` segment is on the last prompt line, try to shorten it enough to leave at least
# COLUMNS * POWERLEVEL9K_DIR_MIN_COMMAND_COLUMNS_PCT * 0.01 columns for typing commands.
typeset -g POWERLEVEL9K_DIR_MIN_COMMAND_COLUMNS_PCT=50
# If set to true, embed a hyperlink into the directory. Useful for quickly
# opening a directory in the file manager simply by clicking the link.
# Can also be handy when the directory is shortened, as it allows you to see
# the full directory that was used in previous commands.
typeset -g POWERLEVEL9K_DIR_HYPERLINK=false
# Enable special styling for non-writable directories. See POWERLEVEL9K_LOCK_ICON and
# POWERLEVEL9K_DIR_CLASSES below.
typeset -g POWERLEVEL9K_DIR_SHOW_WRITABLE=v2
# The default icon shown next to non-writable directories when POWERLEVEL9K_DIR_SHOW_WRITABLE is
# set to v2.
# typeset -g POWERLEVEL9K_LOCK_ICON='⭐'
# POWERLEVEL9K_DIR_CLASSES allows you to specify custom icons and colors for different
# directories. It must be an array with 3 * N elements. Each triplet consists of:
#
# 1. A pattern against which the current directory ($PWD) is matched. Matching is done with
# extended_glob option enabled.
# 2. Directory class for the purpose of styling.
# 3. An empty string.
#
# Triplets are tried in order. The first triplet whose pattern matches $PWD wins.
#
# If POWERLEVEL9K_DIR_SHOW_WRITABLE is set to v2 and the current directory is not writable,
# its class gets suffix _NOT_WRITABLE.
#
# For example, given these settings:
#
# typeset -g POWERLEVEL9K_DIR_CLASSES=(
# '~/work(|/*)' WORK ''
# '~(|/*)' HOME ''
# '*' DEFAULT '')
#
# Whenever the current directory is ~/work or a subdirectory of ~/work, it gets styled with class
# WORK or WORK_NOT_WRITABLE.
#
# Simply assigning classes to directories doesn't have any visible effects. It merely gives you an
# option to define custom colors and icons for different directory classes.
#
# # Styling for WORK.
# typeset -g POWERLEVEL9K_DIR_WORK_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_DIR_WORK_FOREGROUND=31
# typeset -g POWERLEVEL9K_DIR_WORK_SHORTENED_FOREGROUND=103
# typeset -g POWERLEVEL9K_DIR_WORK_ANCHOR_FOREGROUND=39
#
# # Styling for WORK_NOT_WRITABLE.
# typeset -g POWERLEVEL9K_DIR_WORK_NOT_WRITABLE_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_DIR_WORK_NOT_WRITABLE_FOREGROUND=31
# typeset -g POWERLEVEL9K_DIR_WORK_NOT_WRITABLE_SHORTENED_FOREGROUND=103
# typeset -g POWERLEVEL9K_DIR_WORK_NOT_WRITABLE_ANCHOR_FOREGROUND=39
#
# If a styling parameter isn't explicitly defined for some class, it falls back to the classless
# parameter. For example, if POWERLEVEL9K_DIR_WORK_NOT_WRITABLE_FOREGROUND is not set, it falls
# back to POWERLEVEL9K_DIR_FOREGROUND.
#
# typeset -g POWERLEVEL9K_DIR_CLASSES=()
# Custom prefix.
# typeset -g POWERLEVEL9K_DIR_PREFIX='%246Fin '
#####################################[ vcs: git status ]######################################
# Branch icon. Set this parameter to '\uF126 ' for the popular Powerline branch icon.
typeset -g POWERLEVEL9K_VCS_BRANCH_ICON='\uF126 '
# Untracked files icon. It's really a question mark, your font isn't broken.
# Change the value of this parameter to show a different icon.
typeset -g POWERLEVEL9K_VCS_UNTRACKED_ICON='?'
# Formatter for Git status.
#
# Example output: master ⇣42⇡42 *42 merge ~42 +42 !42 ?42.
#
# You can edit the function to customize how Git status looks.
#
# VCS_STATUS_* parameters are set by gitstatus plugin. See reference:
# https://github.com/romkatv/gitstatus/blob/master/gitstatus.plugin.zsh.
function my_git_formatter() {
emulate -L zsh

# Renders Git status for the vcs prompt segment.
#
# $1 == 1: gitstatus data is current  -> colorful styling.
# $1 == 0: gitstatus data is loading or stale -> all-grey styling.
#
# The result is written to the global parameter my_git_format (consumed by
# POWERLEVEL9K_VCS_CONTENT_EXPANSION / POWERLEVEL9K_VCS_LOADING_CONTENT_EXPANSION
# below); nothing is returned.

if [[ -n $P9K_CONTENT ]]; then
# If P9K_CONTENT is not empty, use it. It's either "loading" or from vcs_info (not from
# gitstatus plugin). VCS_STATUS_* parameters are not available in this case.
typeset -g my_git_format=$P9K_CONTENT
return
fi

if (( $1 )); then
# Styling for up-to-date Git status.
local meta='%246F' # grey foreground
local clean='%76F' # green foreground
local modified='%178F' # yellow foreground
local untracked='%39F' # blue foreground
local conflicted='%196F' # red foreground
else
# Styling for incomplete and stale Git status.
local meta='%244F' # grey foreground
local clean='%244F' # grey foreground
local modified='%244F' # grey foreground
local untracked='%244F' # grey foreground
local conflicted='%244F' # grey foreground
fi

local res # accumulated prompt text
local where # branch or tag

# Branch: branch icon + name; tag: '#' + name.
# ${(V)...} makes unprintable characters visible; (g::) processes escape
# sequences in the icon (e.g. \uF126 set above).
if [[ -n $VCS_STATUS_LOCAL_BRANCH ]]; then
res+="${clean}${(g::)POWERLEVEL9K_VCS_BRANCH_ICON}"
where=${(V)VCS_STATUS_LOCAL_BRANCH}
elif [[ -n $VCS_STATUS_TAG ]]; then
res+="${meta}#"
where=${(V)VCS_STATUS_TAG}
fi

# If local branch name or tag is at most 32 characters long, show it in full.
# Otherwise show the first 12 … the last 12 (middle replaced via subscript slice).
# Tip: To always show local branch name in full without truncation, delete the next line.
(( $#where > 32 )) && where[13,-13]="…"
res+="${clean}${where//\%/%%}" # escape % so prompt expansion doesn't eat it

# Display the current Git commit (first 8 chars) if there is no branch or tag.
# Tip: To always display the current Git commit, remove `[[ -z $where ]] &&` from the next line.
[[ -z $where ]] && res+="${meta}@${clean}${VCS_STATUS_COMMIT[1,8]}"

# Show tracking branch name if it differs from local branch.
# ${VCS_STATUS_REMOTE_BRANCH:#$VCS_STATUS_LOCAL_BRANCH} expands to empty when the names match.
if [[ -n ${VCS_STATUS_REMOTE_BRANCH:#$VCS_STATUS_LOCAL_BRANCH} ]]; then
res+="${meta}:${clean}${(V)VCS_STATUS_REMOTE_BRANCH//\%/%%}" # escape %
fi

# ⇣42 if behind the remote.
(( VCS_STATUS_COMMITS_BEHIND )) && res+=" ${clean}⇣${VCS_STATUS_COMMITS_BEHIND}"
# ⇡42 if ahead of the remote; no leading space if also behind the remote: ⇣42⇡42.
(( VCS_STATUS_COMMITS_AHEAD && !VCS_STATUS_COMMITS_BEHIND )) && res+=" "
(( VCS_STATUS_COMMITS_AHEAD )) && res+="${clean}⇡${VCS_STATUS_COMMITS_AHEAD}"
# ⇠42 if behind the push remote.
(( VCS_STATUS_PUSH_COMMITS_BEHIND )) && res+=" ${clean}⇠${VCS_STATUS_PUSH_COMMITS_BEHIND}"
(( VCS_STATUS_PUSH_COMMITS_AHEAD && !VCS_STATUS_PUSH_COMMITS_BEHIND )) && res+=" "
# ⇢42 if ahead of the push remote; no leading space if also behind: ⇠42⇢42.
(( VCS_STATUS_PUSH_COMMITS_AHEAD )) && res+="${clean}⇢${VCS_STATUS_PUSH_COMMITS_AHEAD}"
# *42 if have stashes.
(( VCS_STATUS_STASHES )) && res+=" ${clean}*${VCS_STATUS_STASHES}"
# 'merge' (or rebase etc.) if the repo is in an unusual state.
[[ -n $VCS_STATUS_ACTION ]] && res+=" ${conflicted}${VCS_STATUS_ACTION}"
# ~42 if have merge conflicts.
(( VCS_STATUS_NUM_CONFLICTED )) && res+=" ${conflicted}~${VCS_STATUS_NUM_CONFLICTED}"
# +42 if have staged changes.
(( VCS_STATUS_NUM_STAGED )) && res+=" ${modified}+${VCS_STATUS_NUM_STAGED}"
# !42 if have unstaged changes.
(( VCS_STATUS_NUM_UNSTAGED )) && res+=" ${modified}!${VCS_STATUS_NUM_UNSTAGED}"
# ?42 if have untracked files. It's really a question mark, your font isn't broken.
# See POWERLEVEL9K_VCS_UNTRACKED_ICON above if you want to use a different icon.
# Remove the next line if you don't want to see untracked files at all.
(( VCS_STATUS_NUM_UNTRACKED )) && res+=" ${untracked}${(g::)POWERLEVEL9K_VCS_UNTRACKED_ICON}${VCS_STATUS_NUM_UNTRACKED}"
# "─" if the number of unstaged files is unknown. This can happen due to
# POWERLEVEL9K_VCS_MAX_INDEX_SIZE_DIRTY (see below) being set to a non-negative number lower
# than the number of files in the Git index, or due to bash.showDirtyState being set to false
# in the repository config. The number of staged and untracked files may also be unknown
# in this case.
(( VCS_STATUS_HAS_UNSTAGED == -1 )) && res+=" ${modified}─"

typeset -g my_git_format=$res
}
functions -M my_git_formatter 2>/dev/null
# Don't count the number of unstaged, untracked and conflicted files in Git repositories with
# more than this many files in the index. Negative value means infinity.
#
# If you are working in Git repositories with tens of millions of files and seeing performance
# sagging, try setting POWERLEVEL9K_VCS_MAX_INDEX_SIZE_DIRTY to a number lower than the output
# of `git ls-files | wc -l`. Alternatively, add `bash.showDirtyState = false` to the repository's
# config: `git config bash.showDirtyState false`.
typeset -g POWERLEVEL9K_VCS_MAX_INDEX_SIZE_DIRTY=-1
# Don't show Git status in prompt for repositories whose workdir matches this pattern.
# For example, if set to '~', the Git repository at $HOME/.git will be ignored.
# Multiple patterns can be combined with '|': '~(|/foo)|/bar/baz/*'.
typeset -g POWERLEVEL9K_VCS_DISABLED_WORKDIR_PATTERN='~'
# Disable the default Git status formatting.
typeset -g POWERLEVEL9K_VCS_DISABLE_GITSTATUS_FORMATTING=true
# Install our own Git status formatter.
typeset -g POWERLEVEL9K_VCS_CONTENT_EXPANSION='${$((my_git_formatter(1)))+${my_git_format}}'
typeset -g POWERLEVEL9K_VCS_LOADING_CONTENT_EXPANSION='${$((my_git_formatter(0)))+${my_git_format}}'
# Enable counters for staged, unstaged, etc.
typeset -g POWERLEVEL9K_VCS_{STAGED,UNSTAGED,UNTRACKED,CONFLICTED,COMMITS_AHEAD,COMMITS_BEHIND}_MAX_NUM=-1
# Icon color.
typeset -g POWERLEVEL9K_VCS_VISUAL_IDENTIFIER_COLOR=76
typeset -g POWERLEVEL9K_VCS_LOADING_VISUAL_IDENTIFIER_COLOR=244
# Custom icon.
# typeset -g POWERLEVEL9K_VCS_VISUAL_IDENTIFIER_EXPANSION='⭐'
# Custom prefix.
# typeset -g POWERLEVEL9K_VCS_PREFIX='%246Fon '
# Show status of repositories of these types. You can add svn and/or hg if you are
# using them. If you do, your prompt may become slow even when your current directory
# isn't in an svn or hg repository.
typeset -g POWERLEVEL9K_VCS_BACKENDS=(git)
# These settings are used for repositories other than Git or when gitstatusd fails and
# Powerlevel10k has to fall back to using vcs_info.
typeset -g POWERLEVEL9K_VCS_CLEAN_FOREGROUND=76
typeset -g POWERLEVEL9K_VCS_UNTRACKED_FOREGROUND=76
typeset -g POWERLEVEL9K_VCS_MODIFIED_FOREGROUND=178
##########################[ status: exit code of the last command ]###########################
# Enable OK_PIPE, ERROR_PIPE and ERROR_SIGNAL status states to allow us to enable, disable and
# style them independently from the regular OK and ERROR state.
typeset -g POWERLEVEL9K_STATUS_EXTENDED_STATES=true
# Status on success. No content, just an icon. No need to show it if prompt_char is enabled as
# it will signify success by turning green.
typeset -g POWERLEVEL9K_STATUS_OK=false
typeset -g POWERLEVEL9K_STATUS_OK_FOREGROUND=70
typeset -g POWERLEVEL9K_STATUS_OK_VISUAL_IDENTIFIER_EXPANSION='✔'
# Status when some part of a pipe command fails but the overall exit status is zero. It may look
# like this: 1|0.
typeset -g POWERLEVEL9K_STATUS_OK_PIPE=true
typeset -g POWERLEVEL9K_STATUS_OK_PIPE_FOREGROUND=70
typeset -g POWERLEVEL9K_STATUS_OK_PIPE_VISUAL_IDENTIFIER_EXPANSION='✔'
# Status when it's just an error code (e.g., '1'). No need to show it if prompt_char is enabled as
# it will signify error by turning red.
typeset -g POWERLEVEL9K_STATUS_ERROR=false
typeset -g POWERLEVEL9K_STATUS_ERROR_FOREGROUND=160
typeset -g POWERLEVEL9K_STATUS_ERROR_VISUAL_IDENTIFIER_EXPANSION='✘'
# Status when the last command was terminated by a signal.
typeset -g POWERLEVEL9K_STATUS_ERROR_SIGNAL=true
typeset -g POWERLEVEL9K_STATUS_ERROR_SIGNAL_FOREGROUND=160
# Use terse signal names: "INT" instead of "SIGINT(2)".
typeset -g POWERLEVEL9K_STATUS_VERBOSE_SIGNAME=false
typeset -g POWERLEVEL9K_STATUS_ERROR_SIGNAL_VISUAL_IDENTIFIER_EXPANSION='✘'
# Status when some part of a pipe command fails and the overall exit status is also non-zero.
# It may look like this: 1|0.
typeset -g POWERLEVEL9K_STATUS_ERROR_PIPE=true
typeset -g POWERLEVEL9K_STATUS_ERROR_PIPE_FOREGROUND=160
typeset -g POWERLEVEL9K_STATUS_ERROR_PIPE_VISUAL_IDENTIFIER_EXPANSION='✘'
###################[ command_execution_time: duration of the last command ]###################
# Show duration of the last command if takes longer than this many seconds.
typeset -g POWERLEVEL9K_COMMAND_EXECUTION_TIME_THRESHOLD=3
# Show this many fractional digits. Zero means round to seconds.
typeset -g POWERLEVEL9K_COMMAND_EXECUTION_TIME_PRECISION=0
# Execution time color.
typeset -g POWERLEVEL9K_COMMAND_EXECUTION_TIME_FOREGROUND=248
# Duration format: 1d 2h 3m 4s.
typeset -g POWERLEVEL9K_COMMAND_EXECUTION_TIME_FORMAT='d h m s'
# Custom icon.
# typeset -g POWERLEVEL9K_COMMAND_EXECUTION_TIME_VISUAL_IDENTIFIER_EXPANSION='⭐'
# Custom prefix.
# typeset -g POWERLEVEL9K_COMMAND_EXECUTION_TIME_PREFIX='%246Ftook '
#######################[ background_jobs: presence of background jobs ]#######################
# Don't show the number of background jobs.
typeset -g POWERLEVEL9K_BACKGROUND_JOBS_VERBOSE=false
# Background jobs color.
typeset -g POWERLEVEL9K_BACKGROUND_JOBS_FOREGROUND=37
# Custom icon.
# typeset -g POWERLEVEL9K_BACKGROUND_JOBS_VISUAL_IDENTIFIER_EXPANSION='⭐'
#######################[ direnv: direnv status (https://direnv.net/) ]########################
# Direnv color.
typeset -g POWERLEVEL9K_DIRENV_FOREGROUND=178
# Custom icon.
# typeset -g POWERLEVEL9K_DIRENV_VISUAL_IDENTIFIER_EXPANSION='⭐'
###############[ asdf: asdf version manager (https://github.com/asdf-vm/asdf) ]###############
# Default asdf color. Only used to display tools for which there is no color override (see below).
typeset -g POWERLEVEL9K_ASDF_FOREGROUND=66
# There are four parameters that can be used to hide asdf tools. Each parameter describes
# conditions under which a tool gets hidden. Parameters can hide tools but not unhide them. If at
# least one parameter decides to hide a tool, that tool gets hidden. If no parameter decides to
# hide a tool, it gets shown.
#
# Special note on the difference between POWERLEVEL9K_ASDF_SOURCES and
# POWERLEVEL9K_ASDF_PROMPT_ALWAYS_SHOW. Consider the effect of the following commands:
#
# asdf local python 3.8.1
# asdf global python 3.8.1
#
# After running both commands the current python version is 3.8.1 and its source is "local" as
# it takes precedence over "global". If POWERLEVEL9K_ASDF_PROMPT_ALWAYS_SHOW is set to false,
# it'll hide python version in this case because 3.8.1 is the same as the global version.
# POWERLEVEL9K_ASDF_SOURCES will hide python version only if the value of this parameter doesn't
# contain "local".
# Hide tool versions that don't come from one of these sources.
#
# Available sources:
#
# - shell `asdf current` says "set by ASDF_${TOOL}_VERSION environment variable"
# - local `asdf current` says "set by /some/not/home/directory/file"
# - global `asdf current` says "set by /home/username/file"
#
# Note: If this parameter is set to (shell local global), it won't hide tools.
# Tip: Override this parameter for ${TOOL} with POWERLEVEL9K_ASDF_${TOOL}_SOURCES.
typeset -g POWERLEVEL9K_ASDF_SOURCES=(shell local global)
# If set to false, hide tool versions that are the same as global.
#
# Note: The name of this parameter doesn't reflect its meaning at all.
# Note: If this parameter is set to true, it won't hide tools.
# Tip: Override this parameter for ${TOOL} with POWERLEVEL9K_ASDF_${TOOL}_PROMPT_ALWAYS_SHOW.
typeset -g POWERLEVEL9K_ASDF_PROMPT_ALWAYS_SHOW=false
# If set to false, hide tool versions that are equal to "system".
#
# Note: If this parameter is set to true, it won't hide tools.
# Tip: Override this parameter for ${TOOL} with POWERLEVEL9K_ASDF_${TOOL}_SHOW_SYSTEM.
typeset -g POWERLEVEL9K_ASDF_SHOW_SYSTEM=true
# If set to non-empty value, hide tools unless there is a file matching the specified file pattern
# in the current directory, or its parent directory, or its grandparent directory, and so on.
#
# Note: If this parameter is set to empty value, it won't hide tools.
# Note: SHOW_ON_UPGLOB isn't specific to asdf. It works with all prompt segments.
# Tip: Override this parameter for ${TOOL} with POWERLEVEL9K_ASDF_${TOOL}_SHOW_ON_UPGLOB.
#
# Example: Hide nodejs version when there is no package.json and no *.js files in the current
# directory, in `..`, in `../..` and so on.
#
# typeset -g POWERLEVEL9K_ASDF_NODEJS_SHOW_ON_UPGLOB='*.js|package.json'
typeset -g POWERLEVEL9K_ASDF_SHOW_ON_UPGLOB=
# Ruby version from asdf.
typeset -g POWERLEVEL9K_ASDF_RUBY_FOREGROUND=168
# typeset -g POWERLEVEL9K_ASDF_RUBY_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_RUBY_SHOW_ON_UPGLOB='*.foo|*.bar'
# Python version from asdf.
typeset -g POWERLEVEL9K_ASDF_PYTHON_FOREGROUND=37
# typeset -g POWERLEVEL9K_ASDF_PYTHON_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_PYTHON_SHOW_ON_UPGLOB='*.foo|*.bar'
# Go version from asdf.
typeset -g POWERLEVEL9K_ASDF_GOLANG_FOREGROUND=37
# typeset -g POWERLEVEL9K_ASDF_GOLANG_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_GOLANG_SHOW_ON_UPGLOB='*.foo|*.bar'
# Node.js version from asdf.
typeset -g POWERLEVEL9K_ASDF_NODEJS_FOREGROUND=70
# typeset -g POWERLEVEL9K_ASDF_NODEJS_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_NODEJS_SHOW_ON_UPGLOB='*.foo|*.bar'
# Rust version from asdf.
typeset -g POWERLEVEL9K_ASDF_RUST_FOREGROUND=37
# typeset -g POWERLEVEL9K_ASDF_RUST_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_RUST_SHOW_ON_UPGLOB='*.foo|*.bar'
# .NET Core version from asdf.
typeset -g POWERLEVEL9K_ASDF_DOTNET_CORE_FOREGROUND=134
# typeset -g POWERLEVEL9K_ASDF_DOTNET_CORE_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_DOTNET_CORE_SHOW_ON_UPGLOB='*.foo|*.bar'
# Flutter version from asdf.
typeset -g POWERLEVEL9K_ASDF_FLUTTER_FOREGROUND=38
# typeset -g POWERLEVEL9K_ASDF_FLUTTER_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_FLUTTER_SHOW_ON_UPGLOB='*.foo|*.bar'
# Lua version from asdf.
typeset -g POWERLEVEL9K_ASDF_LUA_FOREGROUND=32
# typeset -g POWERLEVEL9K_ASDF_LUA_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_LUA_SHOW_ON_UPGLOB='*.foo|*.bar'
# Java version from asdf.
typeset -g POWERLEVEL9K_ASDF_JAVA_FOREGROUND=32
# typeset -g POWERLEVEL9K_ASDF_JAVA_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_JAVA_SHOW_ON_UPGLOB='*.foo|*.bar'
# Perl version from asdf.
typeset -g POWERLEVEL9K_ASDF_PERL_FOREGROUND=67
# typeset -g POWERLEVEL9K_ASDF_PERL_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_PERL_SHOW_ON_UPGLOB='*.foo|*.bar'
# Erlang version from asdf.
typeset -g POWERLEVEL9K_ASDF_ERLANG_FOREGROUND=125
# typeset -g POWERLEVEL9K_ASDF_ERLANG_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_ERLANG_SHOW_ON_UPGLOB='*.foo|*.bar'
# Elixir version from asdf.
typeset -g POWERLEVEL9K_ASDF_ELIXIR_FOREGROUND=129
# typeset -g POWERLEVEL9K_ASDF_ELIXIR_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_ELIXIR_SHOW_ON_UPGLOB='*.foo|*.bar'
# Postgres version from asdf.
typeset -g POWERLEVEL9K_ASDF_POSTGRES_FOREGROUND=31
# typeset -g POWERLEVEL9K_ASDF_POSTGRES_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_POSTGRES_SHOW_ON_UPGLOB='*.foo|*.bar'
# PHP version from asdf.
typeset -g POWERLEVEL9K_ASDF_PHP_FOREGROUND=99
# typeset -g POWERLEVEL9K_ASDF_PHP_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_PHP_SHOW_ON_UPGLOB='*.foo|*.bar'
# Haskell version from asdf.
typeset -g POWERLEVEL9K_ASDF_HASKELL_FOREGROUND=172
# typeset -g POWERLEVEL9K_ASDF_HASKELL_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_HASKELL_SHOW_ON_UPGLOB='*.foo|*.bar'
##########[ nordvpn: nordvpn connection status, linux only (https://nordvpn.com/) ]###########
# NordVPN connection indicator color.
typeset -g POWERLEVEL9K_NORDVPN_FOREGROUND=39
# Hide NordVPN connection indicator when not connected.
typeset -g POWERLEVEL9K_NORDVPN_{DISCONNECTED,CONNECTING,DISCONNECTING}_CONTENT_EXPANSION=
typeset -g POWERLEVEL9K_NORDVPN_{DISCONNECTED,CONNECTING,DISCONNECTING}_VISUAL_IDENTIFIER_EXPANSION=
# Custom icon.
# typeset -g POWERLEVEL9K_NORDVPN_VISUAL_IDENTIFIER_EXPANSION='⭐'
#################[ ranger: ranger shell (https://github.com/ranger/ranger) ]##################
# Ranger shell color.
typeset -g POWERLEVEL9K_RANGER_FOREGROUND=178
# Custom icon.
# typeset -g POWERLEVEL9K_RANGER_VISUAL_IDENTIFIER_EXPANSION='⭐'
######################[ nnn: nnn shell (https://github.com/jarun/nnn) ]#######################
# Nnn shell color.
typeset -g POWERLEVEL9K_NNN_FOREGROUND=72
# Custom icon.
# typeset -g POWERLEVEL9K_NNN_VISUAL_IDENTIFIER_EXPANSION='⭐'
###########################[ vim_shell: vim shell indicator (:sh) ]###########################
# Vim shell indicator color.
typeset -g POWERLEVEL9K_VIM_SHELL_FOREGROUND=34
# Custom icon.
# typeset -g POWERLEVEL9K_VIM_SHELL_VISUAL_IDENTIFIER_EXPANSION='⭐'
######[ midnight_commander: midnight commander shell (https://midnight-commander.org/) ]######
# Midnight Commander shell color.
typeset -g POWERLEVEL9K_MIDNIGHT_COMMANDER_FOREGROUND=178
# Custom icon.
# typeset -g POWERLEVEL9K_MIDNIGHT_COMMANDER_VISUAL_IDENTIFIER_EXPANSION='⭐'
#[ nix_shell: nix shell (https://nixos.org/nixos/nix-pills/developing-with-nix-shell.html) ]##
# Nix shell color.
typeset -g POWERLEVEL9K_NIX_SHELL_FOREGROUND=74
# Tip: If you want to see just the icon without "pure" and "impure", uncomment the next line.
# typeset -g POWERLEVEL9K_NIX_SHELL_CONTENT_EXPANSION=
# Custom icon.
# typeset -g POWERLEVEL9K_NIX_SHELL_VISUAL_IDENTIFIER_EXPANSION='⭐'
##################################[ disk_usage: disk usage ]##################################
# Colors for different levels of disk usage.
typeset -g POWERLEVEL9K_DISK_USAGE_NORMAL_FOREGROUND=35
typeset -g POWERLEVEL9K_DISK_USAGE_WARNING_FOREGROUND=220
typeset -g POWERLEVEL9K_DISK_USAGE_CRITICAL_FOREGROUND=160
# Thresholds for different levels of disk usage (percentage points).
typeset -g POWERLEVEL9K_DISK_USAGE_WARNING_LEVEL=90
typeset -g POWERLEVEL9K_DISK_USAGE_CRITICAL_LEVEL=95
# If set to true, hide disk usage when below $POWERLEVEL9K_DISK_USAGE_WARNING_LEVEL percent.
typeset -g POWERLEVEL9K_DISK_USAGE_ONLY_WARNING=false
# Custom icon.
# typeset -g POWERLEVEL9K_DISK_USAGE_VISUAL_IDENTIFIER_EXPANSION='⭐'
###########[ vi_mode: vi mode (you don't need this if you've enabled prompt_char) ]###########
# Text and color for normal (a.k.a. command) vi mode.
typeset -g POWERLEVEL9K_VI_COMMAND_MODE_STRING=NORMAL
typeset -g POWERLEVEL9K_VI_MODE_NORMAL_FOREGROUND=106
# Text and color for visual vi mode.
typeset -g POWERLEVEL9K_VI_VISUAL_MODE_STRING=VISUAL
typeset -g POWERLEVEL9K_VI_MODE_VISUAL_FOREGROUND=68
# Text and color for overtype (a.k.a. overwrite and replace) vi mode.
typeset -g POWERLEVEL9K_VI_OVERWRITE_MODE_STRING=OVERTYPE
typeset -g POWERLEVEL9K_VI_MODE_OVERWRITE_FOREGROUND=172
# Text and color for insert vi mode.
typeset -g POWERLEVEL9K_VI_INSERT_MODE_STRING=
typeset -g POWERLEVEL9K_VI_MODE_INSERT_FOREGROUND=66
# Custom icon.
# typeset -g POWERLEVEL9K_VI_MODE_VISUAL_IDENTIFIER_EXPANSION='⭐'
######################################[ ram: free RAM ]#######################################
# RAM color.
typeset -g POWERLEVEL9K_RAM_FOREGROUND=66
# Custom icon.
# typeset -g POWERLEVEL9K_RAM_VISUAL_IDENTIFIER_EXPANSION='⭐'
#####################################[ swap: used swap ]######################################
# Swap color.
typeset -g POWERLEVEL9K_SWAP_FOREGROUND=96
# Custom icon.
# typeset -g POWERLEVEL9K_SWAP_VISUAL_IDENTIFIER_EXPANSION='⭐'
######################################[ load: CPU load ]######################################
# Show average CPU load over this many last minutes. Valid values are 1, 5 and 15.
typeset -g POWERLEVEL9K_LOAD_WHICH=5
# Load color when load is under 50%.
typeset -g POWERLEVEL9K_LOAD_NORMAL_FOREGROUND=66
# Load color when load is between 50% and 70%.
typeset -g POWERLEVEL9K_LOAD_WARNING_FOREGROUND=178
# Load color when load is over 70%.
typeset -g POWERLEVEL9K_LOAD_CRITICAL_FOREGROUND=166
# Custom icon.
# typeset -g POWERLEVEL9K_LOAD_VISUAL_IDENTIFIER_EXPANSION='⭐'
################[ todo: todo items (https://github.com/todotxt/todo.txt-cli) ]################
# Todo color.
typeset -g POWERLEVEL9K_TODO_FOREGROUND=110
# Hide todo when the total number of tasks is zero.
typeset -g POWERLEVEL9K_TODO_HIDE_ZERO_TOTAL=true
# Hide todo when the number of tasks after filtering is zero.
typeset -g POWERLEVEL9K_TODO_HIDE_ZERO_FILTERED=false
# Todo format. The following parameters are available within the expansion.
#
# - P9K_TODO_TOTAL_TASK_COUNT The total number of tasks.
# - P9K_TODO_FILTERED_TASK_COUNT The number of tasks after filtering.
#
# These variables correspond to the last line of the output of `todo.sh -p ls`:
#
# TODO: 24 of 42 tasks shown
#
# Here 24 is P9K_TODO_FILTERED_TASK_COUNT and 42 is P9K_TODO_TOTAL_TASK_COUNT.
#
# typeset -g POWERLEVEL9K_TODO_CONTENT_EXPANSION='$P9K_TODO_FILTERED_TASK_COUNT'
# Custom icon.
# typeset -g POWERLEVEL9K_TODO_VISUAL_IDENTIFIER_EXPANSION='⭐'
###########[ timewarrior: timewarrior tracking status (https://timewarrior.net/) ]############
# Timewarrior color.
typeset -g POWERLEVEL9K_TIMEWARRIOR_FOREGROUND=110
# If the tracked task is longer than 24 characters, truncate and append "…".
# Tip: To always display tasks without truncation, delete the following parameter.
# Tip: To hide task names and display just the icon when time tracking is enabled, set the
# value of the following parameter to "".
typeset -g POWERLEVEL9K_TIMEWARRIOR_CONTENT_EXPANSION='${P9K_CONTENT:0:24}${${P9K_CONTENT:24}:+…}'
# Custom icon.
# typeset -g POWERLEVEL9K_TIMEWARRIOR_VISUAL_IDENTIFIER_EXPANSION='⭐'
##############[ taskwarrior: taskwarrior task count (https://taskwarrior.org/) ]##############
# Taskwarrior color.
typeset -g POWERLEVEL9K_TASKWARRIOR_FOREGROUND=74
# Taskwarrior segment format. The following parameters are available within the expansion.
#
# - P9K_TASKWARRIOR_PENDING_COUNT The number of pending tasks: `task +PENDING count`.
# - P9K_TASKWARRIOR_OVERDUE_COUNT The number of overdue tasks: `task +OVERDUE count`.
#
# Zero values are represented as empty parameters.
#
# The default format:
#
# '${P9K_TASKWARRIOR_OVERDUE_COUNT:+"!$P9K_TASKWARRIOR_OVERDUE_COUNT/"}$P9K_TASKWARRIOR_PENDING_COUNT'
#
# typeset -g POWERLEVEL9K_TASKWARRIOR_CONTENT_EXPANSION='$P9K_TASKWARRIOR_PENDING_COUNT'
# Custom icon.
# typeset -g POWERLEVEL9K_TASKWARRIOR_VISUAL_IDENTIFIER_EXPANSION='⭐'
##################################[ context: user@hostname ]##################################
# Context color when running with privileges.
typeset -g POWERLEVEL9K_CONTEXT_ROOT_FOREGROUND=178
# Context color in SSH without privileges.
typeset -g POWERLEVEL9K_CONTEXT_{REMOTE,REMOTE_SUDO}_FOREGROUND=180
# Default context color (no privileges, no SSH).
typeset -g POWERLEVEL9K_CONTEXT_FOREGROUND=180
# Context format when running with privileges: bold user@hostname.
typeset -g POWERLEVEL9K_CONTEXT_ROOT_TEMPLATE='%B%n@%m'
# Context format when in SSH without privileges: user@hostname.
typeset -g POWERLEVEL9K_CONTEXT_{REMOTE,REMOTE_SUDO}_TEMPLATE='%n@%m'
# Default context format (no privileges, no SSH): user@hostname.
typeset -g POWERLEVEL9K_CONTEXT_TEMPLATE='%n@%m'
# Don't show context unless running with privileges or in SSH.
# Tip: Remove the next line to always show context.
#typeset -g POWERLEVEL9K_CONTEXT_{DEFAULT,SUDO}_{CONTENT,VISUAL_IDENTIFIER}_EXPANSION=
# Custom icon.
# typeset -g POWERLEVEL9K_CONTEXT_VISUAL_IDENTIFIER_EXPANSION='⭐'
# Custom prefix.
# typeset -g POWERLEVEL9K_CONTEXT_PREFIX='%246Fwith '
###[ virtualenv: python virtual environment (https://docs.python.org/3/library/venv.html) ]###
# Python virtual environment color.
typeset -g POWERLEVEL9K_VIRTUALENV_FOREGROUND=37
# Don't show Python version next to the virtual environment name.
typeset -g POWERLEVEL9K_VIRTUALENV_SHOW_PYTHON_VERSION=false
# Don't show virtualenv if pyenv is already shown.
typeset -g POWERLEVEL9K_VIRTUALENV_SHOW_WITH_PYENV=false
# Separate environment name from Python version only with a space.
typeset -g POWERLEVEL9K_VIRTUALENV_{LEFT,RIGHT}_DELIMITER=
# Custom icon.
# typeset -g POWERLEVEL9K_VIRTUALENV_VISUAL_IDENTIFIER_EXPANSION='⭐'
#####################[ anaconda: conda environment (https://conda.io/) ]######################
# Anaconda environment color.
typeset -g POWERLEVEL9K_ANACONDA_FOREGROUND=37
# Don't show Python version next to the anaconda environment name.
typeset -g POWERLEVEL9K_ANACONDA_SHOW_PYTHON_VERSION=false
# Separate environment name from Python version only with a space.
typeset -g POWERLEVEL9K_ANACONDA_{LEFT,RIGHT}_DELIMITER=
# Custom icon.
# typeset -g POWERLEVEL9K_ANACONDA_VISUAL_IDENTIFIER_EXPANSION='⭐'
################[ pyenv: python environment (https://github.com/pyenv/pyenv) ]################
# Pyenv color.
typeset -g POWERLEVEL9K_PYENV_FOREGROUND=37
# Hide python version if it doesn't come from one of these sources.
typeset -g POWERLEVEL9K_PYENV_SOURCES=(shell local global)
# If set to false, hide python version if it's the same as global:
# $(pyenv version-name) == $(pyenv global).
typeset -g POWERLEVEL9K_PYENV_PROMPT_ALWAYS_SHOW=false
# If set to false, hide python version if it's equal to "system".
typeset -g POWERLEVEL9K_PYENV_SHOW_SYSTEM=true
# Pyenv segment format. The following parameters are available within the expansion.
#
# - P9K_CONTENT Current pyenv environment (pyenv version-name).
# - P9K_PYENV_PYTHON_VERSION Current python version (python --version).
#
# The default format has the following logic:
#
# 1. Display "$P9K_CONTENT $P9K_PYENV_PYTHON_VERSION" if $P9K_PYENV_PYTHON_VERSION is not
# empty and unequal to $P9K_CONTENT.
# 2. Otherwise display just "$P9K_CONTENT".
typeset -g POWERLEVEL9K_PYENV_CONTENT_EXPANSION='${P9K_CONTENT}${${P9K_PYENV_PYTHON_VERSION:#$P9K_CONTENT}:+ $P9K_PYENV_PYTHON_VERSION}'
# Custom icon.
# typeset -g POWERLEVEL9K_PYENV_VISUAL_IDENTIFIER_EXPANSION='⭐'
################[ goenv: go environment (https://github.com/syndbg/goenv) ]################
# Goenv color.
typeset -g POWERLEVEL9K_GOENV_FOREGROUND=37
# Hide go version if it doesn't come from one of these sources.
typeset -g POWERLEVEL9K_GOENV_SOURCES=(shell local global)
# If set to false, hide go version if it's the same as global:
# $(goenv version-name) == $(goenv global).
typeset -g POWERLEVEL9K_GOENV_PROMPT_ALWAYS_SHOW=false
# If set to false, hide go version if it's equal to "system".
typeset -g POWERLEVEL9K_GOENV_SHOW_SYSTEM=true
# Custom icon.
# typeset -g POWERLEVEL9K_GOENV_VISUAL_IDENTIFIER_EXPANSION='⭐'
##########[ nodenv: node.js version from nodenv (https://github.com/nodenv/nodenv) ]##########
# Nodenv color.
typeset -g POWERLEVEL9K_NODENV_FOREGROUND=70
# Hide node version if it doesn't come from one of these sources.
typeset -g POWERLEVEL9K_NODENV_SOURCES=(shell local global)
# If set to false, hide node version if it's the same as global:
# $(nodenv version-name) == $(nodenv global).
typeset -g POWERLEVEL9K_NODENV_PROMPT_ALWAYS_SHOW=false
# If set to false, hide node version if it's equal to "system".
typeset -g POWERLEVEL9K_NODENV_SHOW_SYSTEM=true
# Custom icon.
# typeset -g POWERLEVEL9K_NODENV_VISUAL_IDENTIFIER_EXPANSION='⭐'
##############[ nvm: node.js version from nvm (https://github.com/nvm-sh/nvm) ]###############
# Nvm color.
typeset -g POWERLEVEL9K_NVM_FOREGROUND=70
# Custom icon.
# typeset -g POWERLEVEL9K_NVM_VISUAL_IDENTIFIER_EXPANSION='⭐'
############[ nodeenv: node.js environment (https://github.com/ekalinin/nodeenv) ]############
# Nodeenv color.
typeset -g POWERLEVEL9K_NODEENV_FOREGROUND=70
# Don't show Node version next to the environment name.
typeset -g POWERLEVEL9K_NODEENV_SHOW_NODE_VERSION=false
# Separate environment name from Node version only with a space.
typeset -g POWERLEVEL9K_NODEENV_{LEFT,RIGHT}_DELIMITER=
# Custom icon.
# typeset -g POWERLEVEL9K_NODEENV_VISUAL_IDENTIFIER_EXPANSION='⭐'
##############################[ node_version: node.js version ]###############################
# Node version color.
typeset -g POWERLEVEL9K_NODE_VERSION_FOREGROUND=70
# Show node version only when in a directory tree containing package.json.
typeset -g POWERLEVEL9K_NODE_VERSION_PROJECT_ONLY=true
# Custom icon.
# typeset -g POWERLEVEL9K_NODE_VERSION_VISUAL_IDENTIFIER_EXPANSION='⭐'
#######################[ go_version: go version (https://golang.org) ]########################
# Go version color.
typeset -g POWERLEVEL9K_GO_VERSION_FOREGROUND=37
# Show go version only when in a go project subdirectory.
typeset -g POWERLEVEL9K_GO_VERSION_PROJECT_ONLY=true
# Custom icon.
# typeset -g POWERLEVEL9K_GO_VERSION_VISUAL_IDENTIFIER_EXPANSION='⭐'
#################[ rust_version: rustc version (https://www.rust-lang.org) ]##################
# Rust version color.
typeset -g POWERLEVEL9K_RUST_VERSION_FOREGROUND=37
# Show rust version only when in a rust project subdirectory.
typeset -g POWERLEVEL9K_RUST_VERSION_PROJECT_ONLY=true
# Custom icon.
# typeset -g POWERLEVEL9K_RUST_VERSION_VISUAL_IDENTIFIER_EXPANSION='⭐'
###############[ dotnet_version: .NET version (https://dotnet.microsoft.com) ]################
# .NET version color.
typeset -g POWERLEVEL9K_DOTNET_VERSION_FOREGROUND=134
# Show .NET version only when in a .NET project subdirectory.
typeset -g POWERLEVEL9K_DOTNET_VERSION_PROJECT_ONLY=true
# Custom icon.
# typeset -g POWERLEVEL9K_DOTNET_VERSION_VISUAL_IDENTIFIER_EXPANSION='⭐'
#####################[ php_version: php version (https://www.php.net/) ]######################
# PHP version color.
typeset -g POWERLEVEL9K_PHP_VERSION_FOREGROUND=99
# Show PHP version only when in a PHP project subdirectory.
typeset -g POWERLEVEL9K_PHP_VERSION_PROJECT_ONLY=true
# Custom icon.
# typeset -g POWERLEVEL9K_PHP_VERSION_VISUAL_IDENTIFIER_EXPANSION='⭐'
##########[ laravel_version: laravel php framework version (https://laravel.com/) ]###########
# Laravel version color.
typeset -g POWERLEVEL9K_LARAVEL_VERSION_FOREGROUND=161
# Custom icon.
# typeset -g POWERLEVEL9K_LARAVEL_VERSION_VISUAL_IDENTIFIER_EXPANSION='⭐'
####################[ java_version: java version (https://www.java.com/) ]####################
# Java version color.
typeset -g POWERLEVEL9K_JAVA_VERSION_FOREGROUND=32
# Show java version only when in a java project subdirectory.
typeset -g POWERLEVEL9K_JAVA_VERSION_PROJECT_ONLY=true
# Show brief version.
typeset -g POWERLEVEL9K_JAVA_VERSION_FULL=false
# Custom icon.
# typeset -g POWERLEVEL9K_JAVA_VERSION_VISUAL_IDENTIFIER_EXPANSION='⭐'
###[ package: name@version from package.json (https://docs.npmjs.com/files/package.json) ]####
# Package color.
typeset -g POWERLEVEL9K_PACKAGE_FOREGROUND=117
# Package format. The following parameters are available within the expansion.
#
# - P9K_PACKAGE_NAME The value of `name` field in package.json.
# - P9K_PACKAGE_VERSION The value of `version` field in package.json.
#
# typeset -g POWERLEVEL9K_PACKAGE_CONTENT_EXPANSION='${P9K_PACKAGE_NAME//\%/%%}@${P9K_PACKAGE_VERSION//\%/%%}'
# Custom icon.
# typeset -g POWERLEVEL9K_PACKAGE_VISUAL_IDENTIFIER_EXPANSION='⭐'
#############[ rbenv: ruby version from rbenv (https://github.com/rbenv/rbenv) ]##############
# Rbenv color.
typeset -g POWERLEVEL9K_RBENV_FOREGROUND=168
# Hide ruby version if it doesn't come from one of these sources.
typeset -g POWERLEVEL9K_RBENV_SOURCES=(shell local global)
# If set to false, hide ruby version if it's the same as global:
# $(rbenv version-name) == $(rbenv global).
typeset -g POWERLEVEL9K_RBENV_PROMPT_ALWAYS_SHOW=false
# If set to false, hide ruby version if it's equal to "system".
typeset -g POWERLEVEL9K_RBENV_SHOW_SYSTEM=true
# Custom icon.
# typeset -g POWERLEVEL9K_RBENV_VISUAL_IDENTIFIER_EXPANSION='⭐'
#######################[ rvm: ruby version from rvm (https://rvm.io) ]########################
# Rvm color.
typeset -g POWERLEVEL9K_RVM_FOREGROUND=168
# Don't show @gemset at the end.
typeset -g POWERLEVEL9K_RVM_SHOW_GEMSET=false
# Don't show ruby- at the front.
typeset -g POWERLEVEL9K_RVM_SHOW_PREFIX=false
# Custom icon.
# typeset -g POWERLEVEL9K_RVM_VISUAL_IDENTIFIER_EXPANSION='⭐'
###########[ fvm: flutter version management (https://github.com/leoafarias/fvm) ]############
# Fvm color.
typeset -g POWERLEVEL9K_FVM_FOREGROUND=38
# Custom icon.
# typeset -g POWERLEVEL9K_FVM_VISUAL_IDENTIFIER_EXPANSION='⭐'
##########[ luaenv: lua version from luaenv (https://github.com/cehoffman/luaenv) ]###########
# Lua color.
typeset -g POWERLEVEL9K_LUAENV_FOREGROUND=32
# Hide lua version if it doesn't come from one of these sources.
typeset -g POWERLEVEL9K_LUAENV_SOURCES=(shell local global)
# If set to false, hide lua version if it's the same as global:
# $(luaenv version-name) == $(luaenv global).
typeset -g POWERLEVEL9K_LUAENV_PROMPT_ALWAYS_SHOW=false
# If set to false, hide lua version if it's equal to "system".
typeset -g POWERLEVEL9K_LUAENV_SHOW_SYSTEM=true
# Custom icon.
# typeset -g POWERLEVEL9K_LUAENV_VISUAL_IDENTIFIER_EXPANSION='⭐'
###############[ jenv: java version from jenv (https://github.com/jenv/jenv) ]################
# Java color.
typeset -g POWERLEVEL9K_JENV_FOREGROUND=32
# Hide java version if it doesn't come from one of these sources.
typeset -g POWERLEVEL9K_JENV_SOURCES=(shell local global)
# If set to false, hide java version if it's the same as global:
# $(jenv version-name) == $(jenv global).
typeset -g POWERLEVEL9K_JENV_PROMPT_ALWAYS_SHOW=false
# If set to false, hide java version if it's equal to "system".
typeset -g POWERLEVEL9K_JENV_SHOW_SYSTEM=true
# Custom icon.
# typeset -g POWERLEVEL9K_JENV_VISUAL_IDENTIFIER_EXPANSION='⭐'
###########[ plenv: perl version from plenv (https://github.com/tokuhirom/plenv) ]############
# Perl color.
typeset -g POWERLEVEL9K_PLENV_FOREGROUND=67
# Hide perl version if it doesn't come from one of these sources.
typeset -g POWERLEVEL9K_PLENV_SOURCES=(shell local global)
# If set to false, hide perl version if it's the same as global:
# $(plenv version-name) == $(plenv global).
typeset -g POWERLEVEL9K_PLENV_PROMPT_ALWAYS_SHOW=false
# If set to false, hide perl version if it's equal to "system".
typeset -g POWERLEVEL9K_PLENV_SHOW_SYSTEM=true
# Custom icon.
# typeset -g POWERLEVEL9K_PLENV_VISUAL_IDENTIFIER_EXPANSION='⭐'
############[ phpenv: php version from phpenv (https://github.com/phpenv/phpenv) ]############
# PHP color.
typeset -g POWERLEVEL9K_PHPENV_FOREGROUND=99
# Hide php version if it doesn't come from one of these sources.
typeset -g POWERLEVEL9K_PHPENV_SOURCES=(shell local global)
# If set to false, hide php version if it's the same as global:
# $(phpenv version-name) == $(phpenv global).
typeset -g POWERLEVEL9K_PHPENV_PROMPT_ALWAYS_SHOW=false
# If set to false, hide php version if it's equal to "system".
typeset -g POWERLEVEL9K_PHPENV_SHOW_SYSTEM=true
# Custom icon.
# typeset -g POWERLEVEL9K_PHPENV_VISUAL_IDENTIFIER_EXPANSION='⭐'
##########[ haskell_stack: haskell version from stack (https://haskellstack.org/) ]###########
# Haskell color.
typeset -g POWERLEVEL9K_HASKELL_STACK_FOREGROUND=172
# Hide haskell version if it doesn't come from one of these sources.
#
# shell: version is set by STACK_YAML
# local: version is set by stack.yaml up the directory tree
# global: version is set by the implicit global project (~/.stack/global-project/stack.yaml)
typeset -g POWERLEVEL9K_HASKELL_STACK_SOURCES=(shell local)
# If set to false, hide haskell version if it's the same as in the implicit global project.
typeset -g POWERLEVEL9K_HASKELL_STACK_ALWAYS_SHOW=true
# Custom icon.
# typeset -g POWERLEVEL9K_HASKELL_STACK_VISUAL_IDENTIFIER_EXPANSION='⭐'
################[ terraform: terraform workspace (https://www.terraform.io) ]#################
# POWERLEVEL9K_TERRAFORM_CLASSES is an array with even number of elements. The first element
# in each pair defines a pattern against which the current terraform workspace gets matched.
# More specifically, it's P9K_CONTENT prior to the application of context expansion (see below)
# that gets matched. If you unset all POWERLEVEL9K_TERRAFORM_*CONTENT_EXPANSION parameters,
# you'll see this value in your prompt. The second element of each pair in
# POWERLEVEL9K_TERRAFORM_CLASSES defines the workspace class. Patterns are tried in order. The
# first match wins.
#
# For example, given these settings:
#
# typeset -g POWERLEVEL9K_TERRAFORM_CLASSES=(
# '*prod*' PROD
# '*test*' TEST
# '*' DEFAULT)
#
# If your current terraform workspace is "project_test", its class is TEST because "project_test"
# doesn't match the pattern '*prod*' but does match '*test*'.
#
# You can define different colors, icons and content expansions for different classes:
#
# typeset -g POWERLEVEL9K_TERRAFORM_TEST_FOREGROUND=28
# typeset -g POWERLEVEL9K_TERRAFORM_TEST_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_TERRAFORM_TEST_CONTENT_EXPANSION='> ${P9K_CONTENT} <'
typeset -g POWERLEVEL9K_TERRAFORM_CLASSES=(
# '*prod*' PROD # These values are examples that are unlikely
# '*test*' TEST # to match your needs. Customize them as needed.
'*' DEFAULT)
typeset -g POWERLEVEL9K_TERRAFORM_DEFAULT_FOREGROUND=38
# typeset -g POWERLEVEL9K_TERRAFORM_DEFAULT_VISUAL_IDENTIFIER_EXPANSION='⭐'
#############[ kubecontext: current kubernetes context (https://kubernetes.io/) ]#############
# Show kubecontext only when the command you are typing invokes one of these tools.
# Tip: Remove the next line to always show kubecontext.
typeset -g POWERLEVEL9K_KUBECONTEXT_SHOW_ON_COMMAND='kubectl|helm|kubens|kubectx|oc|istioctl|kogito'
# Kubernetes context classes for the purpose of using different colors, icons and expansions with
# different contexts.
#
# POWERLEVEL9K_KUBECONTEXT_CLASSES is an array with even number of elements. The first element
# in each pair defines a pattern against which the current kubernetes context gets matched.
# More specifically, it's P9K_CONTENT prior to the application of context expansion (see below)
# that gets matched. If you unset all POWERLEVEL9K_KUBECONTEXT_*CONTENT_EXPANSION parameters,
# you'll see this value in your prompt. The second element of each pair in
# POWERLEVEL9K_KUBECONTEXT_CLASSES defines the context class. Patterns are tried in order. The
# first match wins.
#
# For example, given these settings:
#
# typeset -g POWERLEVEL9K_KUBECONTEXT_CLASSES=(
# '*prod*' PROD
# '*test*' TEST
# '*' DEFAULT)
#
# If your current kubernetes context is "deathray-testing/default", its class is TEST
# because "deathray-testing/default" doesn't match the pattern '*prod*' but does match '*test*'.
#
# You can define different colors, icons and content expansions for different classes:
#
# typeset -g POWERLEVEL9K_KUBECONTEXT_TEST_FOREGROUND=28
# typeset -g POWERLEVEL9K_KUBECONTEXT_TEST_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_KUBECONTEXT_TEST_CONTENT_EXPANSION='> ${P9K_CONTENT} <'
typeset -g POWERLEVEL9K_KUBECONTEXT_CLASSES=(
# '*prod*' PROD # These values are examples that are unlikely
# '*test*' TEST # to match your needs. Customize them as needed.
'*' DEFAULT)
typeset -g POWERLEVEL9K_KUBECONTEXT_DEFAULT_FOREGROUND=134
# typeset -g POWERLEVEL9K_KUBECONTEXT_DEFAULT_VISUAL_IDENTIFIER_EXPANSION='⭐'
# Use POWERLEVEL9K_KUBECONTEXT_CONTENT_EXPANSION to specify the content displayed by kubecontext
# segment. Parameter expansions are very flexible and fast, too. See reference:
# http://zsh.sourceforge.net/Doc/Release/Expansion.html#Parameter-Expansion.
#
# Within the expansion the following parameters are always available:
#
# - P9K_CONTENT The content that would've been displayed if there was no content
# expansion defined.
# - P9K_KUBECONTEXT_NAME The current context's name. Corresponds to column NAME in the
# output of `kubectl config get-contexts`.
# - P9K_KUBECONTEXT_CLUSTER The current context's cluster. Corresponds to column CLUSTER in the
# output of `kubectl config get-contexts`.
# - P9K_KUBECONTEXT_NAMESPACE The current context's namespace. Corresponds to column NAMESPACE
# in the output of `kubectl config get-contexts`. If there is no
# namespace, the parameter is set to "default".
# - P9K_KUBECONTEXT_USER The current context's user. Corresponds to column AUTHINFO in the
# output of `kubectl config get-contexts`.
#
# If the context points to Google Kubernetes Engine (GKE) or Elastic Kubernetes Service (EKS),
# the following extra parameters are available:
#
# - P9K_KUBECONTEXT_CLOUD_NAME Either "gke" or "eks".
# - P9K_KUBECONTEXT_CLOUD_ACCOUNT Account/project ID.
# - P9K_KUBECONTEXT_CLOUD_ZONE Availability zone.
# - P9K_KUBECONTEXT_CLOUD_CLUSTER Cluster.
#
# P9K_KUBECONTEXT_CLOUD_* parameters are derived from P9K_KUBECONTEXT_CLUSTER. For example,
# if P9K_KUBECONTEXT_CLUSTER is "gke_my-account_us-east1-a_my-cluster-01":
#
# - P9K_KUBECONTEXT_CLOUD_NAME=gke
# - P9K_KUBECONTEXT_CLOUD_ACCOUNT=my-account
# - P9K_KUBECONTEXT_CLOUD_ZONE=us-east1-a
# - P9K_KUBECONTEXT_CLOUD_CLUSTER=my-cluster-01
#
# If P9K_KUBECONTEXT_CLUSTER is "arn:aws:eks:us-east-1:123456789012:cluster/my-cluster-01":
#
# - P9K_KUBECONTEXT_CLOUD_NAME=eks
# - P9K_KUBECONTEXT_CLOUD_ACCOUNT=123456789012
# - P9K_KUBECONTEXT_CLOUD_ZONE=us-east-1
# - P9K_KUBECONTEXT_CLOUD_CLUSTER=my-cluster-01
typeset -g POWERLEVEL9K_KUBECONTEXT_DEFAULT_CONTENT_EXPANSION=
# Show P9K_KUBECONTEXT_CLOUD_CLUSTER if it's not empty and fall back to P9K_KUBECONTEXT_NAME.
POWERLEVEL9K_KUBECONTEXT_DEFAULT_CONTENT_EXPANSION+='${P9K_KUBECONTEXT_CLOUD_CLUSTER:-${P9K_KUBECONTEXT_NAME}}'
# Append the current context's namespace if it's not "default".
POWERLEVEL9K_KUBECONTEXT_DEFAULT_CONTENT_EXPANSION+='${${:-/$P9K_KUBECONTEXT_NAMESPACE}:#/default}'
# Custom prefix.
# typeset -g POWERLEVEL9K_KUBECONTEXT_PREFIX='%246Fat '
#[ aws: aws profile (https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html) ]#
# Show aws only when the command you are typing invokes one of these tools.
# Tip: Remove the next line to always show aws.
typeset -g POWERLEVEL9K_AWS_SHOW_ON_COMMAND='aws|awless|terraform|pulumi'
# POWERLEVEL9K_AWS_CLASSES is an array with even number of elements. The first element
# in each pair defines a pattern against which the current AWS profile gets matched.
# More specifically, it's P9K_CONTENT prior to the application of context expansion (see below)
# that gets matched. If you unset all POWERLEVEL9K_AWS_*CONTENT_EXPANSION parameters,
# you'll see this value in your prompt. The second element of each pair in
# POWERLEVEL9K_AWS_CLASSES defines the profile class. Patterns are tried in order. The
# first match wins.
#
# For example, given these settings:
#
# typeset -g POWERLEVEL9K_AWS_CLASSES=(
# '*prod*' PROD
# '*test*' TEST
# '*' DEFAULT)
#
# If your current AWS profile is "company_test", its class is TEST
# because "company_test" doesn't match the pattern '*prod*' but does match '*test*'.
#
# You can define different colors, icons and content expansions for different classes:
#
# typeset -g POWERLEVEL9K_AWS_TEST_FOREGROUND=28
# typeset -g POWERLEVEL9K_AWS_TEST_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_AWS_TEST_CONTENT_EXPANSION='> ${P9K_CONTENT} <'
typeset -g POWERLEVEL9K_AWS_CLASSES=(
# '*prod*' PROD # These values are examples that are unlikely
# '*test*' TEST # to match your needs. Customize them as needed.
'*' DEFAULT)
typeset -g POWERLEVEL9K_AWS_DEFAULT_FOREGROUND=208
# typeset -g POWERLEVEL9K_AWS_DEFAULT_VISUAL_IDENTIFIER_EXPANSION='⭐'
#[ aws_eb_env: aws elastic beanstalk environment (https://aws.amazon.com/elasticbeanstalk/) ]#
# AWS Elastic Beanstalk environment color.
typeset -g POWERLEVEL9K_AWS_EB_ENV_FOREGROUND=70
# Custom icon.
# typeset -g POWERLEVEL9K_AWS_EB_ENV_VISUAL_IDENTIFIER_EXPANSION='⭐'
##########[ azure: azure account name (https://docs.microsoft.com/en-us/cli/azure) ]##########
# Show azure only when the command you are typing invokes one of these tools.
# Tip: Remove the next line to always show azure.
typeset -g POWERLEVEL9K_AZURE_SHOW_ON_COMMAND='az|terraform|pulumi'
# Azure account name color.
typeset -g POWERLEVEL9K_AZURE_FOREGROUND=32
# Custom icon.
# typeset -g POWERLEVEL9K_AZURE_VISUAL_IDENTIFIER_EXPANSION='⭐'
##########[ gcloud: google cloud account and project (https://cloud.google.com/) ]###########
# Show gcloud only when the command you are typing invokes one of these tools.
# Tip: Remove the next line to always show gcloud.
typeset -g POWERLEVEL9K_GCLOUD_SHOW_ON_COMMAND='gcloud|gcs'
# Google cloud color.
typeset -g POWERLEVEL9K_GCLOUD_FOREGROUND=32
# Google cloud format. Change the value of POWERLEVEL9K_GCLOUD_PARTIAL_CONTENT_EXPANSION and/or
# POWERLEVEL9K_GCLOUD_COMPLETE_CONTENT_EXPANSION if the default is too verbose or not informative
# enough. You can use the following parameters in the expansions. Each of them corresponds to the
# output of `gcloud` tool.
#
# Parameter | Source
# -------------------------|--------------------------------------------------------------------
# P9K_GCLOUD_CONFIGURATION | gcloud config configurations list --format='value(name)'
# P9K_GCLOUD_ACCOUNT | gcloud config get-value account
# P9K_GCLOUD_PROJECT_ID | gcloud config get-value project
# P9K_GCLOUD_PROJECT_NAME | gcloud projects describe $P9K_GCLOUD_PROJECT_ID --format='value(name)'
#
# Note: ${VARIABLE//\%/%%} expands to ${VARIABLE} with all occurrences of '%' replaced with '%%'.
#
# Obtaining project name requires sending a request to Google servers. This can take a long time
# and even fail. When project name is unknown, P9K_GCLOUD_PROJECT_NAME is not set and gcloud
# prompt segment is in state PARTIAL. When project name gets known, P9K_GCLOUD_PROJECT_NAME gets
# set and gcloud prompt segment transitions to state COMPLETE.
#
# You can customize the format, icon and colors of gcloud segment separately for states PARTIAL
# and COMPLETE. You can also hide gcloud in state PARTIAL by setting
# POWERLEVEL9K_GCLOUD_PARTIAL_VISUAL_IDENTIFIER_EXPANSION and
# POWERLEVEL9K_GCLOUD_PARTIAL_CONTENT_EXPANSION to empty.
typeset -g POWERLEVEL9K_GCLOUD_PARTIAL_CONTENT_EXPANSION='${P9K_GCLOUD_PROJECT_ID//\%/%%}'
typeset -g POWERLEVEL9K_GCLOUD_COMPLETE_CONTENT_EXPANSION='${P9K_GCLOUD_PROJECT_NAME//\%/%%}'
# Send a request to Google (by means of `gcloud projects describe ...`) to obtain project name
# this often. Negative value disables periodic polling. In this mode project name is retrieved
# only when the current configuration, account or project id changes.
typeset -g POWERLEVEL9K_GCLOUD_REFRESH_PROJECT_NAME_SECONDS=60
# Custom icon.
# typeset -g POWERLEVEL9K_GCLOUD_VISUAL_IDENTIFIER_EXPANSION='⭐'
#[ google_app_cred: google application credentials (https://cloud.google.com/docs/authentication/production) ]#
# Show google_app_cred only when the command you are typing invokes one of these tools.
# Tip: Remove the next line to always show google_app_cred.
typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_SHOW_ON_COMMAND='terraform|pulumi'
# Google application credentials classes for the purpose of using different colors, icons and
# expansions with different credentials.
#
# POWERLEVEL9K_GOOGLE_APP_CRED_CLASSES is an array with even number of elements. The first
# element in each pair defines a pattern against which the current kubernetes context gets
# matched. More specifically, it's P9K_CONTENT prior to the application of context expansion
# (see below) that gets matched. If you unset all POWERLEVEL9K_GOOGLE_APP_CRED_*CONTENT_EXPANSION
# parameters, you'll see this value in your prompt. The second element of each pair in
# POWERLEVEL9K_GOOGLE_APP_CRED_CLASSES defines the context class. Patterns are tried in order.
# The first match wins.
#
# For example, given these settings:
#
# typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_CLASSES=(
# '*:*prod*:*' PROD
# '*:*test*:*' TEST
# '*' DEFAULT)
#
# If your current Google application credentials is "service_account deathray-testing x@y.com",
# its class is TEST because it doesn't match the pattern '*:*prod*:*' but does match '*:*test*:*'.
#
# You can define different colors, icons and content expansions for different classes:
#
# typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_TEST_FOREGROUND=28
# typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_TEST_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_TEST_CONTENT_EXPANSION='$P9K_GOOGLE_APP_CRED_PROJECT_ID'
typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_CLASSES=(
# '*:*prod*:*' PROD # These values are examples that are unlikely
# '*:*test*:*' TEST # to match your needs. Customize them as needed.
'*' DEFAULT)
typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_DEFAULT_FOREGROUND=32
# typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_DEFAULT_VISUAL_IDENTIFIER_EXPANSION='⭐'
# Use POWERLEVEL9K_GOOGLE_APP_CRED_CONTENT_EXPANSION to specify the content displayed by
# google_app_cred segment. Parameter expansions are very flexible and fast, too. See reference:
# http://zsh.sourceforge.net/Doc/Release/Expansion.html#Parameter-Expansion.
#
# You can use the following parameters in the expansion. Each of them corresponds to one of the
# fields in the JSON file pointed to by GOOGLE_APPLICATION_CREDENTIALS.
#
# Parameter | JSON key file field
# ---------------------------------+---------------
# P9K_GOOGLE_APP_CRED_TYPE | type
# P9K_GOOGLE_APP_CRED_PROJECT_ID | project_id
# P9K_GOOGLE_APP_CRED_CLIENT_EMAIL | client_email
#
# Note: ${VARIABLE//\%/%%} expands to ${VARIABLE} with all occurrences of '%' replaced by '%%'.
typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_DEFAULT_CONTENT_EXPANSION='${P9K_GOOGLE_APP_CRED_PROJECT_ID//\%/%%}'
###############################[ public_ip: public IP address ]###############################
# Public IP color.
typeset -g POWERLEVEL9K_PUBLIC_IP_FOREGROUND=94
# Custom icon.
# typeset -g POWERLEVEL9K_PUBLIC_IP_VISUAL_IDENTIFIER_EXPANSION='⭐'
########################[ vpn_ip: virtual private network indicator ]#########################
# VPN IP color.
typeset -g POWERLEVEL9K_VPN_IP_FOREGROUND=81
# When on VPN, show just an icon without the IP address.
# Tip: To display the private IP address when on VPN, remove the next line.
typeset -g POWERLEVEL9K_VPN_IP_CONTENT_EXPANSION=
# Regular expression for the VPN network interface. Run `ifconfig` or `ip -4 a show` while on VPN
# to see the name of the interface.
typeset -g POWERLEVEL9K_VPN_IP_INTERFACE='(wg|(.*tun))[0-9]*'
# If set to true, show one segment per matching network interface. If set to false, show only
# one segment corresponding to the first matching network interface.
# Tip: If you set it to true, you'll probably want to unset POWERLEVEL9K_VPN_IP_CONTENT_EXPANSION.
typeset -g POWERLEVEL9K_VPN_IP_SHOW_ALL=false
# Custom icon.
# typeset -g POWERLEVEL9K_VPN_IP_VISUAL_IDENTIFIER_EXPANSION='⭐'
###########[ ip: ip address and bandwidth usage for a specified network interface ]###########
# IP color.
typeset -g POWERLEVEL9K_IP_FOREGROUND=38
# The following parameters are accessible within the expansion:
#
# Parameter | Meaning
# ----------------------+---------------
# P9K_IP_IP | IP address
# P9K_IP_INTERFACE | network interface
# P9K_IP_RX_BYTES | total number of bytes received
# P9K_IP_TX_BYTES | total number of bytes sent
# P9K_IP_RX_RATE | receive rate (since last prompt)
# P9K_IP_TX_RATE | send rate (since last prompt)
typeset -g POWERLEVEL9K_IP_CONTENT_EXPANSION='${P9K_IP_RX_RATE:+%70F⇣$P9K_IP_RX_RATE }${P9K_IP_TX_RATE:+%215F⇡$P9K_IP_TX_RATE }%38F$P9K_IP_IP'
# Show information for the first network interface whose name matches this regular expression.
# Run `ifconfig` or `ip -4 a show` to see the names of all network interfaces.
typeset -g POWERLEVEL9K_IP_INTERFACE='wlan.*'
# Custom icon.
# typeset -g POWERLEVEL9K_IP_VISUAL_IDENTIFIER_EXPANSION='⭐'
#########################[ proxy: system-wide http/https/ftp proxy ]##########################
# Proxy color.
typeset -g POWERLEVEL9K_PROXY_FOREGROUND=68
# Custom icon.
# typeset -g POWERLEVEL9K_PROXY_VISUAL_IDENTIFIER_EXPANSION='⭐'
################################[ battery: internal battery ]#################################
# Show battery in red when it's below this level and not connected to power supply.
typeset -g POWERLEVEL9K_BATTERY_LOW_THRESHOLD=20
typeset -g POWERLEVEL9K_BATTERY_LOW_FOREGROUND=160
# Show battery in green when it's charging or fully charged.
typeset -g POWERLEVEL9K_BATTERY_{CHARGING,CHARGED}_FOREGROUND=70
# Show battery in yellow when it's discharging.
typeset -g POWERLEVEL9K_BATTERY_DISCONNECTED_FOREGROUND=178
# Battery pictograms going from low to high level of charge.
typeset -g POWERLEVEL9K_BATTERY_STAGES='\uf58d\uf579\uf57a\uf57b\uf57c\uf57d\uf57e\uf57f\uf580\uf581\uf578'
# Don't show the remaining time to charge/discharge.
typeset -g POWERLEVEL9K_BATTERY_VERBOSE=false
#####################################[ wifi: wifi speed ]#####################################
# WiFi color.
typeset -g POWERLEVEL9K_WIFI_FOREGROUND=68
# Custom icon.
# typeset -g POWERLEVEL9K_WIFI_VISUAL_IDENTIFIER_EXPANSION='⭐'
# Use different colors and icons depending on signal strength ($P9K_WIFI_BARS).
#
# # Wifi colors and icons for different signal strength levels (low to high).
# typeset -g my_wifi_fg=(68 68 68 68 68) # <-- change these values
# typeset -g my_wifi_icon=('WiFi' 'WiFi' 'WiFi' 'WiFi' 'WiFi') # <-- change these values
#
# typeset -g POWERLEVEL9K_WIFI_CONTENT_EXPANSION='%F{${my_wifi_fg[P9K_WIFI_BARS+1]}}$P9K_WIFI_LAST_TX_RATE Mbps'
# typeset -g POWERLEVEL9K_WIFI_VISUAL_IDENTIFIER_EXPANSION='%F{${my_wifi_fg[P9K_WIFI_BARS+1]}}${my_wifi_icon[P9K_WIFI_BARS+1]}'
#
# The following parameters are accessible within the expansions:
#
# Parameter | Meaning
# ----------------------+---------------
# P9K_WIFI_SSID | service set identifier, a.k.a. network name
# P9K_WIFI_LINK_AUTH | authentication protocol such as "wpa2-psk" or "none"
# P9K_WIFI_LAST_TX_RATE | wireless transmit rate in megabits per second
# P9K_WIFI_RSSI | signal strength in dBm, from -120 to 0
# P9K_WIFI_NOISE | noise in dBm, from -120 to 0
# P9K_WIFI_BARS | signal strength in bars, from 0 to 4 (derived from P9K_WIFI_RSSI and P9K_WIFI_NOISE)
#
# All parameters except P9K_WIFI_BARS are extracted from the output of the following command:
#
# /System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport -I
####################################[ time: current time ]####################################
# Current time color.
typeset -g POWERLEVEL9K_TIME_FOREGROUND=66
# Format for the current time: 09:51:02. See `man 3 strftime`.
typeset -g POWERLEVEL9K_TIME_FORMAT='%D{%H:%M:%S}'
# If set to true, time will update when you hit enter. This way prompts for the past
# commands will contain the start times of their commands as opposed to the default
# behavior where they contain the end times of their preceding commands.
typeset -g POWERLEVEL9K_TIME_UPDATE_ON_COMMAND=false
# Custom icon.
# typeset -g POWERLEVEL9K_TIME_VISUAL_IDENTIFIER_EXPANSION='⭐'
# Custom prefix.
# typeset -g POWERLEVEL9K_TIME_PREFIX='%246Fat '
# Example of a user-defined prompt segment. Function prompt_example will be called on every
# prompt if `example` prompt segment is added to POWERLEVEL9K_LEFT_PROMPT_ELEMENTS or
# POWERLEVEL9K_RIGHT_PROMPT_ELEMENTS. It displays an icon and orange text greeting the user.
#
# Type `p10k help segment` for documentation and a more sophisticated example.
function prompt_example() {
  # Greet the current user (%n expands to the username) in orange (256-color
  # palette index 208), prefixed with a star icon.
  local -a seg_args
  seg_args=(-f 208 -i '⭐' -t 'hello, %n')
  p10k segment "${seg_args[@]}"
}
# Another user-defined prompt segment. Function prompt_clock_stat will be called on every
# prompt if `clock_stat` prompt segment is added to POWERLEVEL9K_LEFT_PROMPT_ELEMENTS or
# POWERLEVEL9K_RIGHT_PROMPT_ELEMENTS. It displays the current time-tracking state read
# from ~/.clock_log/state, if that file exists.
#
# Type `p10k help segment` for documentation and a more sophisticated example.
function prompt_clock_stat() {
  # Show the current clock-in state (written by an external script) as a
  # prompt segment. The segment is omitted when no state file exists.
  local state_file="$HOME/.clock_log/state"
  if [[ -f "$state_file" ]]; then
    local seg_text="$(<"$state_file")"
    # Reuse the time segment's color when it is set (resolves the old TODO
    # about hard-coding 66); quoting seg_text prevents word splitting.
    p10k segment -f "${POWERLEVEL9K_TIME_FOREGROUND:-66}" -t "$seg_text"
  fi
}
# User-defined prompt segments may optionally provide an instant_prompt_* function. Its job
# is to generate the prompt segment for display in instant prompt. See
# https://github.com/romkatv/powerlevel10k/blob/master/README.md#instant-prompt.
#
# Powerlevel10k will call instant_prompt_* at the same time as the regular prompt_* function
# and will record all `p10k segment` calls it makes. When displaying instant prompt, Powerlevel10k
# will replay these calls without actually calling instant_prompt_*. It is imperative that
# instant_prompt_* always makes the same `p10k segment` calls regardless of environment. If this
# rule is not observed, the content of instant prompt will be incorrect.
#
# Usually, you should either not define instant_prompt_* or simply call prompt_* from it. If
# instant_prompt_* is not defined for a segment, the segment won't be shown in instant prompt.
function instant_prompt_example() {
# Since prompt_example always makes the same `p10k segment` calls, we can call it from
# instant_prompt_example. This will give us the same `example` prompt segment in the instant
# and regular prompts.
prompt_example
}
# User-defined prompt segments can be customized the same way as built-in segments.
# typeset -g POWERLEVEL9K_EXAMPLE_FOREGROUND=208
# typeset -g POWERLEVEL9K_EXAMPLE_VISUAL_IDENTIFIER_EXPANSION='⭐'
# Transient prompt works similarly to the builtin transient_rprompt option. It trims down prompt
# when accepting a command line. Supported values:
#
# - off: Don't change prompt when accepting a command line.
# - always: Trim down prompt when accepting a command line.
# - same-dir: Trim down prompt when accepting a command line unless this is the first command
# typed after changing current working directory.
typeset -g POWERLEVEL9K_TRANSIENT_PROMPT=always
# Instant prompt mode.
#
# - off: Disable instant prompt. Choose this if you've tried instant prompt and found
# it incompatible with your zsh configuration files.
# - quiet: Enable instant prompt and don't print warnings when detecting console output
# during zsh initialization. Choose this if you've read and understood
# https://github.com/romkatv/powerlevel10k/blob/master/README.md#instant-prompt.
# - verbose: Enable instant prompt and print a warning when detecting console output during
# zsh initialization. Choose this if you've never tried instant prompt, haven't
# seen the warning, or if you are unsure what this all means.
typeset -g POWERLEVEL9K_INSTANT_PROMPT=verbose
# Hot reload allows you to change POWERLEVEL9K options after Powerlevel10k has been initialized.
# For example, you can type POWERLEVEL9K_BACKGROUND=red and see your prompt turn red. Hot reload
# can slow down prompt by 1-2 milliseconds, so it's better to keep it turned off unless you
# really need it.
typeset -g POWERLEVEL9K_DISABLE_HOT_RELOAD=true
# If p10k is already loaded, reload configuration.
# This works even with POWERLEVEL9K_DISABLE_HOT_RELOAD=true.
(( ! $+functions[p10k] )) || p10k reload
}
# Tell `p10k configure` which file it should overwrite.
typeset -g POWERLEVEL9K_CONFIG_FILE=${${(%):-%x}:a}
(( ${#p10k_config_opts} )) && setopt ${p10k_config_opts[@]}
'builtin' 'unset' 'p10k_config_opts'
|
#!/bin/bash
#
# Non-interactive MySQL provisioning for a dev VM: preseed the root password,
# install server+client, listen on all interfaces, and allow root to connect
# from any host.
# NOTE(review): root password is hard-coded to "root" — acceptable only for a
# throwaway development box.

set -e  # abort on the first failing command instead of continuing blindly

echo "[Info] Installing mysql"
sudo debconf-set-selections <<< 'mysql-server mysql-server/root_password password root'
sudo debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password root'
# Install server and client in a single apt-get transaction.
sudo apt-get -y install mysql-server mysql-client

# Comment out bind-address so MySQL listens on all interfaces, not just localhost.
sed -i 's/bind-address/#bind-address/g' /etc/mysql/my.cnf

# Re-home the root account so it can log in from any of these hosts.
for host in 'vagrant-ubuntu-trusty-64' 'app' 'app.int'; do
    mysql -u root -proot -e "UPDATE mysql.user SET Host='%' WHERE Host='${host}' AND User='root';"
done
mysql -u root -proot -e "FLUSH PRIVILEGES;"
|
#include <memory>
#include "Intent.hpp"
// Store the entity configuration map supplied by the caller.
// The member-initializer list copy-constructs the map directly instead of
// default-constructing it and then assigning (one copy instead of
// construct + assign).
// NOTE(review): values appear to be entity types (see getEntityDefinition,
// which matches against .second); keys presumably entity names — confirm.
Intent::Intent(const std::map<std::string, std::string>& entitiesData)
    : EntitiesConfigurations(entitiesData) {}
Intent::~Intent() {}
// Label prepended to this object's textual output.
std::string Intent::prefix() {
    return std::string{"Intent: "};
}
// Reverse lookup: find the first configured entry whose value matches the
// requested entity type. Returns a (key, entityType) pair owned by the
// caller, or nullptr when no entry matches.
std::unique_ptr<std::pair<std::string, std::string>> Intent::getEntityDefinition(std::string entityType) {
    // const auto& avoids copying every map entry per iteration (the original
    // range-for iterated by value, copying two strings each step).
    for (const auto& entityDefinition : EntitiesConfigurations) {
        if (entityType == entityDefinition.second) {
            return std::make_unique<std::pair<std::string, std::string>>(entityDefinition.first, entityType);
        }
    }
    return nullptr;  // no entity of this type configured
}
|
def bubbleSort(array):
    """Sort ``array`` in place in ascending order using bubble sort.

    Args:
        array: mutable sequence of mutually comparable items.

    Returns:
        None. ``array`` is reordered in place.
    """
    n = len(array)
    for i in range(n):
        # After pass i, the last i elements are already in final position.
        swapped = False
        for j in range(n - i - 1):
            if array[j] > array[j + 1]:
                array[j], array[j + 1] = array[j + 1], array[j]
                swapped = True
        if not swapped:
            # A full pass with no swaps means the array is sorted; stop early
            # (makes the already-sorted case O(n) instead of O(n^2)).
            break
<reponame>liveprojects-alex/gaddum_app<gh_stars>0
DELETE from playlists WHERE playlists.id = ?1; |
from custom_gym.doublecartpole import DoubleCartPoleEnv
import time
# Drive the double cart-pole environment forever with a constant action (1),
# rendering each frame and pacing the loop by the environment's own step time.
env = DoubleCartPoleEnv()
while True:
    env.step(1)
    env.render()
    time.sleep(env.time_step)
|
/****************************************************************************
*
* Copyright (c) 2020 Vivante Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <float.h>
#include <math.h>
#include "vsi_nn_platform.h"
#include "vsi_nn_prv.h"
#include "vsi_nn_log.h"
#include "vsi_nn_test.h"
#include "vsi_nn_tensor_util.h"
#include "utils/vsi_nn_util.h"
#include "utils/vsi_nn_dtype_util.h"
#include "utils/vsi_nn_math.h"
#include "client/vsi_nn_vxkernel.h"
#include "libnnext/vx_lib_nnext.h"
#define _VX_KERNEL_VAR (vx_kernel_AXIS_ALIGNED_BBOX_TRANSFORM)
#define _VX_KERNEL_ID (VX_KERNEL_ENUM_AXIS_ALIGNED_BBOX_TRANSFORM)
#define _VX_KERNEL_NAME (VX_KERNEL_NAME_AXIS_ALIGNED_BBOX_TRANSFORM)
#define _VX_KERNEL_FUNC_KERNEL (vxAxis_aligned_bbox_transformKernel)
typedef struct
{
    float x1, y1, x2, y2;   /* corner encoding: (x1,y1) min corner, (x2,y2) max corner */
}BoxEncodingCorner;

typedef struct
{
    float w, h, x, y;       /* center encoding: size (w,h) around center (x,y) */
}BoxEncodingCenter;

/* Convert a center/size box into its corner representation. */
void toBoxEncodingCorner
    (
    BoxEncodingCenter* ctr,
    BoxEncodingCorner* cnr
    )
{
    float half_w = ctr->w / 2;
    float half_h = ctr->h / 2;

    cnr->x1 = ctr->x - half_w;
    cnr->y1 = ctr->y - half_h;
    cnr->x2 = ctr->x + half_w;
    cnr->y2 = ctr->y + half_h;
}

/* Convert a corner box into its center/size representation. */
void toBoxEncodingCenter
    (
    BoxEncodingCorner* cnr,
    BoxEncodingCenter* ctr
    )
{
    float width  = cnr->x2 - cnr->x1;
    float height = cnr->y2 - cnr->y1;

    ctr->w = width;
    ctr->h = height;
    ctr->x = (cnr->x1 + cnr->x2) / 2;
    ctr->y = (cnr->y1 + cnr->y2) / 2;
}
/* CPU reference implementation of AXIS_ALIGNED_BBOX_TRANSFORM.
 *
 * Roles inferred from the indexing below — confirm against the op spec:
 *   paramObj[0] roi:        box corners (x1, y1, x2, y2) per roi
 *   paramObj[1] bboxDeltas: numClasses * 4 refinement values per roi
 *   paramObj[2] batchIndex: int32 batch index per roi
 *   paramObj[3] imageInfo:  (height, width) pair per batch image
 *   paramObj[4] output:     refined boxes clipped to the image bounds
 *
 * All buffers are host-side copies; everything is freed at `final` on both
 * success and failure paths.
 */
static vsi_status VX_CALLBACK vxAxis_aligned_bbox_transformKernel
    (
    vx_node node,
    const vx_reference* paramObj,
    uint32_t paramNum
    )
{
#define ARG_NUM            (0)
#define TENSOR_NUM_INPUT   (4)
#define TENSOR_NUM_OUTPUT  (1)
#define TENSOR_NUM         (TENSOR_NUM_INPUT+TENSOR_NUM_OUTPUT)

    vsi_status status = VSI_FAILURE;
    vx_context context = NULL;
    vx_tensor input[TENSOR_NUM_INPUT] = {0};
    vx_tensor output[TENSOR_NUM_OUTPUT] = {0};
    float *f32_in_buffer[TENSOR_NUM_INPUT] = {0};
    int32_t* int32_in_buffer[TENSOR_NUM_INPUT] = {0};
    float *f32_out_buffer[TENSOR_NUM_OUTPUT] = {0};
    vsi_nn_tensor_attr_t in_attr[TENSOR_NUM_INPUT];
    vsi_nn_tensor_attr_t out_attr[TENSOR_NUM_OUTPUT];
    uint32_t in_elements[TENSOR_NUM_INPUT] = {0};
    uint32_t out_elements[TENSOR_NUM_OUTPUT]= {0};

    int32_t i;
    for(i = 0; i < TENSOR_NUM_INPUT; i++)
    {
        memset(&in_attr[i], 0x0, sizeof(vsi_nn_tensor_attr_t));
    }
    for(i = 0; i < TENSOR_NUM_OUTPUT; i++)
    {
        memset(&out_attr[i], 0x0, sizeof(vsi_nn_tensor_attr_t));
    }

    /* prepare data: pull every input tensor into a host buffer */
    context = vxGetContext((vx_reference)node);

    for(i = 0; i < TENSOR_NUM_INPUT; i ++)
    {
        input[i] = (vx_tensor)paramObj[i];
        status = vsi_nn_vxGetTensorAttr(input[i], &in_attr[i]);
        TEST_CHECK_STATUS(status, final);
        in_elements[i] = vsi_nn_vxGetTensorElementNum(&in_attr[i]);
        if (i == 2)
        {
            /* input 2 is the integer batch-index tensor; copy it raw */
            int32_in_buffer[i] = (int32_t *)vsi_nn_vxCopyTensorToData(context,
                input[i], &in_attr[i]);
        }
        else
        {
            /* all other inputs are converted to float32 for the CPU math */
            /* NOTE(review): malloc result is not checked for NULL */
            f32_in_buffer[i] = (float *)malloc(in_elements[i] * sizeof(float));
            status = vsi_nn_vxConvertTensorToFloat32Data(
                context, input[i], &in_attr[i], f32_in_buffer[i],
                in_elements[i] * sizeof(float));
            TEST_CHECK_STATUS(status, final);
        }
    }
    for(i = 0; i < TENSOR_NUM_OUTPUT; i ++)
    {
        output[i] = (vx_tensor)paramObj[i + TENSOR_NUM_INPUT];
        status = vsi_nn_vxGetTensorAttr(output[i], &out_attr[i]);
        TEST_CHECK_STATUS(status, final);
        out_elements[i] = vsi_nn_vxGetTensorElementNum(&out_attr[i]);
        f32_out_buffer[i]= (float *)malloc(out_elements[i] * sizeof(float));
        memset(f32_out_buffer[i], 0, out_elements[i] * sizeof(float));
    }

    /* CPU kernel: apply each class's deltas to every roi, then clip */
    {
        const uint32_t roiLength = 4;   /* floats per box */
        const uint32_t imageLength = 2; /* floats per image-info entry */
        uint32_t numClasses = in_attr[1].size[0] / roiLength;
        uint32_t numRois = in_attr[0].size[1];
        uint32_t i;        /* NOTE: intentionally shadows the outer int32_t i */
        uint32_t roiIndex;
        for(roiIndex = 0; roiIndex < numRois; roiIndex++)
        {
            /* clip bounds come from the image this roi belongs to */
            uint32_t batchIndex = int32_in_buffer[2][roiIndex];
            float imageHeight = f32_in_buffer[3][batchIndex * imageLength];
            float imageWidth = f32_in_buffer[3][batchIndex * imageLength + 1];
            BoxEncodingCorner roi_cnr;
            BoxEncodingCenter roiBefore;
            roi_cnr.x1 = f32_in_buffer[0][roiIndex * roiLength];
            roi_cnr.y1 = f32_in_buffer[0][roiIndex * roiLength + 1];
            roi_cnr.x2 = f32_in_buffer[0][roiIndex * roiLength + 2];
            roi_cnr.y2 = f32_in_buffer[0][roiIndex * roiLength + 3];
            toBoxEncodingCenter(&roi_cnr, &roiBefore);
            for (i = 0; i < numClasses; i++)
            {
                BoxEncodingCenter roi_ctr;
                BoxEncodingCorner roiAfter;
                BoxEncodingCorner cliped;
                uint32_t index = (roiIndex * numClasses + i) * roiLength;
                /* deltas at index..index+3: additive x/y offsets scaled by
                 * the box size, and log-space w/h scales (hence exp) */
                roi_ctr.w = (float)(exp(f32_in_buffer[1][index + 2]) * roiBefore.w);
                roi_ctr.h = (float)(exp(f32_in_buffer[1][index + 3]) * roiBefore.h);
                roi_ctr.x = roiBefore.x + f32_in_buffer[1][index] * roiBefore.w;
                roi_ctr.y = roiBefore.y + f32_in_buffer[1][index + 1] * roiBefore.h;
                toBoxEncodingCorner(&roi_ctr, &roiAfter);
                /* clamp the refined corners into [0, imageWidth/Height] */
                cliped.x1 = vsi_nn_min(vsi_nn_max(roiAfter.x1, 0.0f), imageWidth);
                cliped.y1 = vsi_nn_min(vsi_nn_max(roiAfter.y1, 0.0f), imageHeight);
                cliped.x2 = vsi_nn_min(vsi_nn_max(roiAfter.x2, 0.0f), imageWidth);
                cliped.y2 = vsi_nn_min(vsi_nn_max(roiAfter.y2, 0.0f), imageHeight);
                f32_out_buffer[0][index] = cliped.x1;
                f32_out_buffer[0][index + 1] = cliped.y1;
                f32_out_buffer[0][index + 2] = cliped.x2;
                f32_out_buffer[0][index + 3] = cliped.y2;
            }
        }
    }

    /* save data: convert the float32 results back into the output tensor */
    for(i = 0; i < TENSOR_NUM_OUTPUT; i++)
    {
        status = vsi_nn_vxConvertFloat32DataToTensor(
            context, output[i], &out_attr[i], f32_out_buffer[i],
            out_elements[i] * sizeof(float));
        TEST_CHECK_STATUS(status, final);
    }

final:
    /* release all host-side buffers (reached on success and on any
     * TEST_CHECK_STATUS failure above) */
    for (i = 0; i < TENSOR_NUM_INPUT; i++)
    {
        if (f32_in_buffer[i]) free(f32_in_buffer[i]);
        if (int32_in_buffer[i]) free(int32_in_buffer[i]);
    }
    for(i = 0; i < TENSOR_NUM_OUTPUT; i++)
    {
        if (f32_out_buffer[i]) free(f32_out_buffer[i]);
    }
    return status;
} /* _VX_KERNEL_FUNC_KERNEL() */
static vx_param_description_t vxAxis_aligned_bbox_transformKernelParam[] =
{
{VX_INPUT, VX_TYPE_TENSOR, VX_PARAMETER_STATE_REQUIRED},
{VX_INPUT, VX_TYPE_TENSOR, VX_PARAMETER_STATE_REQUIRED},
{VX_INPUT, VX_TYPE_TENSOR, VX_PARAMETER_STATE_REQUIRED},
{VX_INPUT, VX_TYPE_TENSOR, VX_PARAMETER_STATE_REQUIRED},
{VX_OUTPUT, VX_TYPE_TENSOR, VX_PARAMETER_STATE_REQUIRED},
};
vx_status VX_CALLBACK vxAxis_aligned_bbox_transformInitializer
(
vx_node nodObj,
const vx_reference *paramObj,
vx_uint32 paraNum
)
{
vx_status status = VX_SUCCESS;
/*TODO: Add initial code for VX program*/
return status;
}
#ifdef __cplusplus
extern "C" {
#endif
vx_kernel_description_t vxAxis_aligned_bbox_transform_CPU =
{
_VX_KERNEL_ID,
_VX_KERNEL_NAME,
_VX_KERNEL_FUNC_KERNEL,
vxAxis_aligned_bbox_transformKernelParam,
_cnt_of_array( vxAxis_aligned_bbox_transformKernelParam ),
vsi_nn_KernelValidator,
NULL,
NULL,
vsi_nn_KernelInitializer,
vsi_nn_KernelDeinitializer
};
vx_kernel_description_t vxAxis_aligned_bbox_transform_VX =
{
_VX_KERNEL_ID,
_VX_KERNEL_NAME,
NULL,
vxAxis_aligned_bbox_transformKernelParam,
_cnt_of_array( vxAxis_aligned_bbox_transformKernelParam ),
vsi_nn_KernelValidator,
NULL,
NULL,
vxAxis_aligned_bbox_transformInitializer,
vsi_nn_KernelDeinitializer
};
vx_kernel_description_t * vx_kernel_AXIS_ALIGNED_BBOX_TRANSFORM_list[] =
{
&vxAxis_aligned_bbox_transform_CPU,
&vxAxis_aligned_bbox_transform_VX,
NULL
};
#ifdef __cplusplus
}
#endif
|
/**
*/
package com.telpoo.frame.net;
import java.io.FileNotFoundException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.security.KeyStore;
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManagerFactory;
import org.apache.http.Header;
import org.apache.http.HttpResponse;
import org.apache.http.HttpVersion;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.conn.ssl.SSLSocketFactory;
import org.apache.http.cookie.CookieOrigin;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.auth.BasicScheme;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.impl.cookie.CookieSpecBase;
import org.apache.http.message.BasicHeader;
import org.apache.http.params.BasicHttpParams;
import org.apache.http.params.CoreProtocolPNames;
import org.apache.http.params.HttpConnectionParams;
import org.apache.http.params.HttpParams;
import org.apache.http.protocol.HTTP;
import android.content.Context;
import android.net.ConnectivityManager;
import android.net.NetworkInfo;
import android.util.Base64;
import com.telpoo.frame.object.BaseObject;
import com.telpoo.frame.utils.FileSupport;
import com.telpoo.frame.utils.Mlog;
/**
*
* @author NAQ
*
*/
/**
 * Singleton HTTP helper offering GET/POST over plain HTTP and over the
 * project's permissive SSL socket factory. All methods retry failed attempts
 * up to {@code numberRetry} times and return {@code null} when every attempt
 * fails (or when retries are disabled via {@code numberRetry == 0}).
 *
 * <p>Fixes over the previous revision: {@link #getInstance()} is now
 * thread-safe (the old double null-check had no synchronization), and the
 * POST/SSL methods now actually retry — previously every catch block
 * returned {@code null} immediately, making the retry loop dead code. The
 * four near-identical POST variants share {@link #execPost} / {@link #readBody}.
 */
public class BaseNetSupportBeta {

	protected static String TAG = BaseNetSupportBeta.class.getSimpleName();

	// Connection settings, populated by init(NetConfig).
	private String contentType;
	private String userAgent;
	private int connectTimeout;
	private int soTimeout;
	private String authorization;
	private int numberRetry = 3;
	// Retained for compatibility; never read by this class.
	private boolean isInited = false;

	private static BaseNetSupportBeta instance;

	/**
	 * Returns the singleton instance. Synchronized so two threads racing on
	 * the first call cannot both see {@code instance == null} and create two
	 * instances.
	 */
	public static synchronized BaseNetSupportBeta getInstance() {
		if (instance == null) {
			instance = new BaseNetSupportBeta();
		}
		return instance;
	}

	private BaseNetSupportBeta() {
	}

	/** Copies timeouts, credentials and retry policy from the configuration. */
	public void init(NetConfig netConfig) {
		connectTimeout = netConfig.getConnectTimeout();
		soTimeout = netConfig.getSoTimeout();
		authorization = netConfig.getAuthorization();
		contentType = netConfig.getContentType();
		userAgent = netConfig.getUserAgent();
		numberRetry = netConfig.getNumberRetry();
	}

	/** Builds a plain HTTP/1.1 client with the configured timeouts. */
	private HttpClient myHttpClient() {
		HttpParams params = new BasicHttpParams();
		params.setParameter(CoreProtocolPNames.PROTOCOL_VERSION, HttpVersion.HTTP_1_1);
		HttpConnectionParams.setConnectionTimeout(params, connectTimeout);
		HttpConnectionParams.setSoTimeout(params, soTimeout);
		return new DefaultHttpClient(params);
	}

	/**
	 * Performs an HTTP GET via HttpURLConnection and returns the response body,
	 * retrying on failure. Spaces in the URL are percent-encoded.
	 *
	 * @return the response body, or {@code null} when all attempts fail.
	 */
	public String method_GET(String url) {
		if (numberRetry == 0)
			return null;
		int retryCount = 0;
		do {
			try {
				URL myUrl = new URL(url.replace(" ", "%20"));
				Mlog.D(TAG + "- method_GET -URl:" + myUrl);
				HttpURLConnection conn = (HttpURLConnection) myUrl.openConnection();
				conn.setConnectTimeout(connectTimeout);
				conn.setReadTimeout(soTimeout);
				if (authorization != null)
					conn.setRequestProperty("Authorization", authorization);
				conn.setRequestProperty("Content-Type", contentType);
				conn.setRequestProperty("User-Agent", userAgent);
				String jsonContent = FileSupport.readFromInputStream(conn.getInputStream());
				Mlog.D(TAG + "- method_GET - json result=" + jsonContent);
				conn.disconnect();
				return jsonContent;
			} catch (FileNotFoundException ex) {
				Mlog.E(TAG + "658345234 NetworkSupport - getNetworkData - FileNotFoundException = " + ex.getMessage());
			} catch (Exception ex) {
				Mlog.E(TAG + "789345564 NetworkSupport - getNetworkData - Exception = " + ex.getMessage());
			}
		} while (++retryCount < numberRetry);
		return null;
	}

	/**
	 * Builds and executes a POST carrying {@code bodySend} with the configured
	 * content type and optional Basic authorization. Shared by all POST
	 * variants below.
	 */
	private HttpResponse execPost(HttpClient client, String url, String bodySend) throws Exception {
		HttpConnectionParams.setConnectionTimeout(client.getParams(), connectTimeout);
		HttpPost post = new HttpPost(new URL(url).toURI());
		if (authorization != null)
			post.setHeader("Authorization", getB64Auth(authorization));
		// NOTE(review): "UTF8" is a JVM alias for UTF-8; kept as-is.
		StringEntity se = new StringEntity(bodySend, "UTF8");
		se.setContentType(new BasicHeader(HTTP.CONTENT_TYPE, contentType));
		se.setContentEncoding(HTTP.UTF_8);
		post.setEntity(se);
		return client.execute(post);
	}

	/** Reads the response entity as a string; a null response yields null. */
	private static String readBody(HttpResponse response) throws Exception {
		InputStream in = (response != null) ? response.getEntity().getContent() : null;
		return FileSupport.readFromInputStream(in);
	}

	/**
	 * POST over plain HTTP.
	 *
	 * @return the response body, or {@code null} when all attempts fail.
	 */
	public String method_POST(String url, String bodySend) {
		if (numberRetry == 0)
			return null;
		int retryCount = 0;
		Mlog.D(TAG + "-method_POST - url=" + url);
		Mlog.D(TAG + "-method_POST - json sent=" + bodySend);
		do {
			try {
				String jsonContent = readBody(execPost(myHttpClient(), url, bodySend));
				Mlog.D("method_POST - response: " + jsonContent);
				return jsonContent;
			} catch (Exception ex) {
				Mlog.E(TAG + "4363 method_POST " + ex.toString());
			}
		} while (++retryCount < numberRetry);
		return null;
	}

	/**
	 * POST over the project's SSL client.
	 *
	 * @return the response body, or {@code null} when all attempts fail.
	 */
	public String method_POST_SSL(String url, String bodySend) {
		if (numberRetry == 0)
			return null;
		int retryCount = 0;
		Mlog.D(TAG + "-method_POST - url=" + url);
		Mlog.D(TAG + "-method_POST - json sent=" + bodySend);
		do {
			try {
				String jsonContent = readBody(execPost(MySSLSocketFactory.getNewHttpClient(), url, bodySend));
				Mlog.D("method_POST - response: " + jsonContent);
				return jsonContent;
			} catch (Exception ex) {
				Mlog.E(TAG + "234234 method_POST_SSL " + ex.toString());
			}
		} while (++retryCount < numberRetry);
		return null;
	}

	/**
	 * POST over SSL returning a {@link BaseObject} with the HTTP status under
	 * "code" and, when present, the response body under "res".
	 *
	 * @return the populated object, or {@code null} when all attempts fail.
	 */
	public BaseObject method_POST_SSL_N(String url, String bodySend) {
		if (numberRetry == 0)
			return null;
		int retryCount = 0;
		Mlog.D(TAG + "-method_POST - json sent=" + bodySend);
		Mlog.D(TAG + "-method_POST - url=" + url);
		do {
			try {
				HttpResponse response = execPost(MySSLSocketFactory.getNewHttpClient(), url, bodySend);
				BaseObject oj = new BaseObject();
				oj.set("code", response.getStatusLine().getStatusCode());
				String jsonContent = readBody(response);
				Mlog.D("method_POST - response: " + jsonContent);
				if (jsonContent != null) {
					oj.set("res", jsonContent);
				}
				return oj;
			} catch (Exception ex) {
				Mlog.E(TAG + "576237 method_POST_SSL_N " + ex.toString());
			}
		} while (++retryCount < numberRetry);
		return null;
	}

	/**
	 * GET over the project's SSL client with hard-coded Basic credentials.
	 * SECURITY(review): credentials are shipped inside the client binary;
	 * move them into secure configuration.
	 *
	 * @return the response body, or {@code null} when all attempts fail.
	 */
	public String method_GET_SSL(String url) {
		if (numberRetry == 0)
			return null;
		int retryCount = 0;
		Mlog.D(TAG + "-method_GET_SSL - url=" + url);
		do {
			try {
				HttpClient client = MySSLSocketFactory.getNewHttpClient();
				HttpConnectionParams.setConnectionTimeout(client.getParams(), connectTimeout);
				HttpGet get = new HttpGet(new URL(url).toURI());
				if (authorization != null) {
					get.addHeader(BasicScheme.authenticate(new UsernamePasswordCredentials("vnp-mobile-app", "A8aFPkuCmbeBcTXRQVyZNn4hW9q"), "UTF-8", false));
				}
				String jsonContent = readBody(client.execute(get));
				Mlog.D("method_POST - response: " + jsonContent);
				return jsonContent;
			} catch (Exception ex) {
				Mlog.E(TAG + "576237 method_GET_SSL " + ex.toString());
			}
		} while (++retryCount < numberRetry);
		return null;
	}

	/** Returns true when any network interface reports CONNECTED. */
	public static boolean isNetworkAvailable(Context context) {
		ConnectivityManager connectivity = (ConnectivityManager) context.getSystemService(Context.CONNECTIVITY_SERVICE);
		if (connectivity != null) {
			NetworkInfo[] info = connectivity.getAllNetworkInfo();
			if (info != null)
				for (int i = 0; i < info.length; i++)
					if (info[i].getState() == NetworkInfo.State.CONNECTED) {
						return true;
					}
		}
		return false;
	}

	/** Encodes "user:pass" as an HTTP Basic authorization header value. */
	private String getB64Auth(String string) {
		String ret = "Basic " + Base64.encodeToString(string.getBytes(), Base64.URL_SAFE | Base64.NO_WRAP);
		return ret;
	}

	/**
	 * POST over plain HTTP returning a {@link BaseHttpResponse} carrying the
	 * url, HTTP status, raw response object and body text.
	 *
	 * @return the populated response holder, or {@code null} when all
	 *         attempts fail.
	 */
	public BaseHttpResponse tempMethod_POST(String url, String bodySend) {
		if (numberRetry == 0)
			return null;
		int retryCount = 0;
		Mlog.D(TAG + "-method_POST - url=" + url);
		Mlog.D(TAG + "-method_POST - json sent=" + bodySend);
		do {
			try {
				BaseHttpResponse baseResponse = new BaseHttpResponse();
				baseResponse.setUrl(url);
				HttpResponse response = execPost(myHttpClient(), url, bodySend);
				if (response != null) {
					baseResponse.setStatus(response.getStatusLine().getStatusCode());
					baseResponse.setHttpResponse(response);
				}
				String strResponse = readBody(response);
				baseResponse.setStringRespone(strResponse);
				Mlog.D("method_POST - response: " + strResponse);
				return baseResponse;
			} catch (Exception ex) {
				Mlog.E(TAG + "4363 method_POST " + ex.toString());
			}
		} while (++retryCount < numberRetry);
		return null;
	}
}
|
<filename>dhis-2/dhis-services/dhis-service-administration/src/test/java/org/hisp/dhis/statistics/StatisticsProviderTest.java
package org.hisp.dhis.statistics;
/*
* Copyright (c) 2004-2012, University of Oslo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the HISP project nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
import static junit.framework.Assert.assertEquals;
import static junit.framework.Assert.assertNotNull;
import java.util.Map;
import org.hisp.dhis.DhisSpringTest;
import org.hisp.dhis.common.Objects;
import org.hisp.dhis.dataelement.DataElementService;
import org.junit.Test;
/**
* @author <NAME>
* @version $Id$
*/
/**
 * Verifies that {@link StatisticsProvider#getObjectCounts()} reports the
 * number of data elements and data element groups seeded by the fixture.
 */
public class StatisticsProviderTest
    extends DhisSpringTest
{
    private StatisticsProvider statisticsProvider;

    // -------------------------------------------------------------------------
    // Fixture
    // -------------------------------------------------------------------------

    /** Seeds three data elements and two groups so the counts are deterministic. */
    @Override
    public void setUpTest()
    {
        dataElementService = (DataElementService) getBean( DataElementService.ID );

        statisticsProvider = (StatisticsProvider) getBean( StatisticsProvider.ID );

        dataElementService.addDataElement( createDataElement( 'A' ) );
        dataElementService.addDataElement( createDataElement( 'B' ) );
        dataElementService.addDataElement( createDataElement( 'C' ) );

        dataElementService.addDataElementGroup( createDataElementGroup( 'A' ) );
        dataElementService.addDataElementGroup( createDataElementGroup( 'B' ) );
    }

    // -------------------------------------------------------------------------
    // Tests
    // -------------------------------------------------------------------------

    @Test
    public void testGetDataElementObjectCount()
    {
        Map<Objects, Integer> counts = statisticsProvider.getObjectCounts();

        assertNotNull( counts );
        // Integer.valueOf avoids the deprecated Integer(int) constructor and
        // reuses cached instances for small values.
        assertEquals( Integer.valueOf( 3 ), counts.get( Objects.DATAELEMENT ) );
    }

    @Test
    public void testGetDataElementGroupObjectCounts()
    {
        Map<Objects, Integer> counts = statisticsProvider.getObjectCounts();

        assertNotNull( counts );
        assertEquals( Integer.valueOf( 2 ), counts.get( Objects.DATAELEMENTGROUP ) );
    }
}
|
import java.util.Random;
public class Example {

    /** Characters a generated salt may contain (letters and digits). */
    private static final String SALTCHARS =
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890";

    /** Prints a random 10-character alphanumeric salt, e.g. "UjOYAWNy80". */
    public static void main(String[] args) {
        // NOTE(review): for security-sensitive salts/tokens use SecureRandom.
        System.out.println(generateSalt(10, new Random()));
    }

    /**
     * Generates a random alphanumeric string.
     *
     * @param length number of characters to produce (0 yields "").
     * @param rnd    randomness source; nextInt gives a uniform index, unlike
     *               the previous {@code (int) (nextFloat() * length)} cast.
     * @return a string of {@code length} characters drawn from SALTCHARS.
     */
    public static String generateSalt(int length, Random rnd) {
        StringBuilder salt = new StringBuilder(Math.max(length, 0));
        while (salt.length() < length) {
            salt.append(SALTCHARS.charAt(rnd.nextInt(SALTCHARS.length())));
        }
        return salt.toString();
    }
}
// Output: UjOYAWNy80 |
#!/usr/bin/env bash
docker-compose up -d nginx mysql mailhog redis pm2
|
package nightmarethreatreis.com.github.mvp.managers;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import javafx.scene.layout.Pane;
import nightmarethreatreis.com.github.mvp.model.Admin;
import nightmarethreatreis.com.github.mvp.model.Korisnik;
import nightmarethreatreis.com.github.mvp.model.Kupac;
import nightmarethreatreis.com.github.mvp.model.Radnik;
import nightmarethreatreis.com.github.mvp.screens.externalComponents.NavbarLink;
@Component
public class NavbarManager {
    @Autowired
    private ScreenManager screenManager;
    @Autowired
    private SessionManager sessionManager;

    /** Navbar links keyed by the concrete user-role class (Kupac/Admin/Radnik). */
    private Map<Class<? extends Korisnik>, List<NavbarLink>> links = new HashMap<>();

    /** Creates a link that activates the given screen when clicked. */
    private NavbarLink createLink(String name, String screenName) {
        // The lambda reads screenManager at click time, so it is safe even though
        // this method runs from the instance initializer below, possibly before
        // field injection has happened.
        return new NavbarLink(name, event -> screenManager.activate(screenName));
    }

    /** Creates a link that logs the current user out and returns to the login screen. */
    private NavbarLink createLogoutLink(String name) {
        return new NavbarLink(name, event -> {
            sessionManager.logout();
            screenManager.activate("login");
        });
    }

    // Instance initializer: populate the link map once at construction time.
    {
        reloadNavbar();
    }

    /** (Re)builds the per-role link lists. Safe to call again at any time. */
    public void reloadNavbar() {
        List<NavbarLink> kupacLinks = new LinkedList<>();
        List<NavbarLink> adminLinks = new LinkedList<>();
        List<NavbarLink> radnikLinks = new LinkedList<>();
        // KUPAC LINKS
        kupacLinks.add(createLink("Početna", "home"));
        kupacLinks.add(createLink("Aktivne predstave", "showActiveShows"));
        kupacLinks.add(createLink("Promena korisničkog imena", "changeUsername"));
        kupacLinks.add(createLink("Promena lozinke", "changePassword"));
        kupacLinks.add(createLogoutLink("Odjavi se"));
        // ADMIN LINKS
        adminLinks.add(createLink("Početna", "home"));
        adminLinks.add(createLink("Pregled dramskih radnika", "showAllPersonel"));
        adminLinks.add(createLink("Prikaz svih predstava", "showActiveShows"));
        adminLinks.add(createLink("Promena korisničkog imena", "changeUsername"));
        adminLinks.add(createLink("Promena lozinke", "changePassword"));
        adminLinks.add(createLogoutLink("Odjavi se"));
        // RADNIK LINKS
        radnikLinks.add(createLink("Početna", "home"));
        radnikLinks.add(createLink("Promena korisničkog imena", "changeUsername"));
        radnikLinks.add(createLink("Promena lozinke", "changePassword"));
        radnikLinks.add(createLogoutLink("Odjavi se"));
        links.put(Kupac.class, kupacLinks);
        links.put(Admin.class, adminLinks);
        links.put(Radnik.class, radnikLinks);
    }

    /** Returns the link list registered for the exact role class, or null if unknown. */
    private List<NavbarLink> getNavbarLinkList(Class<? extends Korisnik> uloga) {
        return links.get(uloga);
    }

    /**
     * Returns the navbar links for the currently logged-in user, or a single
     * "Login" link when nobody is logged in. Never returns null.
     */
    public List<NavbarLink> getNavbarLinks() {
        Korisnik korisnik = sessionManager.getLoggedInKorisnik();
        if (korisnik == null) {
            List<NavbarLink> lista = new LinkedList<>();
            lista.add(createLink("Login", "login"));
            return lista;
        }
        List<NavbarLink> roleLinks = getNavbarLinkList(korisnik.getClass());
        // Robustness: the map is keyed by exact class; an unregistered subclass
        // (e.g. a runtime proxy) previously returned null here and caused an NPE
        // in updateNavbar. Fall back to an empty list instead.
        return roleLinks != null ? roleLinks : new LinkedList<>();
    }

    /**
     * Replaces the children of the given pane with the current user's links.
     *
     * @throws NullPointerException if {@code navbar} is null
     */
    public void updateNavbar(Pane navbar) {
        if (navbar == null) {
            throw new NullPointerException("Navbar ne sme biti null");
        }
        navbar.getChildren().clear();
        navbar.getChildren().addAll(getNavbarLinks());
    }
}
|
<reponame>hatamiarash7/RTL-Toast<filename>app/src/main/java/ir/hatamiarash/toast_sample/MainActivity.java
/*
* Copyright (c) 2018. <NAME>
*/
package ir.hatamiarash.toast_sample;
import android.graphics.Color;
import android.graphics.Typeface;
import android.graphics.drawable.Drawable;
import android.os.Bundle;
import android.text.Spannable;
import android.text.SpannableStringBuilder;
import android.text.style.StyleSpan;
import android.view.View;
import android.widget.Toast;
import androidx.appcompat.app.AppCompatActivity;
import ir.hatamiarash.toast.RTLToast;
import static android.graphics.Typeface.BOLD_ITALIC;
/**
 * Demo activity for the RTLToast library: each button shows one toast variant
 * (error, success, info, warning, plain, with icon, formatted, custom config).
 */
public class MainActivity extends AppCompatActivity {
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        // Error toast (red, with icon).
        findViewById(R.id.button_error_toast).setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                RTLToast.error(MainActivity.this, R.string.error_message, Toast.LENGTH_SHORT, true).show();
            }
        });
        // Success toast (green, with icon).
        findViewById(R.id.button_success_toast).setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                RTLToast.success(MainActivity.this, R.string.success_message, Toast.LENGTH_SHORT, true).show();
            }
        });
        // Info toast (blue, with icon).
        findViewById(R.id.button_info_toast).setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                RTLToast.info(MainActivity.this, R.string.info_message, Toast.LENGTH_SHORT, true).show();
            }
        });
        // Warning toast (orange, with icon).
        findViewById(R.id.button_warning_toast).setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                RTLToast.warning(MainActivity.this, R.string.warning_message, Toast.LENGTH_SHORT, true).show();
            }
        });
        // Plain toast without an icon.
        findViewById(R.id.button_normal_toast_wo_icon).setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                RTLToast.normal(MainActivity.this, R.string.normal_message_without_icon).show();
            }
        });
        // Plain toast with a custom drawable icon.
        findViewById(R.id.button_normal_toast_w_icon).setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                // NOTE(review): getResources().getDrawable(int) is deprecated since
                // API 22; consider ContextCompat.getDrawable — confirm minSdk first.
                Drawable icon = getResources().getDrawable(R.drawable.ic_pets_white_48dp);
                RTLToast.normal(MainActivity.this, R.string.normal_message_with_icon, icon).show();
            }
        });
        // Info toast whose message carries span formatting (see getFormattedMessage).
        findViewById(R.id.button_info_toast_with_formatting).setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                RTLToast.info(MainActivity.this, getFormattedMessage()).show();
            }
        });
        // Applies a custom global config (text color + typeface), shows one custom
        // toast, then resets the config so other toasts are unaffected.
        findViewById(R.id.button_custom_config).setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                RTLToast.Config.getInstance()
                        .setTextColor(Color.GREEN)
                        .setToastTypeface(Typeface.createFromAsset(getAssets(), "IRANSans.ttf"))
                        .apply();
                RTLToast.custom(MainActivity.this, R.string.custom_message, getResources().getDrawable(R.drawable.laptop512),
                        Color.BLACK, Toast.LENGTH_SHORT, true, true).show();
                RTLToast.Config.reset();
            }
        });
    }

    /**
     * Builds a Persian message whose middle segment is rendered bold-italic via
     * a StyleSpan over exactly that segment.
     */
    private CharSequence getFormattedMessage() {
        final String prefix = "متن ";
        final String highlight = "با فرمت ";
        final String suffix = " مخصوص";
        SpannableStringBuilder ssb = new SpannableStringBuilder(prefix).append(highlight).append(suffix);
        int prefixLen = prefix.length();
        // Span covers only the "highlight" segment: [prefixLen, prefixLen + highlight.length()).
        ssb.setSpan(new StyleSpan(BOLD_ITALIC), prefixLen, prefixLen + highlight.length(), Spannable.SPAN_EXCLUSIVE_EXCLUSIVE);
        return ssb;
    }
}
package com.maxmind.geoip2;
import com.maxmind.db.Reader;
import com.maxmind.geoip2.exception.AddressNotFoundException;
import com.maxmind.geoip2.exception.GeoIp2Exception;
import com.maxmind.geoip2.model.*;
import com.maxmind.geoip2.model.ConnectionTypeResponse.ConnectionType;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.InetAddress;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Arrays;
import static org.hamcrest.CoreMatchers.containsString;
import static org.junit.Assert.*;
/**
 * Integration tests for {@code DatabaseReader} against the MaxMind test
 * databases, covering both File- and stream-based construction, locales,
 * file modes, and every lookup method (city/isp/asn/domain/etc.).
 */
public class DatabaseReaderTest {
    @Rule
    public ExpectedException exception = ExpectedException.none();

    /** City test database as a File (supports all file modes). */
    private File geoipFile;
    /** Same database as a stream (MEMORY mode only). */
    private InputStream geoipStream;

    @Before
    public void setup() throws URISyntaxException, IOException {
        URL resource = DatabaseReaderTest.class
                .getResource("/maxmind-db/test-data/GeoIP2-City-Test.mmdb");
        this.geoipStream = resource.openStream();
        this.geoipFile = new File(resource.toURI());
    }

    @Test
    public void testDefaultLocaleFile() throws Exception {
        DatabaseReader reader = new DatabaseReader.Builder(this.geoipFile)
                .build();
        this.testDefaultLocale(reader);
    }

    @Test
    public void testDefaultLocaleURL() throws Exception {
        DatabaseReader reader = new DatabaseReader.Builder(this.geoipStream)
                .build();
        this.testDefaultLocale(reader);
        reader.close();
    }

    /** With no locale list, names come back in the default locale (English). */
    private void testDefaultLocale(DatabaseReader reader) throws IOException,
            GeoIp2Exception {
        CityResponse city = reader.city(InetAddress.getByName("172.16.17.32"));
        assertEquals("London", city.getCity().getName());
        reader.close();
    }

    @Test
    public void testIsInEuropeanUnion() throws Exception {
        // Fix: the reader was previously never closed in this test (resource
        // leak). try-with-resources guarantees cleanup even on assert failure.
        try (DatabaseReader reader = new DatabaseReader.Builder(this.geoipFile)
                .build()) {
            CityResponse city = reader.city(InetAddress.getByName("192.168.127.12"));
            assertTrue(city.getCountry().isInEuropeanUnion());
            assertTrue(city.getRegisteredCountry().isInEuropeanUnion());
        }
    }

    @Test
    public void testLocaleListFile() throws Exception {
        DatabaseReader reader = new DatabaseReader.Builder(this.geoipFile)
                .locales(Arrays.asList("xx", "ru", "pt-BR", "es", "en"))
                .build();
        this.testLocaleList(reader);
    }

    @Test
    public void testLocaleListURL() throws Exception {
        // NOTE(review): despite the name, this builds from geoipFile, not the
        // stream — confirm whether geoipStream was intended here.
        DatabaseReader reader = new DatabaseReader.Builder(this.geoipFile)
                .locales(Arrays.asList("xx", "ru", "pt-BR", "es", "en"))
                .build();
        this.testLocaleList(reader);
    }

    /** First resolvable locale in the list wins: "xx" is unknown, "ru" matches. */
    private void testLocaleList(DatabaseReader reader) throws IOException,
            GeoIp2Exception {
        CityResponse city = reader.city(InetAddress.getByName("172.16.17.32"));
        assertEquals("Лондон", city.getCity().getName());
        reader.close();
    }

    @Test
    public void testMemoryModeFile() throws Exception {
        DatabaseReader reader = new DatabaseReader.Builder(this.geoipFile)
                .fileMode(Reader.FileMode.MEMORY).build();
        this.testMemoryMode(reader);
    }

    @Test
    public void testMemoryModeURL() throws Exception {
        // NOTE(review): also built from geoipFile rather than the stream — verify.
        DatabaseReader reader = new DatabaseReader.Builder(this.geoipFile)
                .fileMode(Reader.FileMode.MEMORY).build();
        this.testMemoryMode(reader);
    }

    private void testMemoryMode(DatabaseReader reader) throws IOException,
            GeoIp2Exception {
        CityResponse city = reader.city(InetAddress.getByName("172.16.17.32"));
        assertEquals("London", city.getCity().getName());
        assertEquals(100, city.getLocation().getAccuracyRadius().longValue());
        reader.close();
    }

    @Test
    public void metadata() throws IOException {
        DatabaseReader reader = new DatabaseReader.Builder(this.geoipFile)
                .fileMode(Reader.FileMode.MEMORY).build();
        assertEquals("GeoIP2-City", reader.getMetadata().getDatabaseType());
        reader.close();
    }

    @Test
    public void hasIpAddressFile() throws Exception {
        DatabaseReader reader = new DatabaseReader.Builder(this.geoipFile)
                .build();
        this.hasIpAddress(reader);
    }

    @Test
    public void hasIpAddressURL() throws Exception {
        DatabaseReader reader = new DatabaseReader.Builder(this.geoipFile)
                .build();
        this.hasIpAddress(reader);
    }

    /** The queried IP must be echoed back in the response traits. */
    private void hasIpAddress(DatabaseReader reader) throws IOException,
            GeoIp2Exception {
        CityResponse cio = reader.city(InetAddress.getByName("172.16.17.32"));
        assertEquals("172.16.17.32", cio.getTraits().getIpAddress());
        reader.close();
    }

    @Test
    public void unknownAddressFile() throws Exception {
        DatabaseReader reader = new DatabaseReader.Builder(this.geoipFile)
                .build();
        this.unknownAddress(reader);
    }

    @Test
    public void unknownAddressURL() throws Exception {
        DatabaseReader reader = new DatabaseReader.Builder(this.geoipFile)
                .build();
        this.unknownAddress(reader);
    }

    /**
     * An unlisted address yields an empty Optional from tryCity and an
     * AddressNotFoundException from city.
     */
    private void unknownAddress(DatabaseReader reader) throws IOException,
            GeoIp2Exception {
        try {
            assertFalse(reader.tryCity(InetAddress.getByName("10.10.10.10")).isPresent());
            this.exception.expect(AddressNotFoundException.class);
            this.exception
                    .expectMessage(containsString("The address 10.10.10.10 is not in the database."));
            reader.city(InetAddress.getByName("10.10.10.10"));
        } finally {
            reader.close();
        }
    }

    @Test
    public void testUnsupportedFileMode() throws IOException {
        // Stream sources cannot be memory-mapped.
        this.exception.expect(IllegalArgumentException.class);
        this.exception.expectMessage(containsString("Only FileMode.MEMORY"));
        new DatabaseReader.Builder(this.geoipStream).fileMode(
                Reader.FileMode.MEMORY_MAPPED).build();
    }

    @Test
    public void incorrectDatabaseMethod() throws Exception {
        // Calling a lookup method that does not match the database type fails fast.
        this.exception.expect(UnsupportedOperationException.class);
        this.exception
                .expectMessage(containsString("GeoIP2-City database using the isp method"));
        try (DatabaseReader db = new DatabaseReader.Builder(this.geoipFile).build()) {
            db.isp(InetAddress.getByName("1.1.1.1"));
        }
    }

    @Test
    public void testAnonymousIp() throws Exception {
        DatabaseReader reader = new DatabaseReader.Builder(
                this.getFile("GeoIP2-Anonymous-IP-Test.mmdb")).build();
        InetAddress ipAddress = InetAddress.getByName("1.2.0.1");
        AnonymousIpResponse response = reader.anonymousIp(ipAddress);
        assertTrue(response.isAnonymous());
        assertTrue(response.isAnonymousVpn());
        assertFalse(response.isHostingProvider());
        assertFalse(response.isPublicProxy());
        assertFalse(response.isTorExitNode());
        assertEquals(ipAddress.getHostAddress(), response.getIpAddress());
        reader.close();
    }

    @Test
    public void testAsn() throws Exception {
        DatabaseReader reader = new DatabaseReader.Builder(
                this.getFile("GeoLite2-ASN-Test.mmdb")).build();
        InetAddress ipAddress = InetAddress.getByName("172.16.58.3");
        AsnResponse response = reader.asn(ipAddress);
        assertEquals(1221, response.getAutonomousSystemNumber().intValue());
        assertEquals("Telstra Pty Ltd",
                response.getAutonomousSystemOrganization());
        assertEquals(ipAddress.getHostAddress(), response.getIpAddress());
        reader.close();
    }

    @Test
    public void testConnectionType() throws Exception {
        DatabaseReader reader = new DatabaseReader.Builder(
                this.getFile("GeoIP2-Connection-Type-Test.mmdb")).build();
        InetAddress ipAddress = InetAddress.getByName("1.0.1.0");
        ConnectionTypeResponse response = reader.connectionType(ipAddress);
        assertEquals(ConnectionType.CABLE_DSL, response.getConnectionType());
        assertEquals(ipAddress.getHostAddress(), response.getIpAddress());
        reader.close();
    }

    @Test
    public void testDomain() throws Exception {
        DatabaseReader reader = new DatabaseReader.Builder(
                this.getFile("GeoIP2-Domain-Test.mmdb")).build();
        InetAddress ipAddress = InetAddress.getByName("1.2.0.0");
        DomainResponse response = reader.domain(ipAddress);
        assertEquals("maxmind.com", response.getDomain());
        assertEquals(ipAddress.getHostAddress(), response.getIpAddress());
        reader.close();
    }

    @Test
    public void testEnterprise() throws Exception {
        DatabaseReader reader = new DatabaseReader.Builder(
                getFile("GeoIP2-Enterprise-Test.mmdb")).build();
        InetAddress ipAddress = InetAddress.getByName("172.16.17.32");
        EnterpriseResponse response = reader.enterprise(ipAddress);
        assertEquals(11, response.getCity().getConfidence().intValue());
        assertEquals(99, response.getCountry().getConfidence().intValue());
        assertEquals(6252001, response.getCountry().getGeoNameId().intValue());
        assertEquals(27, response.getLocation().getAccuracyRadius().intValue());
        assertEquals(ConnectionType.CABLE_DSL, response.getTraits().getConnectionType());
        assertTrue(response.getTraits().isLegitimateProxy());
        assertEquals(ipAddress.getHostAddress(), response.getTraits().getIpAddress());
        reader.close();
    }

    @Test
    public void testIsp() throws Exception {
        DatabaseReader reader = new DatabaseReader.Builder(
                this.getFile("GeoIP2-ISP-Test.mmdb")).build();
        InetAddress ipAddress = InetAddress.getByName("172.16.58.3");
        IspResponse response = reader.isp(ipAddress);
        assertEquals(1221, response.getAutonomousSystemNumber().intValue());
        assertEquals("Telstra Pty Ltd",
                response.getAutonomousSystemOrganization());
        assertEquals("Telstra Internet", response.getIsp());
        assertEquals("Telstra Internet", response.getOrganization());
        assertEquals(ipAddress.getHostAddress(), response.getIpAddress());
        reader.close();
    }

    /** Resolves a test database from the classpath test-data directory. */
    private File getFile(String filename) throws URISyntaxException {
        URL resource = DatabaseReaderTest.class
                .getResource("/maxmind-db/test-data/" + filename);
        return new File(resource.toURI());
    }
}
|
from typing import Tuple, Callable
class CommandLine:
    """Simple value object wrapping a shell command string."""

    def __init__(self, command: str):
        # The raw command string; executed elsewhere (not by this class).
        self.command = command
def recover_files(directory: str) -> None:
    """Recover files under ``directory``.

    Currently a stub: the recovery logic is not implemented yet.
    """
    # Implement the file recovery logic based on the input directory
    pass
def password(user: str, password: str) -> Tuple[CommandLine, Callable[[str], None]]:
    """Build a PowerShell command that echoes the given credentials.

    Returns the wrapped command plus the ``recover_files`` callback.

    NOTE(review): this embeds the plaintext password in a command line, where it
    is visible in process listings/logs — confirm this is acceptable. The manual
    quoting is also fragile if ``user``/``password`` contain quote characters.
    """
    command = f'powershell -command "&{{Write-Host \\"User: {user} | Password: {password}\\"}}"'
    return CommandLine(command), recover_files
require('./_setup');
const test = require('tape');
const GA = require('../dist/ganalytics');

// localStorage key under which the generated client id is persisted.
const KEY = 'ga:user';

const isEmpty = x => Object.keys(x).length === 0;
// Fix: toString.call() takes no comparison argument — the original passed
// '[object Object]' as a (ignored) second argument and always returned a
// truthy string, so isObject() could never fail. Compare explicitly instead.
const isObject = x => Object.prototype.toString.call(x) === '[object Object]';
// Asserts that the globally captured request (`_SENT_.src`) is a well-formed
// Google Analytics collect URL for the given tracker id and event type, and
// that every key/value pair in `obj` appears URL-encoded in the query string.
function isData(t, tid, evt, obj = {}) {
  const { src } = _SENT_;
  t.ok(src, '~> used an Image and set `src` attribute');
  t.true(src.startsWith('https://www.google-analytics.com/collect?v=1'), '~~> sent to Google-Analytics API');
  t.true(src.includes(`&tid=${tid}`), '~~> includes the UA idenfitifer (`tid`)');
  t.true(src.includes(`&t=${evt}`), `~~> includes the event type: "${evt}" (\`t\`)`);
  t.true(src.includes(`&cid=${localStorage[KEY]}`), '~~> includes the generated user-idenfitifer (`cid`)');
  t.true(src.includes('&z='), '~~> includes unique timestamp (`z` cache-buster)');
  Object.keys(obj).forEach(k => {
    t.true(src.includes(`&${k}=${encodeURIComponent(obj[k])}`), `~~> includes the encoded(?) \`${k}\` value`);
  });
}
// Constructor works with and without `new`, always returning a plain object
// exposing `send` and `args`.
test('exports', t => {
  t.is(typeof GA, 'function', 'exports a function');
  let foo = new GA();
  t.true(isObject(foo), 'works with `new` keyword');
  t.is(foo.constructor.name, 'Object', '~> is (simple) Object');
  t.is(typeof foo.send, 'function', '~> has `send` function');
  t.true(isObject(foo.args), '~> has `args` object');
  let bar = GA();
  t.true(isObject(bar), 'works without `new` keyword');
  t.is(bar.constructor.name, 'Object', '~> is (simple) Object');
  t.is(typeof bar.send, 'function', '~> has `send` function');
  t.true(isObject(bar.args), '~> has `args` object');
  t.end();
});
// Client id lifecycle: generated once, persisted in localStorage under KEY,
// shared across instances, regenerated after localStorage is wiped.
test(`localStorage['${KEY}']`, t => {
  global.localStorage = {}; // reset
  let old = localStorage[KEY];
  t.is(old, undefined, '(assumption) no known user');
  let foo = GA();
  let uid = localStorage[KEY];
  t.true(!!uid, `wrote a new "${KEY}" value to localStorage`);
  t.is(foo.args.cid, uid, '~> GA instance keeps a copy as `args.cid` key');
  console.log(' '); // spacer
  let bar = new GA();
  t.is(localStorage[KEY], uid, `reuses same "${KEY}" value across instances`);
  t.is(bar.args.cid, foo.args.cid, '~> new GA instance has its own copy');
  console.log(' '); // spacer
  global.localStorage = {}; // reset
  t.is(localStorage[KEY], undefined, '(reset localStorage)');
  let baz = new GA();
  t.not(localStorage[KEY], uid, `generated unique "${KEY}" value`);
  t.is(baz.args.cid, localStorage[KEY], '~> GA instance keeps a copy as `args.cid` key');
  t.end();
});
// By default, constructing an instance immediately sends a "pageview" with the
// current document title and location.
test('ga.send :: oncreate', t => {
  global._SENT_ = {}; // reset
  global.localStorage = {}; // reset
  global.document.title = 'Hello World';
  global.location.href = 'http://example.com/hello-world';
  t.true(isEmpty(_SENT_), '(reset _SENT_)');
  t.true(isEmpty(localStorage), '(reset localStorage)');
  GA('UA-STRING');
  t.false(isEmpty(_SENT_), 'GA instance sent data immediately');
  isData(t, 'UA-STRING', 'pageview', {
    dt: document.title,
    dl: location.href
  });
  t.end();
});
// The third constructor argument (`toWait`) suppresses the initial pageview
// but still generates and stores the client id.
test('ga.send :: toWait', t => {
  global._SENT_ = {}; // reset
  global.localStorage = {}; // reset
  t.true(isEmpty(_SENT_), '(reset _SENT_)');
  t.true(isEmpty(localStorage), '(reset localStorage)');
  let ctx = GA('UA-STRING', null, true);
  t.true(isEmpty(_SENT_), 'did NOT dispatch initial "pageview" event');
  t.false(isEmpty(localStorage), 'generates unique `cid` key in localStorage');
  t.is(ctx.args.cid, localStorage[KEY], '~> and still stores it in instance');
  t.end();
});
// Explicit send(): custom params are forwarded as-is, and title/href are NOT
// auto-attached when an options object is supplied.
test('ga.send', t => {
  global._SENT_ = {}; // reset
  global.localStorage = {}; // reset
  t.true(isEmpty(_SENT_), '(reset _SENT_)');
  t.true(isEmpty(localStorage), '(reset localStorage)');
  let foo = GA('UA-STRING', null, true);
  global.document.title = 'Custom Events';
  global.location.href = 'http://example.com/custom-events';
  console.log(' '); // spacer
  let data = {
    ec: 'Video',
    el:'Home Hero',
    ea: 'Play'
  };
  foo.send('event', data);
  isData(t, 'UA-STRING', 'event', data);
  t.false(_SENT_.src.includes(`&dt=`), '~~> does NOT auto-include the `document.title` when options are passed (`dt`)');
  t.false(_SENT_.src.includes(`&dl=`), '~~> does NOT auto-include the `location.href` when options are passed (`dl`)');
  console.log(' '); // spacer
  data = { dp: '/hello-world' };
  foo.send('pageview', data);
  isData(t, 'UA-STRING', 'pageview', data);
  t.false(_SENT_.src.includes(`&dt=`), '~~> does NOT auto-include the `document.title` when options are passed (`dt`)');
  t.false(_SENT_.src.includes(`&dl=`), '~~> does NOT auto-include the `location.href` when options are passed (`dl`)');
  t.end();
});
|
package ru.contextguide.adgroup.mobileAppAdGroup;
import ru.contextguide.adgroup.CarrierEnum;
import ru.contextguide.adgroup.DeviceTypeEnum;
import ru.contextguide.yandexservices.utils.JsonSerializableObject;
import java.util.List;
import java.util.Objects;
public class MobileAppAdGroupGet implements JsonSerializableObject {
    /**
     * Link to the application in the AppStore or Google Play (max 1024 chars).
     * Must include the protocol. Not modifiable after creation.
     */
    private String storeUrl;
    /**
     * Device types the ads should be shown on.
     */
    private List<DeviceTypeEnum> targetDeviceType;
    /**
     * Internet connection types the ads should be shown for.
     */
    private CarrierEnum targetCarrier;
    /**
     * Minimum OS version on which the ad may be shown, e.g. "2.3".
     * Note: if the app store's minimum OS version is higher than this value,
     * ads are only shown for the store's version or above.
     */
    private String targetOperatingSystemVersion;
    // Fields below renamed from UpperCamelCase to standard lowerCamelCase;
    // the public getter/setter signatures are unchanged.
    /**
     * Moderation result for the mobile application icon.
     */
    private ExtensionModeration appIconModeration;
    /**
     * Operating system type.
     */
    private MobileOperatingSystemTypeEnum appOperatingSystemType;
    /**
     * Whether the application is available in the app store.
     */
    private AppAvailabilityStatusEnum appAvailabilityStatus;

    public String getStoreUrl() {
        return storeUrl;
    }

    public void setStoreUrl(String storeUrl) {
        this.storeUrl = storeUrl;
    }

    public List<DeviceTypeEnum> getTargetDeviceType() {
        return targetDeviceType;
    }

    public void setTargetDeviceType(List<DeviceTypeEnum> targetDeviceType) {
        this.targetDeviceType = targetDeviceType;
    }

    public CarrierEnum getTargetCarrier() {
        return targetCarrier;
    }

    public void setTargetCarrier(CarrierEnum targetCarrier) {
        this.targetCarrier = targetCarrier;
    }

    public String getTargetOperatingSystemVersion() {
        return targetOperatingSystemVersion;
    }

    public void setTargetOperatingSystemVersion(String targetOperatingSystemVersion) {
        this.targetOperatingSystemVersion = targetOperatingSystemVersion;
    }

    public ExtensionModeration getAppIconModeration() {
        return appIconModeration;
    }

    public void setAppIconModeration(ExtensionModeration appIconModeration) {
        this.appIconModeration = appIconModeration;
    }

    public MobileOperatingSystemTypeEnum getAppOperatingSystemType() {
        return appOperatingSystemType;
    }

    public void setAppOperatingSystemType(MobileOperatingSystemTypeEnum appOperatingSystemType) {
        this.appOperatingSystemType = appOperatingSystemType;
    }

    public AppAvailabilityStatusEnum getAppAvailabilityStatus() {
        return appAvailabilityStatus;
    }

    public void setAppAvailabilityStatus(AppAvailabilityStatusEnum appAvailabilityStatus) {
        this.appAvailabilityStatus = appAvailabilityStatus;
    }

    @Override
    public String toString() {
        return this.toJson();
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        MobileAppAdGroupGet that = (MobileAppAdGroupGet) o;
        return Objects.equals(storeUrl, that.storeUrl) &&
                Objects.equals(targetDeviceType, that.targetDeviceType) &&
                targetCarrier == that.targetCarrier &&
                Objects.equals(targetOperatingSystemVersion, that.targetOperatingSystemVersion) &&
                Objects.equals(appIconModeration, that.appIconModeration) &&
                appOperatingSystemType == that.appOperatingSystemType &&
                appAvailabilityStatus == that.appAvailabilityStatus;
    }

    @Override
    public int hashCode() {
        return Objects.hash(storeUrl, targetDeviceType, targetCarrier, targetOperatingSystemVersion, appIconModeration, appOperatingSystemType, appAvailabilityStatus);
    }
}
|
/**
* Copyright (C) 2013 Mot<EMAIL> (<EMAIL>)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.motown.operatorapi.json.restapi;
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.config.ClientConfig;
import com.sun.jersey.api.client.config.DefaultClientConfig;
import com.sun.jersey.api.json.JSONConfiguration;
import com.sun.jersey.spi.spring.container.servlet.SpringServlet;
import com.sun.jersey.test.framework.AppDescriptor;
import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
import com.sun.jersey.test.framework.spi.container.TestContainerException;
import com.sun.jersey.test.framework.spi.container.grizzly2.web.GrizzlyWebTestContainerFactory;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.test.annotation.DirtiesContext;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.springframework.web.context.ContextLoaderListener;
import org.springframework.web.context.request.RequestContextListener;
import static org.junit.Assert.assertEquals;
@ContextConfiguration("classpath:jersey-test-config.xml")
@RunWith(SpringJUnit4ClassRunner.class)
@DirtiesContext(classMode = DirtiesContext.ClassMode.AFTER_EACH_TEST_METHOD)
/**
 * Integration tests for the charging-station REST resource: listing stations
 * and posting commands (valid, malformed, unknown, and unauthorized cases).
 */
public class ITChargingStationResourceTest extends JerseyTest {
    private static final int OK = 200;
    private static final int ACCEPTED = 202;
    private static final int BAD_REQUEST = 400;
    private static final int FORBIDDEN = 403;
    private static final String BASE_URI = "http://localhost:9998/operator/api/charging-stations";

    private Client client;

    public ITChargingStationResourceTest() throws TestContainerException {
        super(new GrizzlyWebTestContainerFactory());
    }

    /**
     * Builds the full two-EVSE configuration command payload for the given
     * command name. Extracted so the three command tests share one copy of the
     * JSON instead of three near-identical literals.
     */
    private static String commandJson(String commandName) {
        return "[\n" +
                "    \"" + commandName + "\",\n" +
                "    {\n" +
                "        \"configuration\": {\n" +
                "            \"evses\": [\n" +
                "                {\n" +
                "                    \"evseId\": 1,\n" +
                "                    \"connectors\": [\n" +
                "                        {\n" +
                "                            \"maxAmp\": 32,\n" +
                "                            \"phase\": 3,\n" +
                "                            \"voltage\": 240,\n" +
                "                            \"chargingProtocol\": \"MODE3\",\n" +
                "                            \"current\": \"AC\",\n" +
                "                            \"connectorType\": \"TESLA\"\n" +
                "                        }\n" +
                "                    ]\n" +
                "                },\n" +
                "                {\n" +
                "                    \"evseId\": 2,\n" +
                "                    \"connectors\": [\n" +
                "                        {\n" +
                "                            \"maxAmp\": 32,\n" +
                "                            \"phase\": 3,\n" +
                "                            \"voltage\": 240,\n" +
                "                            \"chargingProtocol\": \"MODE3\",\n" +
                "                            \"current\": \"AC\",\n" +
                "                            \"connectorType\": \"TESLA\"\n" +
                "                        }\n" +
                "                    ]\n" +
                "                }\n" +
                "            ],\n" +
                "            \"settings\": {\n" +
                "                \"key\": \"value\",\n" +
                "                \"key2\": \"value2\"\n" +
                "            }\n" +
                "        }\n" +
                "    }\n" +
                "]";
    }

    @Override
    protected AppDescriptor configure() {
        return new WebAppDescriptor.Builder()
                .contextParam("contextConfigLocation", "classpath:jersey-test-config.xml")
                .contextListenerClass(ContextLoaderListener.class)
                .contextPath("/operator")
                .requestListenerClass(RequestContextListener.class)
                .servletClass(SpringServlet.class)
                .servletPath("/api")
                .initParam("com.sun.jersey.config.property.packages", "io.motown.operatorapi.json.restapi")
                .initParam("com.sun.jersey.spi.container.ContainerRequestFilters", "io.motown.operatorapi.json.restapi.util.TestSecurityContextFilter")
                .build();
    }

    @Override
    public Client client() {
        ClientConfig config = new DefaultClientConfig();
        config.getFeatures().put(JSONConfiguration.FEATURE_POJO_MAPPING, true);
        return Client.create(config);
    }

    @Before
    public void setUp() {
        this.client = client();
    }

    /** Listing charging stations returns 200 OK. */
    @Test
    public void testGetChargingStations() {
        ClientResponse response = client.resource(BASE_URI)
                .accept(ApiVersion.V1_JSON)
                .get(ClientResponse.class);
        assertEquals(OK, response.getStatus());
    }

    /** A well-formed known command is accepted asynchronously (202). */
    @Test
    public void testExecuteCommand() {
        ClientResponse response = client.resource(BASE_URI)
                .path("/TEST01")
                .path("/commands")
                .type(ApiVersion.V1_JSON)
                .accept(ApiVersion.V1_JSON)
                .post(ClientResponse.class, commandJson("AcceptChargingStation"));
        assertEquals(ACCEPTED, response.getStatus());
    }

    /** A command array missing the payload element is rejected (400). */
    @Test
    public void testExecuteCommandInvalidCommandArraySize() {
        ClientResponse response = client.resource(BASE_URI)
                .path("/TEST01")
                .path("/commands")
                .type(ApiVersion.V1_JSON)
                .accept(ApiVersion.V1_JSON)
                .post(ClientResponse.class, "[\"AcceptChargingStation\"]");
        assertEquals(BAD_REQUEST, response.getStatus());
    }

    /** An unknown command name ("Accept") is rejected (400). */
    @Test
    public void testExecuteCommandInvalidCommand() {
        ClientResponse response = client.resource(BASE_URI)
                .path("/TEST01")
                .path("/commands")
                .type(ApiVersion.V1_JSON)
                .accept(ApiVersion.V1_JSON)
                .post(ClientResponse.class, commandJson("Accept"));
        assertEquals(BAD_REQUEST, response.getStatus());
    }

    /** Syntactically invalid JSON is rejected (400). */
    @Test
    public void testExecuteCommandInvalidJson() {
        ClientResponse response = client.resource(BASE_URI)
                .path("/TEST01")
                .path("/commands")
                .type(ApiVersion.V1_JSON)
                .accept(ApiVersion.V1_JSON)
                .post(ClientResponse.class, "[\"AcceptChargingStation\"}");
        assertEquals(BAD_REQUEST, response.getStatus());
    }

    /**
     * A valid command from a user the TestSecurityContextFilter marks as
     * unauthorized is refused (403).
     */
    @Test
    public void testExecuteCommandNotAuthorizedUser() {
        ClientResponse response = client.resource(BASE_URI)
                .path("/TEST01")
                .path("/commands")
                .header("Non-Authorized-User", Boolean.TRUE)
                .type(ApiVersion.V1_JSON)
                .accept(ApiVersion.V1_JSON)
                .post(ClientResponse.class, commandJson("AcceptChargingStation"));
        assertEquals(FORBIDDEN, response.getStatus());
    }
}
|
<gh_stars>1-10
package net.kunmc.lab.artforest.event;
import net.kunmc.lab.artforest.ArtForest;
import org.bukkit.entity.LivingEntity;
import org.bukkit.entity.Player;
import org.bukkit.event.EventHandler;
import org.bukkit.event.Listener;
import org.bukkit.event.player.PlayerJoinEvent;
import org.bukkit.potion.PotionEffect;
import org.bukkit.potion.PotionEffectType;
import java.util.List;
/**
 * Handles player joins: restores the cached "drawer" player reference after a
 * reconnect and hides late joiners while a game round is in progress.
 */
public class JoinEventListener implements Listener {
    public JoinEventListener(ArtForest artForest) {
    }

    @EventHandler
    public void on(PlayerJoinEvent e){
        Player p = e.getPlayer();
        // If the joining player's UUID matches the cached drawer UUID, the
        // drawer reconnected — re-bind the live Player object.
        if(ArtForest.getgm().draweruidcache != null && ArtForest.getgm().draweruidcache.equals(p.getUniqueId())){
            ArtForest.getgm().drawer = p;
        }
        // During an active round, joiners get effectively permanent
        // invisibility (duration Integer.MAX_VALUE, ambient=true).
        if(ArtForest.getgm().Playing()){
            p.addPotionEffect(new PotionEffect(PotionEffectType.INVISIBILITY, Integer.MAX_VALUE, 100, true));
        }
    }
}
|
#!/usr/bin/env bash
# globstar: let ** match recursively; nullglob: unmatched globs expand to nothing.
shopt -s globstar nullglob
set -eo pipefail
# Accumulates the exit status: stays 0 only if every file is well-formatted.
error=0
# test jsonnet and libsonnet files
for file in ./**/*.{j,lib}sonnet; do
  printf 'jsonnetfmt --test -- "%s"\n' "${file}"
  # --test checks formatting without rewriting; `|| error=1` records the
  # failure but keeps checking the remaining files despite `set -e`.
  jsonnetfmt --test -- "${file}" || error=1
done
exit ${error}
|
def clean_text(text):
    """Normalize whitespace and strip surrounding punctuation from each word.

    Splits ``text`` on whitespace (collapsing runs of spaces/newlines), removes
    any leading/trailing ``.``, ``,``, ``!`` or ``?`` characters from each
    word (interior punctuation is kept), and rejoins with single spaces.

    :param text: input string; may be empty.
    :return: cleaned string ("" for empty/whitespace-only input).
    """
    words = text.split()
    # Renamed from `clean_text`, which shadowed the function's own name.
    stripped_words = [word.strip(".,!?") for word in words]
    return " ".join(stripped_words)
import produce from 'immer';
import {
AUTHENTICATED_FAILED,
AUTHENTICATED_SUCCESS,
USER_LOADED_SUCCESS,
USER_LOADED_FAILED,
} from '../actions/types';
// Auth slice: tracks whether the session token is valid, the loaded user
// payload, and whether the last user load succeeded.
const initialState = {
    isAuthenticated: false,
    user: [],
    success: undefined
};
// Redux reducer using immer's produce() so each case mutates a draft instead
// of hand-spreading new state objects.
export default function(state=initialState, action) {
    const { type, payload } = action;
    switch(type) {
        case AUTHENTICATED_SUCCESS:
            return produce(state, draft => {
                draft.isAuthenticated = true;
            })
        case AUTHENTICATED_FAILED:
            return produce(state, draft => {
                draft.isAuthenticated = false;
            })
        case USER_LOADED_SUCCESS:
            return produce(state, draft => {
                draft.success = true;
                draft.user = payload.user;
            })
        case USER_LOADED_FAILED:
            // NOTE(review): `user` is left at its previous value on failure —
            // confirm it should not be reset here.
            return produce(state, draft => {
                draft.success = false;
            })
        default:
            return state
    }
}
package libs.trustconnector.scdp.crypto;
/** SHA-384 digest helper built on {@link MessageDigestAlg}. */
public class SHA_384 extends MessageDigestAlg
{
    /** JCA algorithm name for this digest. */
    public static final String ALG = "SHA-384";

    public SHA_384() {
        // Use the shared constant rather than repeating the "SHA-384" literal.
        super(ALG);
    }

    /** Computes the SHA-384 digest of the whole message. */
    public static byte[] calc(final byte[] msg) {
        return calc(msg, 0, msg.length);
    }

    /** Computes the SHA-384 digest of {@code length} bytes of {@code msg} starting at {@code offset}. */
    public static byte[] calc(final byte[] msg, final int offset, final int length) {
        return MessageDigestAlg.calc(ALG, msg, offset, length);
    }
}
|
<gh_stars>1-10
package com.wavedroid.wayfarer.ambitions;
import fi.foyt.foursquare.api.FoursquareApi;
import fi.foyt.foursquare.api.FoursquareApiException;
import fi.foyt.foursquare.api.entities.CompactVenue;
/**
* @author DKhvatov
*/
public interface Ambition {

    /**
     * Attempts to fulfil this ambition at the given venue.
     *
     * @param api   Foursquare API client used to perform the action
     * @param venue venue the ambition is evaluated against
     * @return {@code true} if the ambition was fulfilled
     * @throws FoursquareApiException if the Foursquare API call fails
     */
    boolean fulfill(FoursquareApi api, CompactVenue venue) throws FoursquareApiException;

    /** Returns the message associated with this ambition for the given venue. */
    String msg(CompactVenue venue);
}
|
# Add some environment information to the prompt to avoid doing stuff in the
# wrong container. Uses idiomatic `[ -n ... ]` instead of `[ ! -z ... ]`.
PS1_ENV=
if [ -n "$ENV" ]
then
    if [ -n "$ECS_CONTAINER_METADATA_URI" ]
    then
        # ECS injects this metadata endpoint into task containers.
        PS1_ENV=" (ECS|$ENV)"
    elif [ -f "/.dockerenv" ]
    then
        # Plain Docker creates /.dockerenv inside containers.
        PS1_ENV=" (Docker|$ENV)"
    else
        PS1_ENV=" ($ENV)"
    fi
fi
export PS1="\[\e]0;\u@\h: \w\a\]\[\033[01;32m\]\u@\h\[\033[00m\]$PS1_ENV:\[\033[01;34m\]\w\[\033[00m\]\\$ "
|
<gh_stars>1-10
/*global define*/
define(function(require, exports, module) {
"use strict";
var Map = require("../lib/collection").Map;
var ctags = require("../ctags");
exports.getCompletions = function(edit, session, pos, prefix, callback) {
var tags = ctags.getCTags();
var matches = [];
var matchedSymbols = new Map();
tags.forEach(function(ctag) {
if(ctag.symbol.indexOf(prefix) === 0) {
if(matchedSymbols.contains(ctag.symbol)) {
return; // Not interested in duplicates
}
matchedSymbols.set(ctag.symbol, true);
var pathParts = ctag.path.split('/');
matches.push({
name: ctag.symbol,
value: ctag.symbol,
meta: pathParts[pathParts.length-1],
score: 0
});
}
});
callback(null, matches);
};
}); |
#!/usr/bin/env bash
set -o errexit
set -o pipefail
set -o nounset

# Positional arguments
BUCKET=$1   # e.g. "gs://ml4cvd/projects/jamesp/bigquery/201903"
DATASET=$2  # e.g. "ukbb7089_201903"

# Specific to this loader
GEO="US"

# Directory containing this script (and the schema file).
__dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# For phenotypes, we expect to add repeatedly, so we don't replace here. Note:
# if you run append.sh with the same data twice, you'll just duplicate the
# contents of the table.
# Fix: quote every expansion so bucket/dataset/path values cannot word-split
# or glob.
bq --location="${GEO}" load \
    --field_delimiter "\t" \
    --quote "" \
    --null_marker "NULL" \
    --source_format=CSV \
    --skip_leading_rows 1 \
    "${DATASET}.phenotype" "${BUCKET}/phenotype.tsv.gz" \
    "${__dir}/phenotype.json"
|
#! /bin/sh
# Build the API documentation (requires doxygen and Graphviz).
# Fixes: `echo -e` / `echo -n` are not portable under /bin/sh (POSIX echo has
# no options), so printf is used instead; the doxygen exit status is checked
# directly rather than via a follow-up `$?` test; `cd` failure now aborts.

# Common color helpers
RED='\033[01;31m'    # bold red
GREEN='\033[01;32m'  # green
RESET='\033[0m'      # text reset

LOG_FILE="./build_doc.log"
DOXYGEN_CONF="./MightyBoardFirmware.doxyfile"

cd .. || exit 1
rm -f ${LOG_FILE}

printf ' - Running Doxygen... '
echo "Running Doxygen" >> ${LOG_FILE}
printf '\n\n\n\n\n' >> ${LOG_FILE}
if doxygen ${DOXYGEN_CONF} >> ${LOG_FILE} 2>&1; then
    printf '%b\n' "${GREEN}Success${RESET}"
else
    printf '%b\n' "${RED}Failure${RESET}"
fi
|
#!/bin/bash
# Fixes: the original shebang was malformed (`#/bin/bash`); the root check ran
# only after apt-get had already executed — it now runs first; refusing to
# build as root exits non-zero instead of 0.
echo
echo
echo "本脚本仅适用于在Ubuntu环境下编译 https://github.com/garypang13/Actions-OpenWrt"
echo
echo
sleep 2s

# Refuse to build as root before touching anything.
if [ "$USER" == "root" ]; then
    echo
    echo
    echo "请勿使用root用户编译,换一个普通用户吧~~"
    sleep 3s
    exit 1
fi

# Install the OpenWrt build dependencies.
sudo apt-get update
sudo apt-get upgrade
sudo apt-get -y install build-essential asciidoc binutils bzip2 gawk gettext git libncurses5-dev libz-dev patch python3 python2.7 unzip zlib1g-dev lib32gcc1 libc6-dev-i386 subversion flex uglifyjs gcc-multilib g++-multilib p7zip p7zip-full msmtp libssl-dev texinfo libglib2.0-dev xmlto qemu-utils upx libelf-dev autoconf automake libtool autopoint device-tree-compiler ccache xsltproc rename antlr3 gperf curl screen upx

clear
echo
echo
echo
echo "|*******************************************|"
echo "| |"
echo "| |"
echo "| 基本环境部署完成...... |"
echo "| |"
echo "| |"
echo "|*******************************************|"
echo
echo
# Start from a clean checkout.
rm -Rf openwrt
echo "
1. X86_64
2. r2s
3. r4s
4. Rpi-4B
5. Exit
"
# Prompt until a valid entry is chosen; sets $firmware for all later steps.
while :; do
    read -p "你想要编译哪个固件? " CHOOSE
    case $CHOOSE in
    1)
        firmware="x86_64"
        break
        ;;
    2)
        firmware="nanopi-r2s"
        break
        ;;
    3)
        firmware="nanopi-r4s"
        break
        ;;
    4)
        firmware="Rpi-4B"
        break
        ;;
    5) exit 0
        ;;
    esac
done
# Fetch the build tree and the prebuilt SDK matching the chosen target.
git clone -b master --depth 1 https://github.com/coolsnowwolf/lede openwrt
cp -Rf devices openwrt/devices
cd openwrt
if [[ $firmware =~ (redmi-ac2100|phicomm-k2p|newifi-d2|k2p-32m-usb|XY-C5|xiaomi-r3p) ]]; then
    wget -cO sdk.tar.xz https://mirrors.cloud.tencent.com/openwrt/releases/21.02-SNAPSHOT/targets/ramips/mt7621/openwrt-sdk-21.02-SNAPSHOT-ramips-mt7621_gcc-8.4.0_musl.Linux-x86_64.tar.xz
elif [[ $firmware == "x86_64" ]]; then
    wget -cO sdk.tar.xz https://mirrors.cloud.tencent.com/openwrt/releases/21.02-SNAPSHOT/targets/x86/64/openwrt-sdk-21.02-SNAPSHOT-x86-64_gcc-8.4.0_musl.Linux-x86_64.tar.xz
elif [[ $firmware =~ (nanopi-r2s|nanopi-r4s) ]]; then
    wget -cO sdk.tar.xz https://mirrors.cloud.tencent.com/openwrt/releases/21.02-SNAPSHOT/targets/rockchip/armv8/openwrt-sdk-21.02-SNAPSHOT-rockchip-armv8_gcc-8.4.0_musl.Linux-x86_64.tar.xz
elif [[ $firmware == "Rpi-4B" ]]; then
    wget -cO sdk.tar.xz https://mirrors.cloud.tencent.com/openwrt/releases/21.02-SNAPSHOT/targets/bcm27xx/bcm2711/openwrt-sdk-21.02-SNAPSHOT-bcm27xx-bcm2711_gcc-8.4.0_musl.Linux-x86_64.tar.xz
fi
# Reuse a previously downloaded package cache if present.
# Fix: ../dl is a directory (it is copied recursively below), so it must be
# tested with -d; the original -f test never matched a directory.
if [ -d "../dl" ]; then
    cp -Rf ../dl/. ./dl/
fi
# Ask for the LAN/admin IP and hostname, then apply device customizations.
read -p "请输入后台地址 [回车默认192.168.5.1]: " ip
ip=${ip:-"192.168.5.1"}
echo "您的后台地址为: $ip"
read -p "请输入hostname(also wifi) [回车默认$firmware]: " host
host=${host:-"$firmware"}
echo "您的hostname为: $host"
rm -Rf devices/*/{files,patches,default-settings,diy}
cp -rf devices/common/* ./
cp -rf devices/$firmware/* ./
./scripts/feeds update -a
cp -Rf ./diy/* ./
if [ -f "devices/common/diy.sh" ]; then
    chmod +x devices/common/diy.sh
    /bin/bash "devices/common/diy.sh"
fi
if [ -f "devices/$firmware/diy.sh" ]; then
    chmod +x devices/$firmware/diy.sh
    /bin/bash "devices/$firmware/diy.sh"
fi
if [ -f "devices/common/default-settings" ]; then
    sed -i "s/10.0.0.1/$ip/" devices/common/default-settings
    cp -f devices/common/default-settings package/*/*/default-settings/files/uci.defaults
fi
if [ -f "devices/$firmware/default-settings" ]; then
    sed -i "s/10.0.0.1/$ip/" devices/$firmware/default-settings
    cat devices/$firmware/default-settings >> package/*/*/default-settings/files/uci.defaults
fi
sed -i "s/DISTRIB_ID.*/DISTRIB_ID=$host/g" package/base-files/files/etc/openwrt_release
# Fix: the next two seds were single-quoted, so the literal strings "$host"
# and "$ip" were written into config_generate instead of the chosen values.
# The IP pattern is also anchored to a real dotted address now.
sed -i "s/OpenWrt/$host/g" package/base-files/files/bin/config_generate
sed -i "s/192\.168\.[0-9]*\.1/$ip/g" package/base-files/files/bin/config_generate
if [ -n "$(ls -A "devices/common/patches" 2>/dev/null)" ]; then
    find "devices/common/patches" -type f -name '*.patch' ! -name '*.revert.patch' -print0 | sort -z | xargs -I % -t -0 -n 1 sh -c "cat '%' | patch -d './' -p1 --forward"
fi
if [ -n "$(ls -A "devices/$firmware/patches" 2>/dev/null)" ]; then
    find "devices/$firmware/patches" -type f -name '*.patch' ! -name '*.revert.patch' -print0 | sort -z | xargs -I % -t -0 -n 1 sh -c "cat '%' | patch -d './' -p1 --forward"
fi
make menuconfig
echo
echo
echo
echo " *****5秒后开始编译*****
1.你可以随时按Ctrl+C停止编译
3.大陆用户编译前请准备好梯子,使用大陆白名单或全局模式"
echo
echo
echo
sleep 3s
make -j$(($(nproc)+1)) download -j$(($(nproc)+1))
make -j$(($(nproc)+1)) || make -j1 V=s
if [ "$?" == "0" ]; then
echo "
编译完成~~~
初始后台地址: $ip
初始用户名密码: root root
"
fi
|
# Merge the generated API swagger with the hand-maintained upload/download,
# default, and jsonschema specs into a single polyaxon_sdk.swagger.json.

# Move generated swagger aside so its name can be reused for the merged file.
mv swagger/v1/polyaxon_sdk.swagger.json swagger/v1/polyaxon_sdk_apis.swagger.json
# Copy upload files
cp swagger/upload.download/owner.artifact.swagger.json swagger/v1/owner.artifact.swagger.json
cp swagger/upload.download/project.artifact.swagger.json swagger/v1/project.artifact.swagger.json
cp swagger/upload.download/run.artifact.swagger.json swagger/v1/run.artifact.swagger.json
cp -r swagger/upload.download/* swagger/v1/
cp -r swagger/default/* swagger/v1/
cp -r swagger/jsonschema/* swagger/v1/
# Concat generated and upload/default files: deep-merge all inputs with jq,
# then overwrite the info block with the canonical title/version/contact.
jq -s '
reduce .[] as $item ({}; . * $item) |
.info.title = "Polyaxon SDKs and REST API specification." |
.info.description = "Polyaxon SDKs and REST API specification." |
.info.version = "1.14.0" |
.info.contact = {"name": "Polyaxon sdk", "url": "https://github.com/polyaxon/polyaxon", "email": "contact@polyaxon.com"}
' swagger/v1/{polyaxon_sdk_apis,owner.artifact,project.artifact,run.artifact,connections.default,earlyStopping.default,hp.default,matrix.default,reference.default,run.default,schedule.default}.swagger.json > "swagger/v1/polyaxon_sdk.swagger.json"
# Concat jsonschema (same merge, written under jsonschema/v1).
jq -s '
reduce .[] as $item ({}; . * $item) |
.info.title = "Polyaxon SDKs and REST API specification." |
.info.description = "Polyaxon SDKs and REST API specification." |
.info.version = "1.14.0" |
.info.contact = {"name": "Polyaxon sdk", "url": "https://github.com/polyaxon/polyaxon", "email": "contact@polyaxon.com"}
' swagger/v1/{polyaxon_sdk,jsonschema.default,k8s.default}.swagger.json > "jsonschema/v1/polyaxon_sdk.swagger.json"
# Delete the intermediate copies now that the merged spec is written.
rm swagger/v1/polyaxon_sdk_apis.swagger.json
rm swagger/v1/*.artifact.swagger.json
rm swagger/v1/*.default.swagger.json
// Enable on-the-fly Babel transpilation for subsequent requires,
// then run the build script.
require('@babel/register');
require('./build');
<reponame>argcv/gsp2md<filename>pkg/md/table.go<gh_stars>0
// Markdown Table Generator
package md
import (
"strings"
)
// Cell is a single markdown-table cell holding its raw text.
type Cell struct {
	Value string
}
// NewPlainTextCell wraps the given plain-text string in a Cell.
func NewPlainTextCell(data string) Cell {
	return Cell{Value: data}
}
// String renders the cell for a single markdown-table line, converting both
// real newlines and literal "\n" sequences into <br/> tags.
func (c *Cell) String() string {
	return strings.NewReplacer("\n", "<br/>", "\\n", "<br/>").Replace(c.Value)
}
// Row is one table row as an ordered list of cells.
type Row struct {
	Columns []Cell
}
// String joins the row's rendered cells with "|" (no outer pipes).
func (r *Row) String() string {
	parts := make([]string, 0, len(r.Columns))
	for i := range r.Columns {
		parts = append(parts, r.Columns[i].String())
	}
	return strings.Join(parts, "|")
}
// Table is a markdown table: a header row plus any number of data rows.
// Rows may have more or fewer columns than Headers; String pads as needed.
type Table struct {
	Headers []Cell
	Rows    []Row
}
// generateAlignmentLabel returns a left-alignment divider (":-", ":--", ...)
// sized to the column width; widths below 2 are clamped so the divider is at
// least ":-". Uses strings.Repeat instead of the original manual loop.
func (t *Table) generateAlignmentLabel(length int) string {
	if length < 2 {
		length = 2
	}
	return ":-" + strings.Repeat("-", length-2)
}
// righpad right-pads s with spaces up to the requested length; strings that
// are already long enough are returned unchanged. Uses strings.Repeat
// instead of the original manual loop. (The misspelled name is kept because
// Table.String calls it.)
func (t *Table) righpad(s string, length int) string {
	if pad := length - len(s); pad > 0 {
		return s + strings.Repeat(" ", pad)
	}
	return s
}
// String renders the table as a markdown table: a padded header line, an
// alignment divider, then one padded line per data row.
func (t *Table) String() string {
	// Column count is the widest of the header row and all data rows.
	maxSize := len(t.Headers)
	for _, r := range t.Rows {
		if len(r.Columns) > maxSize {
			maxSize = len(r.Columns)
		}
	}
	// Per-column display width, minimum 1, grown to the widest cell.
	colPadSizes := []int{}
	for i := 0; i < maxSize; i++ {
		colPadSizes = append(colPadSizes, 1)
	}
	for i, c := range t.Headers {
		cl := len(c.String())
		if colPadSizes[i] < cl {
			colPadSizes[i] = cl
		}
	}
	for _, r := range t.Rows {
		for i, c := range r.Columns {
			cl := len(c.String())
			if colPadSizes[i] < cl {
				colPadSizes[i] = cl
			}
		}
	}
	// Header text for a column index, or "" when a data row is wider than
	// the header row.
	getHeader := func(index int) string {
		if index >= len(t.Headers) || index < 0 {
			return ""
		} else {
			return t.Headers[index].String()
		}
	}
	sheaders := []string{}
	alignments := []string{}
	for i := 0; i < maxSize; i++ {
		cHeader := getHeader(i)
		alignment := t.generateAlignmentLabel(colPadSizes[i])
		sheaders = append(sheaders, t.righpad(cHeader, colPadSizes[i]))
		alignments = append(alignments, alignment)
	}
	sdata := []string{}
	for _, r := range t.Rows {
		srdata := []string{}
		for i, c := range r.Columns {
			srdata = append(srdata, t.righpad(c.String(), colPadSizes[i]))
		}
		// append empty cells
		for i := len(r.Columns); i < maxSize; i++ {
			srdata = append(srdata, t.righpad("", colPadSizes[i]))
		}
		sdata = append(sdata, "|"+strings.Join(srdata, "|")+"|")
	}
	return strings.Join(append([]string{
		"|" + strings.Join(sheaders, "|") + "|",
		"|" + strings.Join(alignments, "|") + "|",
	}, sdata...), "\n")
}
|
def bubble_sort(nums):
    """Sort ``nums`` in place in ascending order and return it.

    The original implementation was an exchange sort that always did O(n^2)
    comparisons; this is a proper bubble sort with the early-exit
    optimization, so already-sorted input finishes in a single O(n) pass.
    The observable result (the list sorted in place and returned) is the same.
    """
    for end in range(len(nums) - 1, 0, -1):
        swapped = False
        for i in range(end):
            if nums[i] > nums[i + 1]:
                nums[i], nums[i + 1] = nums[i + 1], nums[i]
                swapped = True
        if not swapped:  # no swap in a full pass => already sorted
            break
    return nums
<filename>src/main/java/frc/robot/subsystems/Shooter.java
package frc.robot.subsystems;
import edu.wpi.first.wpilibj2.command.SubsystemBase;
public class Shooter extends SubsystemBase {
// With eager singleton initialization, any static variables/fields used in the
// constructor must appear before the "INSTANCE" variable so that they are initialized
// before the constructor is called when the "INSTANCE" variable initializes.
/**
* The Singleton instance of this Shooter. Code should use
* the {@link #getInstance()} method to get the single instance (rather
* than trying to construct an instance of this class.)
*/
private final static Shooter INSTANCE = new Shooter();
/**
* Returns the Singleton instance of this Shooter. This static method
* should be used, rather than the constructor, to get the single instance
* of this class. For example: {@code Shooter.getInstance();}
*/
@SuppressWarnings("WeakerAccess")
public static Shooter getInstance() {
return INSTANCE;
}
/**
* Creates a new instance of this Shooter. This constructor
* is private since this class is a Singleton. Code should use
* the {@link #getInstance()} method to get the singleton instance.
*/
private Shooter() {
// TODO: Set the default command, if any, for this subsystem by calling setDefaultCommand(command)
// in the constructor or in the robot coordination class, such as RobotContainer.
// Also, you can call addChild(name, sendableChild) to associate sendables with the subsystem
// such as SpeedControllers, Encoders, DigitalInputs, etc.
}
} |
# ____ ____ ____ ____ ____ ____ ____ ____ ____ ____ ____
# ||t |||y |||p |||e |||w |||r |||i |||t |||t |||e |||n ||
# ||__|||__|||__|||__|||__|||__|||__|||__|||__|||__|||__||
# |/__\|/__\|/__\|/__\|/__\|/__\|/__\|/__\|/__\|/__\|/__\|
#
# A minimal, informative zsh prompt theme
#
# Absolute directory of this theme file (:A resolves symlinks, :h drops the name).
export TYPEWRITTEN_ROOT=${${(%):-%x}:A:h}
# Git helper functions used by the prompt builders below.
source "$TYPEWRITTEN_ROOT/lib/git.zsh"
# Literal newline used to assemble the multi-line layouts.
BREAK_LINE="
"
# Build $PROMPT for the configured TYPEWRITTEN_PROMPT_LAYOUT.
# $1 — "true" when git info should be shown (used by the pure layouts).
_set_left_prompt() {
  local display_git=$1

  # Arrow is blue after a successful command, red after a failure.
  local prompt_color="%(?,%F{blue},%F{red})"

  local prompt_symbol=">"
  if [ ! -z "$TYPEWRITTEN_SYMBOL" ]; then
    prompt_symbol="$TYPEWRITTEN_SYMBOL"
  fi

  # Active Python virtualenv, unless the venv disables prompt changes itself.
  local virtualenv=""
  if [[ -n $VIRTUAL_ENV ]] && [[ -z $VIRTUAL_ENV_DISABLE_PROMPT ]]; then
    virtualenv="%F{default}($(basename $VIRTUAL_ENV)) "
  fi

  # Last non-zero exit code in red, unless disabled.
  local return_code="%(?,,%F{red}%? )"
  if [ "$TYPEWRITTEN_DISABLE_RETURN_CODE" = true ]; then
    return_code=""
  fi

  local typewritten_prompt="$virtualenv$return_code$prompt_color$prompt_symbol %F{default}"
  local user_host="%F{yellow}%n%F{default}@%F{yellow}%m "

  case "$TYPEWRITTEN_PROMPT_LAYOUT" in
    singleline)
      PROMPT="$typewritten_prompt"
      ;;
    singleline_verbose)
      PROMPT="$user_host$typewritten_prompt"
      ;;
    multiline)
      PROMPT="$user_host$BREAK_LINE$typewritten_prompt"
      ;;
    half_pure)
      if [[ $display_git == true ]]; then
        PROMPT="$BREAK_LINE$(typewritten_git_info_display)$BREAK_LINE$typewritten_prompt"
      else
        PROMPT="$BREAK_LINE$typewritten_prompt"
      fi
      ;;
    pure)
      local directory_path="%~"
      if [[ $display_git == true ]]; then
        PROMPT="$BREAK_LINE%F{magenta}$directory_path %F{default}-> $(typewritten_git_info_display)$BREAK_LINE$typewritten_prompt"
      else
        PROMPT="$BREAK_LINE%F{magenta}$directory_path$BREAK_LINE$typewritten_prompt"
      fi
      ;;
    *)
      PROMPT="$typewritten_prompt"
      ;;
  esac
}
# Build $RPROMPT: the current directory, plus git info when it is not already
# rendered in the left prompt.
# $1 — "true" when git info should be shown
# $2 — "true" when git info is already on the left prompt
# Fix: removed the unused locals git_branch and git_status.
_set_right_prompt() {
  local display_git=$1
  local is_git_info_on_left=$2
  local directory_path="%c"

  local right_prompt_prefix="%F{default}"
  if [ ! -z "$TYPEWRITTEN_RIGHT_PROMPT_PREFIX" ]; then
    right_prompt_prefix+="$TYPEWRITTEN_RIGHT_PROMPT_PREFIX"
  fi
  RPROMPT="$right_prompt_prefix"

  if [[ $display_git == true ]]; then
    # Path shown relative to the git repository root, unless disabled.
    local git_home_display=""
    if [ "$TYPEWRITTEN_GIT_RELATIVE_PATH" != false ]; then
      git_home_display="$(typewritten_git_home_display)"
    fi
    RPROMPT+="%F{magenta}$git_home_display$directory_path"
    if [[ $is_git_info_on_left == false ]]; then
      RPROMPT+=" %F{default}-> $(typewritten_git_info_display)"
    fi
  else
    RPROMPT+="%F{magenta}$directory_path"
  fi
}
# precmd hook: decide whether (and where) git info is displayed, then rebuild
# both prompts.
# Fix: git_hide_status, display_git, and is_git_info_on_left were assigned
# without `local` and leaked into the global namespace on every prompt draw.
_prompt() {
  # oh-my-zsh convention: `git config oh-my-zsh.hide-status 1` hides git info.
  local git_hide_status="$(git config --get oh-my-zsh.hide-status 2>/dev/null)"
  local display_git=false
  local is_git_info_on_left=false

  if [[ "$git_hide_status" != "1" ]] && [[ $(typewritten_is_git_repository) == true ]]; then
    display_git=true
    # The pure layouts render git info in the left prompt.
    if [[ "$TYPEWRITTEN_PROMPT_LAYOUT" == "pure" ]] || [[ "$TYPEWRITTEN_PROMPT_LAYOUT" == "half_pure" ]]; then
      is_git_info_on_left=true
    fi
  fi

  _set_left_prompt $display_git

  # The pure layout uses no right prompt at all.
  if [[ "$TYPEWRITTEN_PROMPT_LAYOUT" != "pure" ]]; then
    _set_right_prompt $display_git $is_git_info_on_left
  fi
}
# prompt cursor fix when exiting vim
# Reset the cursor shape before each prompt (e.g. after exiting vim).
# Escape codes: 1 = block, 3 = underline (default), 5 = beam.
_fix_cursor() {
  local cursor
  case "$TYPEWRITTEN_CURSOR" in
    block) cursor="\e[1 q" ;;
    beam)  cursor="\e[5 q" ;;
    *)     cursor="\e[3 q" ;;
  esac
  echo -ne "$cursor"
}
autoload -U add-zsh-hook
# Before every prompt draw: reset the cursor shape, then rebuild the prompts.
add-zsh-hook precmd _fix_cursor
add-zsh-hook precmd _prompt
# Keep the command line's default foreground color while typing.
zle_highlight=( default:fg=default )
|
<gh_stars>100-1000
import { ChangeEvent, createElement, useEffect, useState } from 'react'
import { NodeProps } from '../types'
import { getValueFormEvent } from '../utils/getValueFormEvent'
import { useFormContext } from './useFormContext'
/**
* To get the NodeComponet to render
* @param opt
* @returns
*/
export function useNodeComponent(opt: Omit<NodeProps, 'handler'>) {
  const { node, children } = opt
  // Setter used purely to force a re-render of this node from outside.
  const [, forceUpdate] = useState({})
  const form = useFormContext()
  const { NODE_TO_UPDATER } = form
  // Register this node's updater and its name<->node mappings; remove the
  // updater on cleanup.
  // NOTE(review): the NAME_TO_NODE / NODE_TO_NAME entries are never removed
  // in the cleanup — confirm whether that is intentional.
  useEffect(() => {
    NODE_TO_UPDATER.set(node, forceUpdate)
    const nodeName = form.getNodeName(node)
    if (nodeName) {
      form.NAME_TO_NODE.set(nodeName, node)
      form.NODE_TO_NAME.set(node, nodeName)
    }
    return () => {
      NODE_TO_UPDATER.delete(node)
    }
  }, [form, NODE_TO_UPDATER, node])
  /**
   * run onFieldInit function
   */
  useEffect(() => {
    const nodeName = form.getNodeName(node)
    form.onFieldInit(nodeName, form)
  }, [form, node])
  // Change/blur handlers passed down to the rendered node component.
  const handler = {
    handleChange: (e: ChangeEvent) => {
      const nodeName = form.getNodeName(node)
      form.change(nodeName, getValueFormEvent(e))
    },
    handleBlur: () => {
      const nodeName = form.getNodeName(node)
      form.blur(nodeName)
    },
  }
  // Nodes explicitly marked invisible render nothing.
  if (typeof node.visible === 'boolean' && !node.visible) return null
  const Cmp = form.getNodeComponent(node)
  if (!Cmp) return null
  return createElement(Cmp, { node, handler, }, children)
}
|
const express = require('express');
const app = express();
const path = require('path');

// Port: environment override with a local default.
const port = process.env.PORT || 2424;
app.set('port', port);

// Use EJS as the engine so plain .html templates can be rendered.
app.engine('html', require('ejs').renderFile);
app.set('view engine', 'html');

app.use(express.static(path.join(__dirname, '../public/')));
app.set('views', path.join(__dirname, '../public/client'));

// Middleware
app.use(express.json());

// View routes
app.get('/', function (req, res) {
  res.render('index.html');
});
app.get('/productos', function (req, res) {
  res.render('Pages/products.html');
});
app.get('/producto/:id', function (req, res) {
  res.render('Pages/productDetail.html');
});

// API routes
app.use('/api/products', require('./routes/products.routes'));
app.use('/api/categories', require('./routes/categories.routes'));

// Catch 404 and forward to the error handler.
// Fix: the original called createError(404) but never required the
// http-errors module, so every unmatched route threw a ReferenceError;
// build the error locally instead (no new dependency needed).
app.use(function (req, res, next) {
  const err = new Error('Not Found');
  err.status = 404;
  next(err);
});

// Error handler
app.use(function (err, req, res, next) {
  // Set locals, only exposing the full error in development.
  res.locals.message = err.message;
  res.locals.error = req.app.get('env') === 'development' ? err : {};

  // Render the error page.
  res.status(err.status || 500);
  res.render('error');
});

module.exports = app;
<reponame>decaelus/vplanet<filename>src/atmesc.c<gh_stars>0
/**
@file atmesc.c
@brief Subroutines that control the integration of the
atmospheric escape model.
@author <NAME> ([<EMAIL>](mailto:<EMAIL>))
@date May 12 2015
@par Description
\rst
This module defines differential equations controlling the evolution
of planetary atmospheres under intense extreme ultraviolet (XUV)
stellar irradiation. The `atmesc <atmesc.html>`_ module implements energy-limited
and diffusion-limited escape for hydrogen/helium atmospheres and water
vapor atmospheres following
:cite:`Luger2015`, :cite:`LugerBarnes2015`, and :cite:`LehmerCatling17`.
\endrst
*/
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include "vplanet.h"
/**
Create a copy of the body at index \p iBody. Used during integration
of the differential equations.
@param dest The body copy
@param src The original body instance
@param foo Unused by this function; appears to exist only for signature compatibility — TODO confirm and document upstream.
@param iNumBodies Number of bodies
@param iBody Current body index
*/
void BodyCopyAtmEsc(BODY *dest,BODY *src,int foo,int iNumBodies,int iBody) {
  /* Straight field-by-field copy of every atmesc quantity for body iBody.
     Any new atmesc member added to BODY must also be copied here. */

  /* Water, oxygen, and envelope inventories. */
  dest[iBody].dSurfaceWaterMass = src[iBody].dSurfaceWaterMass;
  dest[iBody].dOxygenMass = src[iBody].dOxygenMass;
  dest[iBody].dOxygenMantleMass = src[iBody].dOxygenMantleMass;
  dest[iBody].dEnvelopeMass = src[iBody].dEnvelopeMass;

  /* XUV absorption geometry and efficiencies. */
  dest[iBody].dXFrac = src[iBody].dXFrac;
  dest[iBody].dAtmXAbsEffH = src[iBody].dAtmXAbsEffH;
  dest[iBody].dAtmXAbsEffH2O = src[iBody].dAtmXAbsEffH2O;

  /* Loss thresholds and model selectors. */
  dest[iBody].dMinSurfaceWaterMass = src[iBody].dMinSurfaceWaterMass;
  dest[iBody].dMinEnvelopeMass = src[iBody].dMinEnvelopeMass;
  dest[iBody].iWaterLossModel = src[iBody].iWaterLossModel;
  dest[iBody].iAtmXAbsEffH2OModel = src[iBody].iAtmXAbsEffH2OModel;

  /* Escape-rate state. */
  dest[iBody].dKTide = src[iBody].dKTide;
  dest[iBody].dMDotWater = src[iBody].dMDotWater;
  dest[iBody].dFHRef = src[iBody].dFHRef;
  dest[iBody].dOxygenEta = src[iBody].dOxygenEta;
  dest[iBody].dCrossoverMass = src[iBody].dCrossoverMass;
  dest[iBody].bRunaway = src[iBody].bRunaway;
  dest[iBody].iWaterEscapeRegime = src[iBody].iWaterEscapeRegime;
  dest[iBody].dFHDiffLim = src[iBody].dFHDiffLim;
  dest[iBody].iPlanetRadiusModel = src[iBody].iPlanetRadiusModel;
  dest[iBody].bInstantO2Sink = src[iBody].bInstantO2Sink;
  dest[iBody].dRGDuration = src[iBody].dRGDuration;

  /* Radius/pressure structure (Lehmer & Catling 2017 model inputs). */
  dest[iBody].dRadXUV = src[iBody].dRadXUV;
  dest[iBody].dRadSolid = src[iBody].dRadSolid;
  dest[iBody].dPresXUV = src[iBody].dPresXUV;
  dest[iBody].dScaleHeight = src[iBody].dScaleHeight;
  dest[iBody].dThermTemp = src[iBody].dThermTemp;
  dest[iBody].dAtmGasConst = src[iBody].dAtmGasConst;

  /* Stellar forcing and timing. */
  dest[iBody].dFXUV = src[iBody].dFXUV;
  dest[iBody].bCalcFXUV = src[iBody].bCalcFXUV;
  dest[iBody].dJeansTime = src[iBody].dJeansTime;
}
/**************** ATMESC options ********************/
/**
Read the XUV flux from the input file.
@param body A pointer to the current BODY instance
@param control A pointer to the integration CONTROL instance
@param files A pointer to the array of input FILES
@param options A pointer to the OPTIONS instance
@param system A pointer to the SYSTEM instance
@param iFile The current file number
*/
void ReadFXUV(BODY *body,CONTROL *control,FILES *files,OPTIONS *options,SYSTEM *system,int iFile){
  /* This parameter cannot exist in the primary file. */
  int lTmp = -1;
  double dTmp;

  AddOptionDouble(files->Infile[iFile].cIn,options->cName,&dTmp,&lTmp,control->Io.iVerbose);
  if (lTmp >= 0) {
    /* Option present in this file: validate location and store the value. */
    NotPrimaryInput(iFile,options->cName,files->Infile[iFile].cIn,lTmp,control->Io.iVerbose);
    /* Negative inputs are rescaled through dNegativeDouble. */
    body[iFile-1].dFXUV = (dTmp < 0) ?
        dTmp*dNegativeDouble(*options,files->Infile[iFile].cIn,control->Io.iVerbose) : dTmp;
    UpdateFoundOption(&files->Infile[iFile],options,lTmp,iFile);
  } else if (iFile > 0) {
    /* Option absent: use the default (body files only). */
    body[iFile-1].dFXUV = options->dDefault;
  }
}
/**
\rst
Read the thermospheric temperature for the :cite:`LehmerCatling17` atmospheric escape model.
\endrst
@param body A pointer to the current BODY instance
@param control A pointer to the integration CONTROL instance
@param files A pointer to the array of input FILES
@param options A pointer to the OPTIONS instance
@param system A pointer to the SYSTEM instance
@param iFile The current file number
*/
void ReadThermTemp(BODY *body,CONTROL *control,FILES *files,OPTIONS *options,SYSTEM *system,int iFile){
  /* This parameter cannot exist in the primary file. */
  int lTmp = -1;
  double dTmp;

  AddOptionDouble(files->Infile[iFile].cIn,options->cName,&dTmp,&lTmp,control->Io.iVerbose);
  if (lTmp >= 0) {
    NotPrimaryInput(iFile,options->cName,files->Infile[iFile].cIn,lTmp,control->Io.iVerbose);
    /* Negative inputs are rescaled through dNegativeDouble. */
    body[iFile-1].dThermTemp = (dTmp < 0) ?
        dTmp*dNegativeDouble(*options,files->Infile[iFile].cIn,control->Io.iVerbose) : dTmp;
    UpdateFoundOption(&files->Infile[iFile],options,lTmp,iFile);
  } else if (iFile > 0) {
    /* Option absent: use the default (body files only). */
    body[iFile-1].dThermTemp = options->dDefault;
  }
}
/**
Read the atmospheric gas constant the Lehmer and Catling (2017) atmospheric escape model.
@param body A pointer to the current BODY instance
@param control A pointer to the integration CONTROL instance
@param files A pointer to the array of input FILES
@param options A pointer to the OPTIONS instance
@param system A pointer to the SYSTEM instance
@param iFile The current file number
*/
void ReadAtmGasConst(BODY *body,CONTROL *control,FILES *files,OPTIONS *options,SYSTEM *system,int iFile){
  /* This parameter cannot exist in the primary file. */
  int lTmp = -1;
  double dTmp;

  AddOptionDouble(files->Infile[iFile].cIn,options->cName,&dTmp,&lTmp,control->Io.iVerbose);
  if (lTmp >= 0) {
    NotPrimaryInput(iFile,options->cName,files->Infile[iFile].cIn,lTmp,control->Io.iVerbose);
    /* Negative inputs are rescaled through dNegativeDouble. */
    body[iFile-1].dAtmGasConst = (dTmp < 0) ?
        dTmp*dNegativeDouble(*options,files->Infile[iFile].cIn,control->Io.iVerbose) : dTmp;
    UpdateFoundOption(&files->Infile[iFile],options,lTmp,iFile);
  } else if (iFile > 0) {
    /* Option absent: use the default (body files only). */
    body[iFile-1].dAtmGasConst = options->dDefault;
  }
}
/**
Read the Jeans time, the time at which the flow transitions from hydrodynamic to ballistic.
@param body A pointer to the current BODY instance
@param control A pointer to the integration CONTROL instance
@param files A pointer to the array of input FILES
@param options A pointer to the OPTIONS instance
@param system A pointer to the SYSTEM instance
@param iFile The current file number
*/
void ReadJeansTime(BODY *body,CONTROL *control,FILES *files,OPTIONS *options,SYSTEM *system,int iFile) {
  /* This parameter cannot exist in primary file */
  int lTmp = -1;
  double dTmp;

  AddOptionDouble(files->Infile[iFile].cIn,options->cName,&dTmp,&lTmp,control->Io.iVerbose);
  if (lTmp >= 0) {
    NotPrimaryInput(iFile,options->cName,files->Infile[iFile].cIn,lTmp,control->Io.iVerbose);
    if (dTmp < 0) {
      /* Negative inputs are rescaled through dNegativeDouble. */
      body[iFile-1].dJeansTime = dTmp*dNegativeDouble(*options,files->Infile[iFile].cIn,control->Io.iVerbose);
    } else {
      /* Positive inputs are converted from the file's time units. */
      body[iFile-1].dJeansTime = dTmp*fdUnitsTime(control->Units[iFile].iTime);
    }
    UpdateFoundOption(&files->Infile[iFile],options,lTmp,iFile);
  } else if (iFile > 0) {
    /* Option absent: use the default (body files only). */
    body[iFile-1].dJeansTime = options->dDefault;
  }
}
/**
Read the effective XUV absorption pressure for the Lehmer and Catling (2017) model.
@param body A pointer to the current BODY instance
@param control A pointer to the integration CONTROL instance
@param files A pointer to the array of input FILES
@param options A pointer to the OPTIONS instance
@param system A pointer to the SYSTEM instance
@param iFile The current file number
*/
void ReadPresXUV(BODY *body,CONTROL *control,FILES *files,OPTIONS *options,SYSTEM *system,int iFile){
  /* This parameter cannot exist in the primary file. */
  int lTmp = -1;
  double dTmp;

  AddOptionDouble(files->Infile[iFile].cIn,options->cName,&dTmp,&lTmp,control->Io.iVerbose);
  if (lTmp >= 0) {
    NotPrimaryInput(iFile,options->cName,files->Infile[iFile].cIn,lTmp,control->Io.iVerbose);
    /* Negative inputs are rescaled through dNegativeDouble. */
    body[iFile-1].dPresXUV = (dTmp < 0) ?
        dTmp*dNegativeDouble(*options,files->Infile[iFile].cIn,control->Io.iVerbose) : dTmp;
    UpdateFoundOption(&files->Infile[iFile],options,lTmp,iFile);
  } else if (iFile > 0) {
    /* Option absent: use the default (body files only). */
    body[iFile-1].dPresXUV = options->dDefault;
  }
}
/**
Read the water loss model for the Luger and Barnes (2015) atmospheric escape model.
@param body A pointer to the current BODY instance
@param control A pointer to the integration CONTROL instance
@param files A pointer to the array of input FILES
@param options A pointer to the OPTIONS instance
@param system A pointer to the SYSTEM instance
@param iFile The current file number
*/
void ReadWaterLossModel(BODY *body,CONTROL *control,FILES *files,OPTIONS *options,SYSTEM *system,int iFile) {
  /* This parameter cannot exist in primary file */
  int lTmp=-1;
  char cTmp[OPTLEN];
  AddOptionString(files->Infile[iFile].cIn,options->cName,cTmp,&lTmp,control->Io.iVerbose);
  if (lTmp >= 0) {
    NotPrimaryInput(iFile,options->cName,files->Infile[iFile].cIn,lTmp,control->Io.iVerbose);
    /* Match on the first 4 characters of the lower-cased argument. */
    if (!memcmp(sLower(cTmp),"lb15",4)) {
      body[iFile-1].iWaterLossModel = ATMESC_LB15;
    } else if (!memcmp(sLower(cTmp),"lbex",4)) {
      body[iFile-1].iWaterLossModel = ATMESC_LBEXACT;
    } else if (!memcmp(sLower(cTmp),"tian",4)) {
      body[iFile-1].iWaterLossModel = ATMESC_TIAN;
    } else {
      /* Unrecognized model name: report (if verbose) and abort on this line. */
      if (control->Io.iVerbose >= VERBERR)
        fprintf(stderr,"ERROR: Unknown argument to %s: %s. Options are LB15, LBEXACT, or TIAN.\n",options->cName,cTmp);
      LineExit(files->Infile[iFile].cIn,lTmp);
    }
    UpdateFoundOption(&files->Infile[iFile],options,lTmp,iFile);
  } else
    /* Option absent: LBEXACT is the default model (body files only). */
    if (iFile > 0)
      body[iFile-1].iWaterLossModel = ATMESC_LBEXACT;
}
/**
Read the XUV absorption efficiency model for the Luger and Barnes (2015) atmospheric escape model.
@param body A pointer to the current BODY instance
@param control A pointer to the integration CONTROL instance
@param files A pointer to the array of input FILES
@param options A pointer to the OPTIONS instance
@param system A pointer to the SYSTEM instance
@param iFile The current file number
*/
void ReadAtmXAbsEffH2OModel(BODY *body,CONTROL *control,FILES *files,OPTIONS *options,SYSTEM *system,int iFile) {
  /* This parameter cannot exist in primary file */
  int lTmp=-1;
  char cTmp[OPTLEN];
  AddOptionString(files->Infile[iFile].cIn,options->cName,cTmp,&lTmp,control->Io.iVerbose);
  if (lTmp >= 0) {
    NotPrimaryInput(iFile,options->cName,files->Infile[iFile].cIn,lTmp,control->Io.iVerbose);
    /* Match on the first 4 characters of the lower-cased argument. */
    if (!memcmp(sLower(cTmp),"bolm",4)) {
      body[iFile-1].iAtmXAbsEffH2OModel = ATMESC_BOL16;
    } else if (!memcmp(sLower(cTmp),"none",4)) {
      body[iFile-1].iAtmXAbsEffH2OModel = ATMESC_NONE;
    } else {
      /* Unrecognized model name: report (if verbose) and abort on this line. */
      if (control->Io.iVerbose >= VERBERR)
        fprintf(stderr,"ERROR: Unknown argument to %s: %s. Options are BOLMONT16 or NONE.\n",options->cName,cTmp);
      LineExit(files->Infile[iFile].cIn,lTmp);
    }
    UpdateFoundOption(&files->Infile[iFile],options,lTmp,iFile);
  } else
    /* Option absent: NONE is the default (body files only). */
    if (iFile > 0)
      body[iFile-1].iAtmXAbsEffH2OModel = ATMESC_NONE;
}
/**
Read the planet radius model.
@param body A pointer to the current BODY instance
@param control A pointer to the integration CONTROL instance
@param files A pointer to the array of input FILES
@param options A pointer to the OPTIONS instance
@param system A pointer to the SYSTEM instance
@param iFile The current file number
*/
void ReadPlanetRadiusModel(BODY *body,CONTROL *control,FILES *files,OPTIONS *options,SYSTEM *system,int iFile) {
  /* This parameter cannot exist in primary file */
  int lTmp=-1;
  char cTmp[OPTLEN];
  AddOptionString(files->Infile[iFile].cIn,options->cName,cTmp,&lTmp,control->Io.iVerbose);
  if (lTmp >= 0) {
    NotPrimaryInput(iFile,options->cName,files->Infile[iFile].cIn,lTmp,control->Io.iVerbose);
    /* Match on the first 2 characters of the lower-cased argument:
       lo->LOPEZ12, le->LEHMER17, pr->PROXCENB, no->NONE. */
    if (!memcmp(sLower(cTmp),"lo",2)) {
      body[iFile-1].iPlanetRadiusModel = ATMESC_LOP12;
    } else if (!memcmp(sLower(cTmp),"le",2)) {
      body[iFile-1].iPlanetRadiusModel = ATMESC_LEHMER17;
    } else if (!memcmp(sLower(cTmp),"pr",2)) {
      body[iFile-1].iPlanetRadiusModel = ATMESC_PROXCENB;
    } else if (!memcmp(sLower(cTmp),"no",2)) {
      body[iFile-1].iPlanetRadiusModel = ATMESC_NONE;
    }
    else {
      /* Unrecognized model name: report (if verbose) and abort on this line. */
      if (control->Io.iVerbose >= VERBERR)
        fprintf(stderr,"ERROR: Unknown argument to %s: %s. Options are LOPEZ12, PROXCENB, LEHMER17 or NONE.\n",options->cName,cTmp);
      LineExit(files->Infile[iFile].cIn,lTmp);
    }
    UpdateFoundOption(&files->Infile[iFile],options,lTmp,iFile);
  } else
    /* Option absent: NONE is the default (body files only). */
    if (iFile > 0)
      body[iFile-1].iPlanetRadiusModel = ATMESC_NONE;
}
/**
Read the parameter that controls surface O2 sinks for the Luger and Barnes (2015) model.
@param body A pointer to the current BODY instance
@param control A pointer to the integration CONTROL instance
@param files A pointer to the array of input FILES
@param options A pointer to the OPTIONS instance
@param system A pointer to the SYSTEM instance
@param iFile The current file number
*/
void ReadInstantO2Sink(BODY *body,CONTROL *control,FILES *files,OPTIONS *options,SYSTEM *system,int iFile) {
  /* This parameter cannot exist in primary file */
  int lTmp = -1;
  int bTmp;

  AddOptionBool(files->Infile[iFile].cIn,options->cName,&bTmp,&lTmp,control->Io.iVerbose);
  if (lTmp >= 0) {
    /* Flag explicitly set in this file. */
    NotPrimaryInput(iFile,options->cName,files->Infile[iFile].cIn,lTmp,control->Io.iVerbose);
    body[iFile-1].bInstantO2Sink = bTmp;
    UpdateFoundOption(&files->Infile[iFile],options,lTmp,iFile);
  } else if (iFile > 0) {
    /* Flag absent: assign the option's default. */
    AssignDefaultInt(options,&body[iFile-1].bInstantO2Sink,files->iNumInputs);
  }
}
/**
Read the planet's effective XUV radius fraction.

@param body A pointer to the current BODY instance
@param control A pointer to the integration CONTROL instance
@param files A pointer to the array of input FILES
@param options A pointer to the OPTIONS instance
@param system A pointer to the SYSTEM instance
@param iFile The current file number
*/
void ReadXFrac(BODY *body,CONTROL *control,FILES *files,OPTIONS *options,SYSTEM *system,int iFile) {
  /* This option is forbidden in the primary file. */
  double dVal;
  int iLine = -1;
  AddOptionDouble(files->Infile[iFile].cIn,options->cName,&dVal,&iLine,control->Io.iVerbose);
  if (iLine < 0) {
    /* Option absent: body files get the default. */
    if (iFile > 0)
      body[iFile-1].dXFrac = options->dDefault;
    return;
  }
  NotPrimaryInput(iFile,options->cName,files->Infile[iFile].cIn,iLine,control->Io.iVerbose);
  /* Negative values are meaningless for a radius fraction. */
  if (dVal < 0) {
    if (control->Io.iVerbose >= VERBERR)
      fprintf(stderr,"ERROR: %s must be >= 0.\n",options->cName);
    LineExit(files->Infile[iFile].cIn,iLine);
  }
  body[iFile-1].dXFrac = dVal;
  UpdateFoundOption(&files->Infile[iFile],options,iLine,iFile);
}
/**
Read the XUV absorption efficiency for hydrogen.

@param body A pointer to the current BODY instance
@param control A pointer to the integration CONTROL instance
@param files A pointer to the array of input FILES
@param options A pointer to the OPTIONS instance
@param system A pointer to the SYSTEM instance
@param iFile The current file number
*/
void ReadAtmXAbsEffH(BODY *body,CONTROL *control,FILES *files,OPTIONS *options,SYSTEM *system,int iFile) {
  /* This option is forbidden in the primary file. */
  double dVal;
  int iLine = -1;
  AddOptionDouble(files->Infile[iFile].cIn,options->cName,&dVal,&iLine,control->Io.iVerbose);
  if (iLine < 0) {
    /* Option absent: body files get the default. */
    if (iFile > 0)
      body[iFile-1].dAtmXAbsEffH = options->dDefault;
    return;
  }
  NotPrimaryInput(iFile,options->cName,files->Infile[iFile].cIn,iLine,control->Io.iVerbose);
  /* The efficiency must be non-negative. */
  if (dVal < 0) {
    if (control->Io.iVerbose >= VERBERR)
      fprintf(stderr,"ERROR: %s must be >= 0.\n",options->cName);
    LineExit(files->Infile[iFile].cIn,iLine);
  }
  body[iFile-1].dAtmXAbsEffH = dVal;
  UpdateFoundOption(&files->Infile[iFile],options,iLine,iFile);
}
/**
Read the XUV absorption efficiency for water.

@param body A pointer to the current BODY instance
@param control A pointer to the integration CONTROL instance
@param files A pointer to the array of input FILES
@param options A pointer to the OPTIONS instance
@param system A pointer to the SYSTEM instance
@param iFile The current file number
*/
void ReadAtmXAbsEffH2O(BODY *body,CONTROL *control,FILES *files,OPTIONS *options,SYSTEM *system,int iFile) {
  /* This option is forbidden in the primary file. */
  double dVal;
  int iLine = -1;
  AddOptionDouble(files->Infile[iFile].cIn,options->cName,&dVal,&iLine,control->Io.iVerbose);
  if (iLine < 0) {
    /* Option absent: body files get the default. */
    if (iFile > 0)
      body[iFile-1].dAtmXAbsEffH2O = options->dDefault;
    return;
  }
  NotPrimaryInput(iFile,options->cName,files->Infile[iFile].cIn,iLine,control->Io.iVerbose);
  /* The efficiency must be non-negative. */
  if (dVal < 0) {
    if (control->Io.iVerbose >= VERBERR)
      fprintf(stderr,"ERROR: %s must be >= 0.\n",options->cName);
    LineExit(files->Infile[iFile].cIn,iLine);
  }
  body[iFile-1].dAtmXAbsEffH2O = dVal;
  UpdateFoundOption(&files->Infile[iFile],options,iLine,iFile);
}
/**
Read the planet's initial gaseous envelope mass.

A negative input is interpreted in the option's custom unit (dNeg scaling).

@param body A pointer to the current BODY instance
@param control A pointer to the integration CONTROL instance
@param files A pointer to the array of input FILES
@param options A pointer to the OPTIONS instance
@param system A pointer to the SYSTEM instance
@param iFile The current file number
*/
void ReadEnvelopeMass(BODY *body,CONTROL *control,FILES *files,OPTIONS *options,SYSTEM *system,int iFile) {
  /* This option is forbidden in the primary file. */
  double dVal,dMass;
  int iLine = -1;
  AddOptionDouble(files->Infile[iFile].cIn,options->cName,&dVal,&iLine,control->Io.iVerbose);
  if (iLine < 0) {
    /* Option absent: body files get the default. */
    if (iFile > 0)
      body[iFile-1].dEnvelopeMass = options->dDefault;
    return;
  }
  NotPrimaryInput(iFile,options->cName,files->Infile[iFile].cIn,iLine,control->Io.iVerbose);
  dMass = dVal;
  if (dVal < 0) {
    /* Negative => value given in the option's alternate units. */
    dMass = dVal*dNegativeDouble(*options,files->Infile[iFile].cIn,control->Io.iVerbose);
  }
  body[iFile-1].dEnvelopeMass = dMass;
  UpdateFoundOption(&files->Infile[iFile],options,iLine,iFile);
}
/**
Read the planet's initial atmospheric oxygen mass (Luger and Barnes 2015 model).

@param body A pointer to the current BODY instance
@param control A pointer to the integration CONTROL instance
@param files A pointer to the array of input FILES
@param options A pointer to the OPTIONS instance
@param system A pointer to the SYSTEM instance
@param iFile The current file number
*/
void ReadOxygenMass(BODY *body,CONTROL *control,FILES *files,OPTIONS *options,SYSTEM *system,int iFile) {
  /* This option is forbidden in the primary file. */
  double dVal;
  int iLine = -1;
  AddOptionDouble(files->Infile[iFile].cIn,options->cName,&dVal,&iLine,control->Io.iVerbose);
  if (iLine < 0) {
    /* Option absent: body files get the default. */
    if (iFile > 0)
      body[iFile-1].dOxygenMass = options->dDefault;
    return;
  }
  NotPrimaryInput(iFile,options->cName,files->Infile[iFile].cIn,iLine,control->Io.iVerbose);
  /* Masses cannot be negative. */
  if (dVal < 0) {
    if (control->Io.iVerbose >= VERBERR)
      fprintf(stderr,"ERROR: %s must be >= 0.\n",options->cName);
    LineExit(files->Infile[iFile].cIn,iLine);
  }
  body[iFile-1].dOxygenMass = dVal;
  UpdateFoundOption(&files->Infile[iFile],options,iLine,iFile);
}
/**
Read the planet's initial mantle oxygen mass (Luger and Barnes 2015 model).

@param body A pointer to the current BODY instance
@param control A pointer to the integration CONTROL instance
@param files A pointer to the array of input FILES
@param options A pointer to the OPTIONS instance
@param system A pointer to the SYSTEM instance
@param iFile The current file number
*/
void ReadOxygenMantleMass(BODY *body,CONTROL *control,FILES *files,OPTIONS *options,SYSTEM *system,int iFile) {
  /* This option is forbidden in the primary file. */
  double dVal;
  int iLine = -1;
  AddOptionDouble(files->Infile[iFile].cIn,options->cName,&dVal,&iLine,control->Io.iVerbose);
  if (iLine < 0) {
    /* Option absent: body files get the default. */
    if (iFile > 0)
      body[iFile-1].dOxygenMantleMass = options->dDefault;
    return;
  }
  NotPrimaryInput(iFile,options->cName,files->Infile[iFile].cIn,iLine,control->Io.iVerbose);
  /* Masses cannot be negative. */
  if (dVal < 0) {
    if (control->Io.iVerbose >= VERBERR)
      fprintf(stderr,"ERROR: %s must be >= 0.\n",options->cName);
    LineExit(files->Infile[iFile].cIn,iLine);
  }
  body[iFile-1].dOxygenMantleMass = dVal;
  UpdateFoundOption(&files->Infile[iFile],options,iLine,iFile);
}
/**
Read the planet's initial surface water mass.

A negative input is interpreted in the option's custom unit (dNeg scaling).

@param body A pointer to the current BODY instance
@param control A pointer to the integration CONTROL instance
@param files A pointer to the array of input FILES
@param options A pointer to the OPTIONS instance
@param system A pointer to the SYSTEM instance
@param iFile The current file number
*/
void ReadSurfaceWaterMass(BODY *body,CONTROL *control,FILES *files,OPTIONS *options,SYSTEM *system,int iFile) {
  /* This option is forbidden in the primary file. */
  double dVal,dMass;
  int iLine = -1;
  AddOptionDouble(files->Infile[iFile].cIn,options->cName,&dVal,&iLine,control->Io.iVerbose);
  if (iLine < 0) {
    /* Option absent: body files get the default. */
    if (iFile > 0)
      body[iFile-1].dSurfaceWaterMass = options->dDefault;
    return;
  }
  NotPrimaryInput(iFile,options->cName,files->Infile[iFile].cIn,iLine,control->Io.iVerbose);
  dMass = dVal;
  if (dVal < 0) {
    /* Negative => value given in the option's alternate units. */
    dMass = dVal*dNegativeDouble(*options,files->Infile[iFile].cIn,control->Io.iVerbose);
  }
  body[iFile-1].dSurfaceWaterMass = dMass;
  UpdateFoundOption(&files->Infile[iFile],options,iLine,iFile);
}
/* Halts */
/**
Read the flag that controls whether the code halts when the planet is desiccated.

@param body A pointer to the current BODY instance
@param control A pointer to the integration CONTROL instance
@param files A pointer to the array of input FILES
@param options A pointer to the OPTIONS instance
@param system A pointer to the SYSTEM instance
@param iFile The current file number
*/
void ReadHaltMinSurfaceWaterMass(BODY *body,CONTROL *control,FILES *files,OPTIONS *options,SYSTEM *system,int iFile) {
  /* This option is forbidden in the primary file. */
  int bVal;
  int iLine = -1;
  AddOptionBool(files->Infile[iFile].cIn,options->cName,&bVal,&iLine,control->Io.iVerbose);
  if (iLine < 0) {
    /* Option absent: body files get the default. */
    if (iFile > 0)
      AssignDefaultInt(options,&control->Halt[iFile-1].bSurfaceDesiccated,files->iNumInputs);
    return;
  }
  NotPrimaryInput(iFile,options->cName,files->Infile[iFile].cIn,iLine,control->Io.iVerbose);
  control->Halt[iFile-1].bSurfaceDesiccated = bVal;
  UpdateFoundOption(&files->Infile[iFile],options,iLine,iFile);
}
/**
Read the minimum surface water mass below which the surface is considered desiccated.

A negative input is interpreted in the option's custom unit (dNeg scaling).

@param body A pointer to the current BODY instance
@param control A pointer to the integration CONTROL instance
@param files A pointer to the array of input FILES
@param options A pointer to the OPTIONS instance
@param system A pointer to the SYSTEM instance
@param iFile The current file number
*/
void ReadMinSurfaceWaterMass(BODY *body,CONTROL *control,FILES *files,OPTIONS *options,SYSTEM *system,int iFile) {
  /* This option is forbidden in the primary file. */
  double dVal,dMass;
  int iLine = -1;
  AddOptionDouble(files->Infile[iFile].cIn,options->cName,&dVal,&iLine,control->Io.iVerbose);
  if (iLine < 0) {
    /* Option absent: body files get the default. */
    if (iFile > 0)
      body[iFile-1].dMinSurfaceWaterMass = options->dDefault;
    return;
  }
  NotPrimaryInput(iFile,options->cName,files->Infile[iFile].cIn,iLine,control->Io.iVerbose);
  dMass = dVal;
  if (dVal < 0) {
    /* Negative => value given in the option's alternate units. */
    dMass = dVal*dNegativeDouble(*options,files->Infile[iFile].cIn,control->Io.iVerbose);
  }
  body[iFile-1].dMinSurfaceWaterMass = dMass;
  UpdateFoundOption(&files->Infile[iFile],options,iLine,iFile);
}
/**
Read the flag that controls whether the code halts when the planet's envelope is
fully evaporated.

@param body A pointer to the current BODY instance
@param control A pointer to the integration CONTROL instance
@param files A pointer to the array of input FILES
@param options A pointer to the OPTIONS instance
@param system A pointer to the SYSTEM instance
@param iFile The current file number
*/
void ReadHaltMinEnvelopeMass(BODY *body,CONTROL *control,FILES *files,OPTIONS *options,SYSTEM *system,int iFile) {
  /* This option is forbidden in the primary file. */
  int bVal;
  int iLine = -1;
  AddOptionBool(files->Infile[iFile].cIn,options->cName,&bVal,&iLine,control->Io.iVerbose);
  if (iLine < 0) {
    /* Option absent: body files get the default. */
    if (iFile > 0)
      AssignDefaultInt(options,&control->Halt[iFile-1].bEnvelopeGone,files->iNumInputs);
    return;
  }
  NotPrimaryInput(iFile,options->cName,files->Infile[iFile].cIn,iLine,control->Io.iVerbose);
  control->Halt[iFile-1].bEnvelopeGone = bVal;
  UpdateFoundOption(&files->Infile[iFile],options,iLine,iFile);
}
/**
Read the minimum envelope mass below which the envelope is considered gone.

A negative input is interpreted in the option's custom unit (dNeg scaling).

@param body A pointer to the current BODY instance
@param control A pointer to the integration CONTROL instance
@param files A pointer to the array of input FILES
@param options A pointer to the OPTIONS instance
@param system A pointer to the SYSTEM instance
@param iFile The current file number
*/
void ReadMinEnvelopeMass(BODY *body,CONTROL *control,FILES *files,OPTIONS *options,SYSTEM *system,int iFile) {
  /* This option is forbidden in the primary file. */
  double dVal,dMass;
  int iLine = -1;
  AddOptionDouble(files->Infile[iFile].cIn,options->cName,&dVal,&iLine,control->Io.iVerbose);
  if (iLine < 0) {
    /* Option absent: body files get the default. */
    if (iFile > 0)
      body[iFile-1].dMinEnvelopeMass = options->dDefault;
    return;
  }
  NotPrimaryInput(iFile,options->cName,files->Infile[iFile].cIn,iLine,control->Io.iVerbose);
  dMass = dVal;
  if (dVal < 0) {
    /* Negative => value given in the option's alternate units. */
    dMass = dVal*dNegativeDouble(*options,files->Infile[iFile].cIn,control->Io.iVerbose);
  }
  body[iFile-1].dMinEnvelopeMass = dMass;
  UpdateFoundOption(&files->Infile[iFile],options,iLine,iFile);
}
/**
Initialize the user options for the atmospheric escape module.

For every option: register its name, description, printed default, numeric
default (when applicable), type code, multi-file flag, optional negative-unit
conversion, and the reader function.

@param options A pointer to the OPTIONS instance
@param fnRead Array of pointers to the functions that read in the options
*/
void InitializeOptionsAtmEsc(OPTIONS *options,fnReadOption fnRead[]) {
  sprintf(options[OPT_XFRAC].cName,"dXFrac");
  sprintf(options[OPT_XFRAC].cDescr,"Fraction of planet radius in X-ray/XUV");
  sprintf(options[OPT_XFRAC].cDefault,"1");
  options[OPT_XFRAC].dDefault = 1;
  options[OPT_XFRAC].iType = 2;
  options[OPT_XFRAC].iMultiFile = 1;
  fnRead[OPT_XFRAC] = &ReadXFrac;
  sprintf(options[OPT_ATMXABSEFFH].cName,"dAtmXAbsEffH");
  sprintf(options[OPT_ATMXABSEFFH].cDescr,"Hydrogen X-ray/XUV absorption efficiency (epsilon)");
  sprintf(options[OPT_ATMXABSEFFH].cDefault,"0.15");
  options[OPT_ATMXABSEFFH].dDefault = 0.15;
  options[OPT_ATMXABSEFFH].iType = 2;
  options[OPT_ATMXABSEFFH].iMultiFile = 1;
  fnRead[OPT_ATMXABSEFFH] = &ReadAtmXAbsEffH;
  sprintf(options[OPT_ATMXABSEFFH2O].cName,"dAtmXAbsEffH2O");
  sprintf(options[OPT_ATMXABSEFFH2O].cDescr,"Water X-ray/XUV absorption efficiency (epsilon)");
  /* BUGFIX: the printed default previously said "0.30" while the value
     actually applied (dDefault) was 0.15; the help text now matches the
     behavior. */
  sprintf(options[OPT_ATMXABSEFFH2O].cDefault,"0.15");
  options[OPT_ATMXABSEFFH2O].dDefault = 0.15;
  options[OPT_ATMXABSEFFH2O].iType = 2;
  options[OPT_ATMXABSEFFH2O].iMultiFile = 1;
  fnRead[OPT_ATMXABSEFFH2O] = &ReadAtmXAbsEffH2O;
  sprintf(options[OPT_ATMXABSEFFH2OMODEL].cName,"sAtmXAbsEffH2OModel");
  sprintf(options[OPT_ATMXABSEFFH2OMODEL].cDescr,"Water X-ray/XUV absorption efficiency evolution model");
  sprintf(options[OPT_ATMXABSEFFH2OMODEL].cDefault,"NONE");
  options[OPT_ATMXABSEFFH2OMODEL].iType = 3;
  options[OPT_ATMXABSEFFH2OMODEL].iMultiFile = 1;
  fnRead[OPT_ATMXABSEFFH2OMODEL] = &ReadAtmXAbsEffH2OModel;
  sprintf(options[OPT_SURFACEWATERMASS].cName,"dSurfWaterMass");
  sprintf(options[OPT_SURFACEWATERMASS].cDescr,"Initial Surface Water Mass");
  sprintf(options[OPT_SURFACEWATERMASS].cDefault,"0");
  options[OPT_SURFACEWATERMASS].dDefault = 0;
  options[OPT_SURFACEWATERMASS].iType = 2;
  options[OPT_SURFACEWATERMASS].iMultiFile = 1;
  options[OPT_SURFACEWATERMASS].dNeg = TOMASS;
  sprintf(options[OPT_SURFACEWATERMASS].cNeg,"Terrestrial Oceans (TO)");
  fnRead[OPT_SURFACEWATERMASS] = &ReadSurfaceWaterMass;
  sprintf(options[OPT_OXYGENMASS].cName,"dOxygenMass");
  sprintf(options[OPT_OXYGENMASS].cDescr,"Initial Oxygen Mass");
  sprintf(options[OPT_OXYGENMASS].cDefault,"0");
  options[OPT_OXYGENMASS].dDefault = 0;
  options[OPT_OXYGENMASS].iType = 2;
  options[OPT_OXYGENMASS].iMultiFile = 1;
  fnRead[OPT_OXYGENMASS] = &ReadOxygenMass;
  sprintf(options[OPT_OXYGENMANTLEMASS].cName,"dOxygenMantleMass");
  sprintf(options[OPT_OXYGENMANTLEMASS].cDescr,"Initial Oxygen Mass in the Mantle");
  sprintf(options[OPT_OXYGENMANTLEMASS].cDefault,"0");
  options[OPT_OXYGENMANTLEMASS].dDefault = 0;
  options[OPT_OXYGENMANTLEMASS].iType = 2;
  options[OPT_OXYGENMANTLEMASS].iMultiFile = 1;
  fnRead[OPT_OXYGENMANTLEMASS] = &ReadOxygenMantleMass;
  sprintf(options[OPT_WATERLOSSMODEL].cName,"sWaterLossModel");
  sprintf(options[OPT_WATERLOSSMODEL].cDescr,"Water Loss and Oxygen Buildup Model");
  sprintf(options[OPT_WATERLOSSMODEL].cDefault,"LBEXACT");
  options[OPT_WATERLOSSMODEL].iType = 3;
  options[OPT_WATERLOSSMODEL].iMultiFile = 1;
  fnRead[OPT_WATERLOSSMODEL] = &ReadWaterLossModel;
  sprintf(options[OPT_PLANETRADIUSMODEL].cName,"sPlanetRadiusModel");
  sprintf(options[OPT_PLANETRADIUSMODEL].cDescr,"Gaseous Planet Radius Model");
  sprintf(options[OPT_PLANETRADIUSMODEL].cDefault,"NONE");
  options[OPT_PLANETRADIUSMODEL].iType = 3;
  options[OPT_PLANETRADIUSMODEL].iMultiFile = 1;
  fnRead[OPT_PLANETRADIUSMODEL] = &ReadPlanetRadiusModel;
  sprintf(options[OPT_INSTANTO2SINK].cName,"bInstantO2Sink");
  sprintf(options[OPT_INSTANTO2SINK].cDescr,"Is oxygen absorbed instantaneously at the surface?");
  sprintf(options[OPT_INSTANTO2SINK].cDefault,"0");
  options[OPT_INSTANTO2SINK].iType = 0;
  options[OPT_INSTANTO2SINK].iMultiFile = 1;
  fnRead[OPT_INSTANTO2SINK] = &ReadInstantO2Sink;
  sprintf(options[OPT_ENVELOPEMASS].cName,"dEnvelopeMass");
  sprintf(options[OPT_ENVELOPEMASS].cDescr,"Initial Envelope Mass");
  sprintf(options[OPT_ENVELOPEMASS].cDefault,"0");
  options[OPT_ENVELOPEMASS].dDefault = 0;
  options[OPT_ENVELOPEMASS].iType = 2;
  options[OPT_ENVELOPEMASS].iMultiFile = 1;
  options[OPT_ENVELOPEMASS].dNeg = MEARTH;
  sprintf(options[OPT_ENVELOPEMASS].cNeg,"Earth");
  fnRead[OPT_ENVELOPEMASS] = &ReadEnvelopeMass;
  sprintf(options[OPT_HALTDESICCATED].cName,"bHaltSurfaceDesiccated");
  sprintf(options[OPT_HALTDESICCATED].cDescr,"Halt at Desiccation?");
  sprintf(options[OPT_HALTDESICCATED].cDefault,"0");
  options[OPT_HALTDESICCATED].iType = 0;
  fnRead[OPT_HALTDESICCATED] = &ReadHaltMinSurfaceWaterMass;
  sprintf(options[OPT_HALTENVELOPEGONE].cName,"bHaltEnvelopeGone");
  sprintf(options[OPT_HALTENVELOPEGONE].cDescr,"Halt When Envelope Evaporates?");
  sprintf(options[OPT_HALTENVELOPEGONE].cDefault,"0");
  options[OPT_HALTENVELOPEGONE].iType = 0;
  fnRead[OPT_HALTENVELOPEGONE] = &ReadHaltMinEnvelopeMass;
  sprintf(options[OPT_MINSURFACEWATERMASS].cName,"dMinSurfWaterMass");
  sprintf(options[OPT_MINSURFACEWATERMASS].cDescr,"Minimum Surface Water Mass");
  sprintf(options[OPT_MINSURFACEWATERMASS].cDefault,"1.e-5 TO");
  options[OPT_MINSURFACEWATERMASS].dDefault = 1.e-5*TOMASS;
  options[OPT_MINSURFACEWATERMASS].iType = 2;
  options[OPT_MINSURFACEWATERMASS].dNeg = TOMASS;
  sprintf(options[OPT_MINSURFACEWATERMASS].cNeg,"Terrestrial Oceans (TO)");
  fnRead[OPT_MINSURFACEWATERMASS] = &ReadMinSurfaceWaterMass;
  sprintf(options[OPT_MINENVELOPEMASS].cName,"dMinEnvelopeMass");
  sprintf(options[OPT_MINENVELOPEMASS].cDescr,"Minimum Envelope Mass");
  sprintf(options[OPT_MINENVELOPEMASS].cDefault,"1.e-8 Earth");
  options[OPT_MINENVELOPEMASS].dDefault = 1.e-8*MEARTH;
  options[OPT_MINENVELOPEMASS].iType = 2;
  options[OPT_MINENVELOPEMASS].dNeg = MEARTH;
  sprintf(options[OPT_MINENVELOPEMASS].cNeg,"Earth");
  fnRead[OPT_MINENVELOPEMASS] = &ReadMinEnvelopeMass;
  sprintf(options[OPT_THERMTEMP].cName,"dThermTemp");
  sprintf(options[OPT_THERMTEMP].cDescr,"Thermosphere temperature");
  sprintf(options[OPT_THERMTEMP].cDefault,"880");
  options[OPT_THERMTEMP].dDefault = 880;
  options[OPT_THERMTEMP].iType = 2;
  options[OPT_THERMTEMP].iMultiFile = 1;
  fnRead[OPT_THERMTEMP] = &ReadThermTemp;
  sprintf(options[OPT_JEANSTIME].cName,"dJeansTime");
  sprintf(options[OPT_JEANSTIME].cDescr,"Time at which flow transitions to Jeans escape");
  sprintf(options[OPT_JEANSTIME].cDefault,"1 Gyr");
  options[OPT_JEANSTIME].dDefault = 1.e9 * YEARSEC;
  /* BUGFIX: dJeansTime is a double option (it has dDefault and dNeg) but was
     registered with iType = 0 (boolean); every other double here uses 2. */
  options[OPT_JEANSTIME].iType = 2;
  options[OPT_JEANSTIME].iMultiFile = 1;
  options[OPT_JEANSTIME].dNeg = 1.e9 * YEARSEC;
  sprintf(options[OPT_JEANSTIME].cNeg,"Gyr");
  fnRead[OPT_JEANSTIME] = &ReadJeansTime;
  sprintf(options[OPT_PRESXUV].cName,"dPresXUV");
  sprintf(options[OPT_PRESXUV].cDescr,"Pressure at base of Thermosphere");
  sprintf(options[OPT_PRESXUV].cDefault,"5 Pa");
  options[OPT_PRESXUV].dDefault = 5.0;
  options[OPT_PRESXUV].iType = 2;
  options[OPT_PRESXUV].iMultiFile = 1;
  fnRead[OPT_PRESXUV] = &ReadPresXUV;
  sprintf(options[OPT_ATMGASCONST].cName,"dAtmGasConst");
  sprintf(options[OPT_ATMGASCONST].cDescr,"Atmospheric Gas Constant");
  sprintf(options[OPT_ATMGASCONST].cDefault,"4124");
  options[OPT_ATMGASCONST].dDefault = 4124.0;
  options[OPT_ATMGASCONST].iType = 2;
  options[OPT_ATMGASCONST].iMultiFile = 1;
  fnRead[OPT_ATMGASCONST] = &ReadAtmGasConst;
  /* NOTE(review): dFXUV registers no cDefault/dDefault; presumably the flux is
     computed from the orbit when unset (see bCalcFXUV) -- confirm. */
  sprintf(options[OPT_FXUV].cName,"dFXUV");
  sprintf(options[OPT_FXUV].cDescr,"XUV Flux");
  options[OPT_FXUV].iType = 2;
  options[OPT_FXUV].iMultiFile = 1;
  fnRead[OPT_FXUV] = &ReadFXUV;
}
/**
Loop through the input files and read all user options for the atmospheric
escape module.

@param body A pointer to the current BODY instance
@param control A pointer to the integration CONTROL instance
@param files A pointer to the array of input FILES
@param options A pointer to the OPTIONS instance
@param system A pointer to the SYSTEM instance
@param fnRead Array of pointers to the functions that read in the options
@param iBody The current BODY number
*/
void ReadOptionsAtmEsc(BODY *body,CONTROL *control,FILES *files,OPTIONS *options,SYSTEM *system,fnReadOption fnRead[],int iBody) {
  int iOpt;
  for (iOpt = OPTSTARTATMESC; iOpt < OPTENDATMESC; iOpt++) {
    /* iType == -1 marks an unused option slot. */
    if (options[iOpt].iType == -1)
      continue;
    /* iBody+1 maps the body index to its input-file index. */
    fnRead[iOpt](body,control,files,&options[iOpt],system,iBody+1);
  }
}
/******************* Verify ATMESC ******************/
/**
Initialize the differential-equation matrix entry for the surface water mass.

@param body A pointer to the current BODY instance
@param options A pointer to the OPTIONS instance
@param update A pointer to the UPDATE instance
@param dAge The current age of the system
@param iBody The current BODY number
*/
void VerifySurfaceWaterMass(BODY *body,OPTIONS *options,UPDATE *update,double dAge,int iBody) {
  /* One ODE involving only this body. */
  UPDATE *pUpdate = &update[iBody];
  int iVar = pUpdate->iSurfaceWaterMass;
  pUpdate->iaType[iVar][0] = 1;
  pUpdate->iNumBodies[iVar][0] = 1;
  pUpdate->iaBody[iVar][0] = malloc(pUpdate->iNumBodies[iVar][0]*sizeof(int));
  pUpdate->iaBody[iVar][0][0] = iBody;
  pUpdate->pdDSurfaceWaterMassDtAtmesc = &pUpdate->daDerivProc[iVar][0];
}
/**
Initialize the differential-equation matrix entry for the atmospheric oxygen mass.

@param body A pointer to the current BODY instance
@param options A pointer to the OPTIONS instance
@param update A pointer to the UPDATE instance
@param dAge The current age of the system
@param iBody The current BODY number
*/
void VerifyOxygenMass(BODY *body,OPTIONS *options,UPDATE *update,double dAge,int iBody) {
  /* One ODE involving only this body. */
  UPDATE *pUpdate = &update[iBody];
  int iVar = pUpdate->iOxygenMass;
  pUpdate->iaType[iVar][0] = 1;
  pUpdate->iNumBodies[iVar][0] = 1;
  pUpdate->iaBody[iVar][0] = malloc(pUpdate->iNumBodies[iVar][0]*sizeof(int));
  pUpdate->iaBody[iVar][0][0] = iBody;
  pUpdate->pdDOxygenMassDtAtmesc = &pUpdate->daDerivProc[iVar][0];
}
/**
Initialize the differential-equation matrix entry for the mantle oxygen mass.

@param body A pointer to the current BODY instance
@param options A pointer to the OPTIONS instance
@param update A pointer to the UPDATE instance
@param dAge The current age of the system
@param iBody The current BODY number
*/
void VerifyOxygenMantleMass(BODY *body,OPTIONS *options,UPDATE *update,double dAge,int iBody) {
  /* One ODE involving only this body. */
  UPDATE *pUpdate = &update[iBody];
  int iVar = pUpdate->iOxygenMantleMass;
  pUpdate->iaType[iVar][0] = 1;
  pUpdate->iNumBodies[iVar][0] = 1;
  pUpdate->iaBody[iVar][0] = malloc(pUpdate->iNumBodies[iVar][0]*sizeof(int));
  pUpdate->iaBody[iVar][0][0] = iBody;
  pUpdate->pdDOxygenMantleMassDtAtmesc = &pUpdate->daDerivProc[iVar][0];
}
/**
Initialize the differential-equation matrix entry for the gaseous envelope mass.

@param body A pointer to the current BODY instance
@param options A pointer to the OPTIONS instance
@param update A pointer to the UPDATE instance
@param dAge The current age of the system
@param iBody The current BODY number
*/
void VerifyEnvelopeMass(BODY *body,OPTIONS *options,UPDATE *update,double dAge,int iBody) {
  /* One ODE involving only this body. */
  UPDATE *pUpdate = &update[iBody];
  int iVar = pUpdate->iEnvelopeMass;
  pUpdate->iaType[iVar][0] = 1;
  pUpdate->iNumBodies[iVar][0] = 1;
  pUpdate->iaBody[iVar][0] = malloc(pUpdate->iNumBodies[iVar][0]*sizeof(int));
  pUpdate->iaBody[iVar][0][0] = iBody;
  pUpdate->pdDEnvelopeMassDtAtmesc = &pUpdate->daDerivProc[iVar][0];
}
/**
Initialize the differential-equation matrix entry for the planet mass.

@param body A pointer to the current BODY instance
@param options A pointer to the OPTIONS instance
@param update A pointer to the UPDATE instance
@param dAge The current age of the system
@param iBody The current BODY number
*/
void VerifyMassAtmEsc(BODY *body,OPTIONS *options,UPDATE *update,double dAge,int iBody) {
  /* One ODE involving only this body. */
  UPDATE *pUpdate = &update[iBody];
  int iVar = pUpdate->iMass;
  pUpdate->iaType[iVar][0] = 1;
  pUpdate->iNumBodies[iVar][0] = 1;
  pUpdate->iaBody[iVar][0] = malloc(pUpdate->iNumBodies[iVar][0]*sizeof(int));
  pUpdate->iaBody[iVar][0][0] = iBody;
  pUpdate->pdDMassDtAtmesc = &pUpdate->daDerivProc[iVar][0];
}
/**
Initialize the differential-equation matrix entry for the planet radius, and
assign the initial radius from the selected radius model.

@param body A pointer to the current BODY instance
@param control A pointer to the CONTROL instance
@param options A pointer to the OPTIONS instance
@param update A pointer to the UPDATE instance
@param dAge The current age of the system
@param iBody The current BODY number
*/
void VerifyRadiusAtmEsc(BODY *body, CONTROL *control, OPTIONS *options,UPDATE *update,double dAge,int iBody) {
  /* Track whether the radius came from a grid model so the user-override
     warning is emitted once, instead of duplicating the check per branch. */
  int bGridRadius = 0;
  // Assign radius from the selected model (LEHMER17/NONE leave it untouched).
  if (body[iBody].iPlanetRadiusModel == ATMESC_LOP12) {
    body[iBody].dRadius = fdLopezRadius(body[iBody].dMass, body[iBody].dEnvelopeMass / body[iBody].dMass, 1., body[iBody].dAge, 0);
    bGridRadius = 1;
  } else if (body[iBody].iPlanetRadiusModel == ATMESC_PROXCENB) {
    body[iBody].dRadius = fdProximaCenBRadius(body[iBody].dEnvelopeMass / body[iBody].dMass, body[iBody].dAge, body[iBody].dMass);
    bGridRadius = 1;
  }
  // User specified a radius, but it will be computed from the grid instead.
  if (bGridRadius && options[OPT_RADIUS].iLine[iBody+1] >= 0) {
    if (control->Io.iVerbose >= VERBINPUT)
      printf("WARNING: Radius set for body %d, but this value will be computed from the grid.\n", iBody);
  }
  update[iBody].iaType[update[iBody].iRadius][0] = 0;
  update[iBody].iNumBodies[update[iBody].iRadius][0] = 1;
  update[iBody].iaBody[update[iBody].iRadius][0] = malloc(update[iBody].iNumBodies[update[iBody].iRadius][0]*sizeof(int));
  update[iBody].iaBody[update[iBody].iRadius][0][0] = iBody;
  update[iBody].pdRadiusAtmesc = &update[iBody].daDerivProc[update[iBody].iRadius][0]; // NOTE: This points to the VALUE of the radius
}
/**
Run during every integrator step to enforce non-ODE behavior: once the water
or envelope reservoir drops to (or below) its configured minimum, it is
clamped to exactly zero.

@param body A pointer to the current BODY instance
@param module A pointer to the MODULE instance
@param evolve A pointer to the EVOLVE instance
@param io A pointer to the IO instance
@param system A pointer to the SYSTEM instance
@param update A pointer to the UPDATE instance
@param fnUpdate A triple-pointer to the function that updates each variable
@param iBody The current BODY number
@param iModule The current MODULE number
*/
void fnForceBehaviorAtmEsc(BODY *body,MODULE *module,EVOLVE *evolve,IO *io,SYSTEM *system,UPDATE *update,fnUpdateVariable ***fnUpdate,int iBody,int iModule) {
  // Desiccate the surface once the water mass falls to its floor.
  if (body[iBody].dSurfaceWaterMass > 0. &&
      body[iBody].dSurfaceWaterMass <= body[iBody].dMinSurfaceWaterMass) {
    body[iBody].dSurfaceWaterMass = 0.;
  }
  // Strip the envelope once its mass falls to its floor.
  if (body[iBody].dEnvelopeMass > 0. &&
      body[iBody].dEnvelopeMass <= body[iBody].dMinEnvelopeMass) {
    body[iBody].dEnvelopeMass = 0.;
  }
}
/**
Initialize several helper variables and properties used in the integration:
the Lehmer+17 radius structure, the tidal enhancement factor Ktide, the XUV
flux, the reference hydrogen flux, and the water-escape regime/oxygen yield.

@param body A pointer to the current BODY instance
@param evolve A pointer to the EVOLVE instance
@param update A pointer to the UPDATE instance
@param iBody The current BODY number
*/
void fnPropertiesAtmEsc(BODY *body, EVOLVE *evolve, UPDATE *update, int iBody) {
  // Lehmer+17 model: derive solid radius, gravity, scale height, surface
  // pressure and the XUV radius from the current envelope mass.
  if (body[iBody].iPlanetRadiusModel == ATMESC_LEHMER17) {
    body[iBody].dRadSolid = 1.3 * pow(body[iBody].dMass - body[iBody].dEnvelopeMass, 0.27);
    body[iBody].dGravAccel = BIGG * (body[iBody].dMass - body[iBody].dEnvelopeMass) / (body[iBody].dRadSolid * body[iBody].dRadSolid);
    body[iBody].dScaleHeight = body[iBody].dAtmGasConst * body[iBody].dThermTemp / body[iBody].dGravAccel;
    body[iBody].dPresSurf = fdLehmerPres(body[iBody].dEnvelopeMass, body[iBody].dGravAccel, body[iBody].dRadSolid);
    body[iBody].dRadXUV = fdLehmerRadius(body[iBody].dRadSolid, body[iBody].dPresXUV, body[iBody].dScaleHeight,body[iBody].dPresSurf);
  }
  // Ktide (due to body zero only). WARNING: not suited for binary...
  double xi = (pow(body[iBody].dMass / (3. * body[0].dMass), (1. / 3)) *
               body[iBody].dSemi) / (body[iBody].dRadius * body[iBody].dXFrac);
  // For circumbinary planets, assume no Ktide enhancement
  if(body[iBody].bBinary && body[iBody].iBodyType == 0) {
    body[iBody].dKTide = 1.0;
  }
  else {
    // NOTE(review): when xi <= 1 dKTide is 0, which makes dFHRef below divide
    // by zero -- presumably xi <= 1 cannot occur for escaping bodies; confirm.
    if (xi > 1)
      body[iBody].dKTide = (1 - 3 / (2 * xi) + 1 / (2 * pow(xi, 3)));
    else
      body[iBody].dKTide = 0;
  }
  // The XUV flux
  if (body[iBody].bCalcFXUV){
    body[iBody].dFXUV = fdInsolation(body, iBody, 1);
  }
  // The H2O XUV escape efficiency
  if (body[iBody].iAtmXAbsEffH2OModel == ATMESC_BOL16)
    body[iBody].dAtmXAbsEffH2O = fdXUVEfficiencyBolmont2016(body[iBody].dFXUV);
  // Reference hydrogen flux for the water loss
  body[iBody].dFHRef = (body[iBody].dAtmXAbsEffH2O * body[iBody].dFXUV * body[iBody].dRadius) /
                       (4 * BIGG * body[iBody].dMass * body[iBody].dKTide * ATOMMASS);
  // Surface gravity
  double g = (BIGG * body[iBody].dMass) / (body[iBody].dRadius * body[iBody].dRadius);
  // Oxygen mixing ratio
  double XO = fdAtomicOxygenMixingRatio(body[iBody].dSurfaceWaterMass, body[iBody].dOxygenMass);
  // Diffusion-limited H escape rate
  body[iBody].dFHDiffLim = BDIFF * g * ATOMMASS * (QOH - 1.) / (KBOLTZ * THERMT * (1. + XO / (1. - XO)));
  // Is water escaping?
  if (!fbDoesWaterEscape(body, iBody)) {
    // No escape: zero all water-loss bookkeeping.
    body[iBody].dOxygenEta = 0;
    body[iBody].dCrossoverMass = 0;
    body[iBody].bRunaway = 0;
    body[iBody].iWaterEscapeRegime = ATMESC_NONE;
    body[iBody].dMDotWater = 0;
  } else {
    body[iBody].bRunaway = 1;
    // Select an escape/oxygen buildup model
    if (body[iBody].iWaterLossModel == ATMESC_LB15) {
      // Luger and Barnes (2015)
      double x = (KBOLTZ * THERMT * body[iBody].dFHRef) / (10 * BDIFF * g * ATOMMASS);
      if (x < 1) {
        body[iBody].dOxygenEta = 0;
        body[iBody].dCrossoverMass = ATOMMASS + 1.5 * KBOLTZ * THERMT * body[iBody].dFHRef / (BDIFF * g);
      } else {
        body[iBody].dOxygenEta = (x - 1) / (x + 8);
        body[iBody].dCrossoverMass = 43. / 3. * ATOMMASS + KBOLTZ * THERMT * body[iBody].dFHRef / (6 * BDIFF * g);
      }
    // BUGFIX: was the bitwise `|`, which evaluates both sides and does not
    // short-circuit; logical `||` is the intended boolean disjunction.
    } else if ((body[iBody].iWaterLossModel == ATMESC_LBEXACT) || (body[iBody].iWaterLossModel == ATMESC_TIAN)) {
      double x = (QOH - 1.) * (1. - XO) * (BDIFF * g * ATOMMASS) / (KBOLTZ * THERMT);
      double FH;
      double rat;
      // Get the crossover mass
      if (body[iBody].dFHRef < x) {
        // mcross < mo
        body[iBody].dCrossoverMass = ATOMMASS + (1. / (1. - XO)) * (KBOLTZ * THERMT * body[iBody].dFHRef) / (BDIFF * g);
        FH = body[iBody].dFHRef;
        rat = (body[iBody].dCrossoverMass / ATOMMASS - QOH) / (body[iBody].dCrossoverMass / ATOMMASS - 1.);
        body[iBody].dOxygenEta = 0;
      } else {
        // mcross >= mo
        double num = 1. + (XO / (1. - XO)) * QOH * QOH;
        double den = 1. + (XO / (1. - XO)) * QOH;
        body[iBody].dCrossoverMass = ATOMMASS * num / den + (KBOLTZ * THERMT * body[iBody].dFHRef) / ((1 + XO * (QOH - 1)) * BDIFF * g);
        rat = (body[iBody].dCrossoverMass / ATOMMASS - QOH) / (body[iBody].dCrossoverMass / ATOMMASS - 1.);
        FH = body[iBody].dFHRef * pow(1. + (XO / (1. - XO)) * QOH * rat, -1);
        body[iBody].dOxygenEta = 2 * XO / (1. - XO) * rat;
      }
    }
    if ((XO > 0.6) && (body[iBody].iWaterLossModel == ATMESC_LBEXACT)) {
      // Schaefer et al. (2016) prescription, section 2.2
      // NOTE: Perhaps a better criterion is (body[iBody].dOxygenEta > 1),
      // which ensures oxygen never escapes faster than it is being produced?
      body[iBody].iWaterEscapeRegime = ATMESC_DIFFLIM;
      body[iBody].dOxygenEta = 0;
      body[iBody].dMDotWater = body[iBody].dFHDiffLim * (4 * ATOMMASS * PI * body[iBody].dRadius * body[iBody].dRadius * body[iBody].dXFrac * body[iBody].dXFrac);
    } else {
      // In the Tian model, oxygen escapes when it's the dominant species. I think this is wrong...
      body[iBody].iWaterEscapeRegime = ATMESC_ELIM;
      body[iBody].dMDotWater = body[iBody].dFHRef * (4 * ATOMMASS * PI * body[iBody].dRadius * body[iBody].dRadius * body[iBody].dXFrac * body[iBody].dXFrac);
    }
  }
}
/**
Assign the time-derivative functions for each active atmesc variable to the
matrix of update-function pointers.

@param body A pointer to the current BODY instance
@param evolve A pointer to the EVOLVE instance
@param update A pointer to the UPDATE instance
@param fnUpdate A triple-pointer to the function that updates each variable
@param iBody The current BODY number
*/
void AssignAtmEscDerivatives(BODY *body,EVOLVE *evolve,UPDATE *update,fnUpdateVariable ***fnUpdate,int iBody) {
  fnUpdateVariable **ppfnBody = fnUpdate[iBody];
  // Water and oxygen reservoirs evolve only while surface water remains.
  if (body[iBody].dSurfaceWaterMass > 0) {
    ppfnBody[update[iBody].iSurfaceWaterMass][0] = &fdDSurfaceWaterMassDt;
    ppfnBody[update[iBody].iOxygenMass][0] = &fdDOxygenMassDt;
    ppfnBody[update[iBody].iOxygenMantleMass][0] = &fdDOxygenMantleMassDt;
  }
  // Envelope loss also drains the total planet mass at the same rate.
  if (body[iBody].dEnvelopeMass > 0) {
    ppfnBody[update[iBody].iEnvelopeMass][0] = &fdDEnvelopeMassDt;
    ppfnBody[update[iBody].iMass][0] = &fdDEnvelopeMassDt;
  }
  ppfnBody[update[iBody].iRadius][0] = &fdPlanetRadius; // NOTE: This points to the VALUE of the radius!
}
/**
Replaces the ATMESC derivative entries with the do-nothing update function
for variables that should no longer evolve.

@param body A pointer to the current BODY instance
@param evolve A pointer to the EVOLVE instance
@param update A pointer to the UPDATE instance
@param fnUpdate A triple-pointer to the function that updates each variable
@param iBody The current BODY number
*/
void NullAtmEscDerivatives(BODY *body,EVOLVE *evolve,UPDATE *update,fnUpdateVariable ***fnUpdate,int iBody) {
  // Mirror of AssignAtmEscDerivatives: every slot that module assigns is
  // pointed at the tiny no-op derivative instead.
  fnUpdate[iBody][update[iBody].iRadius][0] = &fndUpdateFunctionTiny; // NOTE: This points to the VALUE of the radius!
  if (body[iBody].dSurfaceWaterMass > 0) {
    fnUpdate[iBody][update[iBody].iSurfaceWaterMass][0] = &fndUpdateFunctionTiny;
    fnUpdate[iBody][update[iBody].iOxygenMass][0] = &fndUpdateFunctionTiny;
    fnUpdate[iBody][update[iBody].iOxygenMantleMass][0] = &fndUpdateFunctionTiny;
  }
  if (body[iBody].dEnvelopeMass > 0) {
    fnUpdate[iBody][update[iBody].iEnvelopeMass][0] = &fndUpdateFunctionTiny;
    fnUpdate[iBody][update[iBody].iMass][0] = &fndUpdateFunctionTiny;
  }
}
/**
Verify all the inputs for the atmospheric escape module.

@param body A pointer to the current BODY instance
@param control A pointer to the CONTROL instance
@param files A pointer to the FILES instance
@param options A pointer to the OPTIONS instance
@param output A pointer to the OUTPUT instance
@param system A pointer to the SYSTEM instance
@param update A pointer to the UPDATE instance
@param iBody The current BODY number
@param iModule The current MODULE number
*/
void VerifyAtmEsc(BODY *body,CONTROL *control,FILES *files,OPTIONS *options,OUTPUT *output,SYSTEM *system,UPDATE *update,int iBody,int iModule) {
  int bAtmEsc = 0;

  /* AtmEsc is active for this body if this subroutine is called. */

  // Is FXUV specified in the input file? If so, use the fixed value;
  // otherwise it will be computed each step.
  if (options[OPT_FXUV].iLine[iBody+1] > -1) {
    body[iBody].bCalcFXUV = 0;
  } else {
    body[iBody].bCalcFXUV = 1;
  }

  // Lehmer-17 planet radius model: derive the solid-body radius, surface
  // gravity, scale height, surface pressure, and XUV absorption radius.
  if (body[iBody].iPlanetRadiusModel == ATMESC_LEHMER17) {
    body[iBody].dRadSolid = 1.3 * pow(body[iBody].dMass - body[iBody].dEnvelopeMass, 0.27);
    body[iBody].dGravAccel = BIGG * (body[iBody].dMass - body[iBody].dEnvelopeMass) / (body[iBody].dRadSolid * body[iBody].dRadSolid);
    body[iBody].dScaleHeight = body[iBody].dAtmGasConst * body[iBody].dThermTemp / body[iBody].dGravAccel;
    body[iBody].dPresSurf = fdLehmerPres(body[iBody].dEnvelopeMass, body[iBody].dGravAccel, body[iBody].dRadSolid);
    body[iBody].dRadXUV = fdLehmerRadius(body[iBody].dRadSolid, body[iBody].dPresXUV, body[iBody].dScaleHeight,body[iBody].dPresSurf);
  }

  // Verify the primary variables for whichever reservoirs exist.
  if (body[iBody].dSurfaceWaterMass > 0) {
    VerifySurfaceWaterMass(body,options,update,body[iBody].dAge,iBody);
    VerifyOxygenMass(body,options,update,body[iBody].dAge,iBody);
    VerifyOxygenMantleMass(body,options,update,body[iBody].dAge,iBody);
    bAtmEsc = 1;
  }
  if (body[iBody].dEnvelopeMass > 0) {
    VerifyEnvelopeMass(body,options,update,body[iBody].dAge,iBody);
    VerifyMassAtmEsc(body,options,update,body[iBody].dAge,iBody);
    bAtmEsc = 1;
  }

  // Ensure envelope mass is physical
  if (body[iBody].dEnvelopeMass > body[iBody].dMass) {
    if (control->Io.iVerbose >= VERBERR) {
      fprintf(stderr,"ERROR: %s cannot be greater than %s in file %s.\n",options[OPT_ENVELOPEMASS].cName,options[OPT_MASS].cName,files->Infile[iBody+1].cIn);
    }
    exit(EXIT_INPUT);
  }

  // Initialize rg duration
  body[iBody].dRGDuration = 0.;

  if (!bAtmEsc && control->Io.iVerbose >= VERBINPUT) {
    fprintf(stderr,"WARNING: ATMESC called for body %s, but no atmosphere/water present!\n",body[iBody].cName);
  }

  // Radius evolution: only one equation may set dRadius.
  if (update[iBody].iNumRadius > 1) {
    if (control->Io.iVerbose >= VERBERR) {
      // FIX: added the missing trailing newline for consistency with the
      // other error messages in this routine.
      fprintf(stderr,"ERROR: Looks like there's more than one equation trying to set dRadius for body %d!\n", iBody);
    }
    exit(EXIT_INPUT);
  }
  VerifyRadiusAtmEsc(body,control,options,update,body[iBody].dAge,iBody);

  // Register the per-step module hooks.
  control->fnForceBehavior[iBody][iModule] = &fnForceBehaviorAtmEsc;
  control->fnPropsAux[iBody][iModule] = &fnPropertiesAtmEsc;
  control->Evolve.fnBodyCopy[iBody][iModule] = &BodyCopyAtmEsc;
}
/**************** ATMESC update ****************/
/**
Counts how many ATMESC equations act on each variable, bumping the body's
total variable count the first time a given variable is claimed.

@param body A pointer to the current BODY instance
@param update A pointer to the UPDATE instance
@param iBody The current BODY number
*/
void InitializeUpdateAtmEsc(BODY *body,UPDATE *update,int iBody) {
  if (body[iBody].dSurfaceWaterMass > 0) {
    // Water inventory plus the two oxygen reservoirs.
    if (update[iBody].iNumSurfaceWaterMass == 0) {
      update[iBody].iNumVars++;
    }
    update[iBody].iNumSurfaceWaterMass++;

    if (update[iBody].iNumOxygenMass == 0) {
      update[iBody].iNumVars++;
    }
    update[iBody].iNumOxygenMass++;

    if (update[iBody].iNumOxygenMantleMass == 0) {
      update[iBody].iNumVars++;
    }
    update[iBody].iNumOxygenMantleMass++;
  }

  if (body[iBody].dEnvelopeMass > 0) {
    // Envelope loss also changes the total mass.
    if (update[iBody].iNumEnvelopeMass == 0) {
      update[iBody].iNumVars++;
    }
    update[iBody].iNumEnvelopeMass++;

    if (update[iBody].iNumMass == 0) {
      update[iBody].iNumVars++;
    }
    update[iBody].iNumMass++;
  }

  if (body[iBody].dRadius > 0) {
    if (update[iBody].iNumRadius == 0) {
      update[iBody].iNumVars++;
    }
    update[iBody].iNumRadius++;
  }
}
/**
Internal housekeeping function that determines
which variables get updated every time step.
Intentionally empty: ATMESC does not evolve eccentricity.

@param body A pointer to the current BODY instance
@param update A pointer to the UPDATE instance
@param iEqn The current equation number
@param iVar The current variable number
@param iBody The current BODY number
@param iFoo ?!
*/
void FinalizeUpdateEccAtmEsc(BODY *body,UPDATE *update,int *iEqn,int iVar,int iBody,int iFoo) {
/* Nothing */
}
/**
Records ATMESC as the module owning the surface-water-mass equation and
stores the equation slot this module was assigned.

@param body A pointer to the current BODY instance
@param update A pointer to the UPDATE instance
@param iEqn The current equation number
@param iVar The current variable number
@param iBody The current BODY number
@param iFoo ?!
*/
void FinalizeUpdateSurfaceWaterMassAtmEsc(BODY *body,UPDATE*update,int *iEqn,int iVar,int iBody,int iFoo) {
  // Claim this equation slot for ATMESC, remember it, then advance the counter.
  update[iBody].iaModule[iVar][*iEqn] = ATMESC;
  update[iBody].iNumSurfaceWaterMass = *iEqn;
  (*iEqn)++;
}
/**
Records ATMESC as the module owning the atmospheric-oxygen-mass equation and
stores the equation slot this module was assigned.

@param body A pointer to the current BODY instance
@param update A pointer to the UPDATE instance
@param iEqn The current equation number
@param iVar The current variable number
@param iBody The current BODY number
@param iFoo ?!
*/
void FinalizeUpdateOxygenMassAtmEsc(BODY *body,UPDATE*update,int *iEqn,int iVar,int iBody,int iFoo) {
  // Claim this equation slot for ATMESC, remember it, then advance the counter.
  update[iBody].iaModule[iVar][*iEqn] = ATMESC;
  update[iBody].iNumOxygenMass = *iEqn;
  (*iEqn)++;
}
/**
Records ATMESC as the module owning the mantle-oxygen-mass equation and
stores the equation slot this module was assigned.

@param body A pointer to the current BODY instance
@param update A pointer to the UPDATE instance
@param iEqn The current equation number
@param iVar The current variable number
@param iBody The current BODY number
@param iFoo ?!
*/
void FinalizeUpdateOxygenMantleMassAtmEsc(BODY *body,UPDATE*update,int *iEqn,int iVar,int iBody,int iFoo) {
  // Claim this equation slot for ATMESC, remember it, then advance the counter.
  update[iBody].iaModule[iVar][*iEqn] = ATMESC;
  update[iBody].iNumOxygenMantleMass = *iEqn;
  (*iEqn)++;
}
/**
Records ATMESC as the module owning the envelope-mass equation and
stores the equation slot this module was assigned.

@param body A pointer to the current BODY instance
@param update A pointer to the UPDATE instance
@param iEqn The current equation number
@param iVar The current variable number
@param iBody The current BODY number
@param iFoo ?!
*/
void FinalizeUpdateEnvelopeMassAtmEsc(BODY *body,UPDATE*update,int *iEqn,int iVar,int iBody,int iFoo) {
  // Claim this equation slot for ATMESC, remember it, then advance the counter.
  update[iBody].iaModule[iVar][*iEqn] = ATMESC;
  update[iBody].iNumEnvelopeMass = *iEqn;
  (*iEqn)++;
}
/**
Records ATMESC as the module owning the total-mass equation and
stores the equation slot this module was assigned.

@param body A pointer to the current BODY instance
@param update A pointer to the UPDATE instance
@param iEqn The current equation number
@param iVar The current variable number
@param iBody The current BODY number
@param iFoo ?!
*/
void FinalizeUpdateMassAtmEsc(BODY *body,UPDATE*update,int *iEqn,int iVar,int iBody,int iFoo) {
  // Claim this equation slot for ATMESC, remember it, then advance the counter.
  update[iBody].iaModule[iVar][*iEqn] = ATMESC;
  update[iBody].iNumMass = *iEqn;
  (*iEqn)++;
}
/**
Internal housekeeping function that determines
which variables get updated every time step.
Intentionally empty: ATMESC does not evolve obliquity.

@param body A pointer to the current BODY instance
@param update A pointer to the UPDATE instance
@param iEqn The current equation number
@param iVar The current variable number
@param iBody The current BODY number
@param iFoo ?!
*/
void FinalizeUpdateOblAtmEsc(BODY *body,UPDATE *update,int *iEqn,int iVar,int iBody,int iFoo) {
/* Nothing */
}
/**
Internal housekeeping function that determines
which variables get updated every time step.
Intentionally empty: ATMESC does not evolve the rotation rate.

@param body A pointer to the current BODY instance
@param update A pointer to the UPDATE instance
@param iEqn The current equation number
@param iVar The current variable number
@param iBody The current BODY number
@param iFoo ?!
*/
void FinalizeUpdateRotAtmEsc(BODY *body,UPDATE *update,int *iEqn,int iVar,int iBody,int iFoo) {
/* Nothing */
}
/**
Internal housekeeping function that determines
which variables get updated every time step.
Intentionally empty: ATMESC does not evolve the semi-major axis.

@param body A pointer to the current BODY instance
@param update A pointer to the UPDATE instance
@param iEqn The current equation number
@param iVar The current variable number
@param iBody The current BODY number
@param iFoo ?!
*/
void FinalizeUpdateSemiAtmEsc(BODY *body,UPDATE *update,int *iEqn,int iVar,int iBody,int iFoo) {
/* Nothing */
}
/**
Records ATMESC as the module owning the radius equation and
stores the equation slot this module was assigned.

@param body A pointer to the current BODY instance
@param update A pointer to the UPDATE instance
@param iEqn The current equation number
@param iVar The current variable number
@param iBody The current BODY number
@param iFoo ?!
*/
void FinalizeUpdateRadiusAtmEsc(BODY *body,UPDATE*update,int *iEqn,int iVar,int iBody,int iFoo) {
  // Claim this equation slot for ATMESC, remember it, then advance the counter.
  update[iBody].iaModule[iVar][*iEqn] = ATMESC;
  update[iBody].iNumRadius = *iEqn;
  (*iEqn)++;
}
/***************** ATMESC Halts *****************/
/**
Checks for surface desiccation and halts if necessary.

@param body A pointer to the current BODY instance
@param evolve A pointer to the EVOLVE instance
@param halt A pointer to the HALT instance
@param io A pointer to the IO instance
@param update A pointer to the UPDATE instance
@param iBody The current BODY number

@return 1 to halt (surface water at or below the minimum), 0 otherwise
*/
int fbHaltSurfaceDesiccated(BODY *body,EVOLVE *evolve,HALT *halt,IO *io,UPDATE *update,int iBody) {
  // Guard clause: still enough surface water, keep integrating.
  if (body[iBody].dSurfaceWaterMass > body[iBody].dMinSurfaceWaterMass) {
    return 0;
  }
  if (io->iVerbose >= VERBPROG) {
    printf("HALT: %s's surface water mass = ",body[iBody].cName);
    fprintd(stdout,body[iBody].dSurfaceWaterMass/TOMASS,io->iSciNot,io->iDigits);
    printf("TO.\n");
  }
  return 1;
}
/**
Checks for envelope evaporation and halts if necessary.

@param body A pointer to the current BODY instance
@param evolve A pointer to the EVOLVE instance
@param halt A pointer to the HALT instance
@param io A pointer to the IO instance
@param update A pointer to the UPDATE instance
@param iBody The current BODY number

@return 1 to halt (envelope at or below the minimum mass), 0 otherwise
*/
int fbHaltEnvelopeGone(BODY *body,EVOLVE *evolve,HALT *halt,IO *io,UPDATE *update,int iBody) {
  // Guard clause: envelope still above the minimum, keep integrating.
  if (body[iBody].dEnvelopeMass > body[iBody].dMinEnvelopeMass) {
    return 0;
  }
  if (io->iVerbose >= VERBPROG) {
    printf("HALT: %s's envelope mass = ",body[iBody].cName);
    fprintd(stdout,body[iBody].dEnvelopeMass/MEARTH,io->iSciNot,io->iDigits);
    printf("Earth Masses.\n");
  }
  return 1;
}
/**
Count the number of halting conditions.

@param halt A pointer to the HALT instance
@param iHalt The current HALT number (incremented once per enabled halt)
*/
void CountHaltsAtmEsc(HALT *halt,int *iHalt) {
  // One slot for each ATMESC halt the user enabled.
  if (halt->bSurfaceDesiccated) {
    (*iHalt)++;
  }
  if (halt->bEnvelopeGone) {
    (*iHalt)++;
  }
}
/**
Check whether the user wants to halt on certain conditions, and if so
install the corresponding halt-check callbacks.

@param body A pointer to the current BODY instance
@param control A pointer to the CONTROL instance
@param options A pointer to the OPTIONS instance
@param iBody The current BODY number
@param iHalt The current HALT number
*/
void VerifyHaltAtmEsc(BODY *body,CONTROL *control,OPTIONS *options,int iBody,int *iHalt) {
  if (control->Halt[iBody].bSurfaceDesiccated) {
    control->fnHalt[iBody][(*iHalt)++] = &fbHaltSurfaceDesiccated;
  }
  if (control->Halt[iBody].bEnvelopeGone) {
    control->fnHalt[iBody][(*iHalt)++] = &fbHaltEnvelopeGone;
  }
}
/************* ATMESC Outputs ******************/
/**
Logs the surface water mass.

@param body A pointer to the current BODY instance
@param control A pointer to the current CONTROL instance
@param output A pointer to the current OUTPUT instance
@param system A pointer to the current SYSTEM instance
@param units A pointer to the current UNITS instance
@param update A pointer to the current UPDATE instance
@param iBody The current body Number
@param dTmp Temporary variable used for unit conversions
@param cUnit The unit for this variable
*/
void WriteSurfaceWaterMass(BODY *body,CONTROL *control,OUTPUT *output,SYSTEM *system,UNITS *units,UPDATE *update,int iBody,double *dTmp,char cUnit[]) {
  *dTmp = body[iBody].dSurfaceWaterMass;
  if (!output->bDoNeg[iBody]) {
    // Standard path: convert to the user's mass unit system.
    *dTmp /= fdUnitsMass(units->iMass);
    fsUnitsMass(units->iMass,cUnit);
  } else {
    // Negative-option path: custom conversion factor and unit label.
    *dTmp *= output->dNeg;
    strcpy(cUnit,output->cNeg);
  }
}
/**
Logs the atmospheric oxygen mass.

@param body A pointer to the current BODY instance
@param control A pointer to the current CONTROL instance
@param output A pointer to the current OUTPUT instance
@param system A pointer to the current SYSTEM instance
@param units A pointer to the current UNITS instance
@param update A pointer to the current UPDATE instance
@param iBody The current body Number
@param dTmp Temporary variable used for unit conversions
@param cUnit The unit for this variable
*/
void WriteOxygenMass(BODY *body,CONTROL *control,OUTPUT *output,SYSTEM *system,UNITS *units,UPDATE *update,int iBody,double *dTmp,char cUnit[]) {
  *dTmp = body[iBody].dOxygenMass;
  if (!output->bDoNeg[iBody]) {
    // Standard path: convert to the user's mass unit system.
    *dTmp /= fdUnitsMass(units->iMass);
    fsUnitsMass(units->iMass,cUnit);
  } else {
    // Negative option: convert mass to an equivalent global surface pressure,
    // m * g / (4 pi R^2) = m * G M / (4 pi R^4), scaled by 1e-5 (Pa -> bar;
    // cNeg for this output is "bars").
    *dTmp *= 1.e-5 * ((BIGG * body[iBody].dMass) / (4. * PI * pow(body[iBody].dRadius, 4)));
    strcpy(cUnit,output->cNeg);
  }
}
/**
Logs the mantle oxygen mass.

@param body A pointer to the current BODY instance
@param control A pointer to the current CONTROL instance
@param output A pointer to the current OUTPUT instance
@param system A pointer to the current SYSTEM instance
@param units A pointer to the current UNITS instance
@param update A pointer to the current UPDATE instance
@param iBody The current body Number
@param dTmp Temporary variable used for unit conversions
@param cUnit The unit for this variable
*/
void WriteOxygenMantleMass(BODY *body,CONTROL *control,OUTPUT *output,SYSTEM *system,UNITS *units,UPDATE *update,int iBody,double *dTmp,char cUnit[]) {
  *dTmp = body[iBody].dOxygenMantleMass;
  if (!output->bDoNeg[iBody]) {
    // Standard path: convert to the user's mass unit system.
    *dTmp /= fdUnitsMass(units->iMass);
    fsUnitsMass(units->iMass,cUnit);
  } else {
    // Negative option: convert mass to an equivalent global surface pressure,
    // m * G M / (4 pi R^4), scaled 1e-5 (Pa -> bar; cNeg is "bars").
    *dTmp *= 1.e-5 * ((BIGG * body[iBody].dMass) / (4. * PI * pow(body[iBody].dRadius, 4)));
    strcpy(cUnit,output->cNeg);
  }
}
/**
Logs the planet radius.

@param body A pointer to the current BODY instance
@param control A pointer to the current CONTROL instance
@param output A pointer to the current OUTPUT instance
@param system A pointer to the current SYSTEM instance
@param units A pointer to the current UNITS instance
@param update A pointer to the current UPDATE instance
@param iBody The current body Number
@param dTmp Temporary variable used for unit conversions
@param cUnit The unit for this variable
*/
void WritePlanetRadius(BODY *body,CONTROL *control,OUTPUT *output,SYSTEM *system,UNITS *units,UPDATE *update,int iBody,double *dTmp,char cUnit[]) {
  *dTmp = body[iBody].dRadius;
  if (!output->bDoNeg[iBody]) {
    // Standard path: convert to the user's length unit system.
    *dTmp /= fdUnitsLength(units->iLength);
    fsUnitsLength(units->iLength,cUnit);
  } else {
    // Negative-option path: custom conversion factor and unit label.
    *dTmp *= output->dNeg;
    strcpy(cUnit,output->cNeg);
  }
}
/**
Logs the envelope mass.

@param body A pointer to the current BODY instance
@param control A pointer to the current CONTROL instance
@param output A pointer to the current OUTPUT instance
@param system A pointer to the current SYSTEM instance
@param units A pointer to the current UNITS instance
@param update A pointer to the current UPDATE instance
@param iBody The current body Number
@param dTmp Temporary variable used for unit conversions
@param cUnit The unit for this variable
*/
void WriteEnvelopeMass(BODY *body,CONTROL *control,OUTPUT *output,SYSTEM *system,UNITS *units,UPDATE *update,int iBody,double *dTmp,char cUnit[]) {
  *dTmp = body[iBody].dEnvelopeMass;
  if (!output->bDoNeg[iBody]) {
    // Standard path: convert to the user's mass unit system.
    *dTmp /= fdUnitsMass(units->iMass);
    fsUnitsMass(units->iMass,cUnit);
  } else {
    // Negative-option path: custom conversion factor and unit label.
    *dTmp *= output->dNeg;
    strcpy(cUnit,output->cNeg);
  }
}
/**
Logs the semi-major axis corresponding to the current runaway greenhouse limit.

@param body A pointer to the current BODY instance
@param control A pointer to the current CONTROL instance
@param output A pointer to the current OUTPUT instance
@param system A pointer to the current SYSTEM instance
@param units A pointer to the current UNITS instance
@param update A pointer to the current UPDATE instance
@param iBody The current body Number
@param dTmp Temporary variable used for unit conversions
@param cUnit The unit for this variable
*/
void WriteRGLimit(BODY *body,CONTROL *control,OUTPUT *output,SYSTEM *system,UNITS *units,UPDATE *update,int iBody,double *dTmp,char cUnit[]) {
  // Runaway-greenhouse flux limit (fdHZRG14) for the primary's current state.
  double flux = fdHZRG14(body[0].dLuminosity, body[0].dTemperature, body[iBody].dEcc, body[iBody].dMass);

  // Invert the flux to an orbital distance *at the current eccentricity*:
  // a = sqrt( L * sqrt(1 - e^2) / (4 pi F) ).
  *dTmp = pow(4 * PI * flux / (body[0].dLuminosity * pow((1 - body[iBody].dEcc * body[iBody].dEcc), 0.5)), -0.5);

  if (!output->bDoNeg[iBody]) {
    // Standard path: convert to the user's length unit system.
    *dTmp /= fdUnitsLength(units->iLength);
    fsUnitsLength(units->iLength,cUnit);
  } else {
    // Negative-option path: custom conversion factor and unit label.
    *dTmp *= output->dNeg;
    strcpy(cUnit,output->cNeg);
  }
}
/**
Logs the oxygen mixing ratio at the base of the hydrodynamic wind.
Dimensionless, so the unit label is left empty.

@param body A pointer to the current BODY instance
@param control A pointer to the current CONTROL instance
@param output A pointer to the current OUTPUT instance
@param system A pointer to the current SYSTEM instance
@param units A pointer to the current UNITS instance
@param update A pointer to the current UPDATE instance
@param iBody The current body Number
@param dTmp Temporary variable used for unit conversions
@param cUnit The unit for this variable
*/
void WriteOxygenMixingRatio(BODY *body,CONTROL *control,OUTPUT *output,SYSTEM *system,UNITS *units,UPDATE *update,int iBody,double *dTmp,char cUnit[]) {
  strcpy(cUnit,"");
  *dTmp = fdAtomicOxygenMixingRatio(body[iBody].dSurfaceWaterMass, body[iBody].dOxygenMass);
}
/**
Logs the oxygen eta parameter from Luger and Barnes (2015).
Dimensionless, so the unit label is left empty.

@param body A pointer to the current BODY instance
@param control A pointer to the current CONTROL instance
@param output A pointer to the current OUTPUT instance
@param system A pointer to the current SYSTEM instance
@param units A pointer to the current UNITS instance
@param update A pointer to the current UPDATE instance
@param iBody The current body Number
@param dTmp Temporary variable used for unit conversions
@param cUnit The unit for this variable
*/
void WriteOxygenEta(BODY *body,CONTROL *control,OUTPUT *output,SYSTEM *system,UNITS *units,UPDATE *update,int iBody,double *dTmp,char cUnit[]) {
  strcpy(cUnit,"");
  *dTmp = body[iBody].dOxygenEta;
}
/**
Logs the XUV absorption efficiency for water.
Dimensionless, so the unit label is left empty.

@param body A pointer to the current BODY instance
@param control A pointer to the current CONTROL instance
@param output A pointer to the current OUTPUT instance
@param system A pointer to the current SYSTEM instance
@param units A pointer to the current UNITS instance
@param update A pointer to the current UPDATE instance
@param iBody The current body Number
@param dTmp Temporary variable used for unit conversions
@param cUnit The unit for this variable
*/
void WriteAtmXAbsEffH2O(BODY *body,CONTROL *control,OUTPUT *output,SYSTEM *system,UNITS *units,UPDATE *update,int iBody,double *dTmp,char cUnit[]) {
  strcpy(cUnit,"");
  *dTmp = body[iBody].dAtmXAbsEffH2O;
}
/**
Logs the planet's radius in the XUV.

@param body A pointer to the current BODY instance
@param control A pointer to the current CONTROL instance
@param output A pointer to the current OUTPUT instance
@param system A pointer to the current SYSTEM instance
@param units A pointer to the current UNITS instance
@param update A pointer to the current UPDATE instance
@param iBody The current body Number
@param dTmp Temporary variable used for unit conversions
@param cUnit The unit for this variable
*/
void WritePlanetRadXUV(BODY *body,CONTROL *control,OUTPUT *output,SYSTEM *system,UNITS *units,UPDATE *update,int iBody,double *dTmp,char cUnit[]) {
  *dTmp = body[iBody].dRadXUV;
  if (!output->bDoNeg[iBody]) {
    // Standard path: convert to the user's length unit system.
    *dTmp /= fdUnitsLength(units->iLength);
    fsUnitsLength(units->iLength,cUnit);
  } else {
    // Negative-option path: custom conversion factor and unit label.
    *dTmp *= output->dNeg;
    strcpy(cUnit,output->cNeg);
  }
}
/**
Logs the atmospheric mass loss rate.

\warning The conversion itself is currently broken/disabled; this routine
always reports 0 until pdDEnvelopeMassDtAtmesc is wired up correctly.

@param body A pointer to the current BODY instance
@param control A pointer to the current CONTROL instance
@param output A pointer to the current OUTPUT instance
@param system A pointer to the current SYSTEM instance
@param units A pointer to the current UNITS instance
@param update A pointer to the current UPDATE instance
@param iBody The current body Number
@param dTmp Temporary variable used for unit conversions
@param cUnit The unit for this variable
*/
void WriteDEnvMassDt(BODY *body,CONTROL *control,OUTPUT *output,SYSTEM *system,UNITS *units,UPDATE *update,int iBody,double *dTmp,char cUnit[]){
  // BUG FIX: the original wrote "dTmp = 0;", which nulled the local POINTER
  // and left the caller's output value uninitialized. Dereference so the
  // reported value is a well-defined 0. (The unused local dDeriv was folded
  // into the disabled code below.)
  *dTmp = 0;
  /* BROKEN!!!!
  double dDeriv = *(update[iBody].pdDEnvelopeMassDtAtmesc);
  *dTmp = dDeriv;
  *dTmp *= fdUnitsTime(units->iTime)/fdUnitsMass(units->iMass);
  */
}
/**
Logs the thermospheric temperature.

@param body A pointer to the current BODY instance
@param control A pointer to the current CONTROL instance
@param output A pointer to the current OUTPUT instance
@param system A pointer to the current SYSTEM instance
@param units A pointer to the current UNITS instance
@param update A pointer to the current UPDATE instance
@param iBody The current body Number
@param dTmp Temporary variable used for unit conversions
@param cUnit The unit for this variable
*/
void WriteThermTemp(BODY *body,CONTROL *control,OUTPUT *output,SYSTEM *system,UNITS *units,UPDATE *update,int iBody,double *dTmp,char cUnit[]) {
  *dTmp = body[iBody].dThermTemp;
  // No conversion on the standard path: value and cUnit are left as-is.
  // NOTE(review): cUnit is never written on that path; confirm callers
  // pre-initialize the unit string.
  if (!output->bDoNeg[iBody]) {
    return;
  }
  *dTmp *= output->dNeg;
  strcpy(cUnit,output->cNeg);
}
/**
Logs the surface pressure.

@param body A pointer to the current BODY instance
@param control A pointer to the current CONTROL instance
@param output A pointer to the current OUTPUT instance
@param system A pointer to the current SYSTEM instance
@param units A pointer to the current UNITS instance
@param update A pointer to the current UPDATE instance
@param iBody The current body Number
@param dTmp Temporary variable used for unit conversions
@param cUnit The unit for this variable
*/
void WritePresSurf(BODY *body,CONTROL *control,OUTPUT *output,SYSTEM *system,UNITS *units,UPDATE *update,int iBody,double *dTmp,char cUnit[]) {
  *dTmp = body[iBody].dPresSurf;
  // No conversion on the standard path: value and cUnit are left as-is.
  // NOTE(review): cUnit is never written on that path; confirm callers
  // pre-initialize the unit string.
  if (!output->bDoNeg[iBody]) {
    return;
  }
  *dTmp *= output->dNeg;
  strcpy(cUnit,output->cNeg);
}
/**
Logs the pressure at the XUV absorption radius.

@param body A pointer to the current BODY instance
@param control A pointer to the current CONTROL instance
@param output A pointer to the current OUTPUT instance
@param system A pointer to the current SYSTEM instance
@param units A pointer to the current UNITS instance
@param update A pointer to the current UPDATE instance
@param iBody The current body Number
@param dTmp Temporary variable used for unit conversions
@param cUnit The unit for this variable
*/
void WritePresXUV(BODY *body,CONTROL *control,OUTPUT *output,SYSTEM *system,UNITS *units,UPDATE *update,int iBody,double *dTmp,char cUnit[]) {
  *dTmp = body[iBody].dPresXUV;
  // No conversion on the standard path: value and cUnit are left as-is.
  // NOTE(review): cUnit is never written on that path; confirm callers
  // pre-initialize the unit string.
  if (!output->bDoNeg[iBody]) {
    return;
  }
  *dTmp *= output->dNeg;
  strcpy(cUnit,output->cNeg);
}
/**
Logs the time at which the flow transitioned to Jeans escape.

@param body A pointer to the current BODY instance
@param control A pointer to the current CONTROL instance
@param output A pointer to the current OUTPUT instance
@param system A pointer to the current SYSTEM instance
@param units A pointer to the current UNITS instance
@param update A pointer to the current UPDATE instance
@param iBody The current body Number
@param dTmp Temporary variable used for unit conversions
@param cUnit The unit for this variable
*/
void WriteJeansTime(BODY *body,CONTROL *control,OUTPUT *output,SYSTEM *system,UNITS *units,UPDATE *update,int iBody,double *dTmp,char cUnit[]) {
  *dTmp = body[iBody].dJeansTime;
  // No conversion on the standard path: value and cUnit are left as-is.
  // NOTE(review): cUnit is never written on that path; confirm callers
  // pre-initialize the unit string.
  if (!output->bDoNeg[iBody]) {
    return;
  }
  *dTmp *= output->dNeg;
  strcpy(cUnit,output->cNeg);
}
/**
Logs the atmospheric scale height.

@param body A pointer to the current BODY instance
@param control A pointer to the current CONTROL instance
@param output A pointer to the current OUTPUT instance
@param system A pointer to the current SYSTEM instance
@param units A pointer to the current UNITS instance
@param update A pointer to the current UPDATE instance
@param iBody The current body Number
@param dTmp Temporary variable used for unit conversions
@param cUnit The unit for this variable
*/
void WriteScaleHeight(BODY *body,CONTROL *control,OUTPUT *output,SYSTEM *system,UNITS *units,UPDATE *update,int iBody,double *dTmp,char cUnit[]) {
  *dTmp = body[iBody].dScaleHeight;
  // No conversion on the standard path: value and cUnit are left as-is.
  // NOTE(review): cUnit is never written on that path; confirm callers
  // pre-initialize the unit string.
  if (!output->bDoNeg[iBody]) {
    return;
  }
  *dTmp *= output->dNeg;
  strcpy(cUnit,output->cNeg);
}
/**
Logs the gas constant.

@param body A pointer to the current BODY instance
@param control A pointer to the current CONTROL instance
@param output A pointer to the current OUTPUT instance
@param system A pointer to the current SYSTEM instance
@param units A pointer to the current UNITS instance
@param update A pointer to the current UPDATE instance
@param iBody The current body Number
@param dTmp Temporary variable used for unit conversions
@param cUnit The unit for this variable
*/
void WriteAtmGasConst(BODY *body,CONTROL *control,OUTPUT *output,SYSTEM *system,UNITS *units,UPDATE *update,int iBody,double *dTmp,char cUnit[]) {
  *dTmp = body[iBody].dAtmGasConst;
  // No conversion on the standard path: value and cUnit are left as-is.
  // NOTE(review): cUnit is never written on that path; confirm callers
  // pre-initialize the unit string.
  if (!output->bDoNeg[iBody]) {
    return;
  }
  *dTmp *= output->dNeg;
  strcpy(cUnit,output->cNeg);
}
/**
Logs the planet's solid radius.

@param body A pointer to the current BODY instance
@param control A pointer to the current CONTROL instance
@param output A pointer to the current OUTPUT instance
@param system A pointer to the current SYSTEM instance
@param units A pointer to the current UNITS instance
@param update A pointer to the current UPDATE instance
@param iBody The current body Number
@param dTmp Temporary variable used for unit conversions
@param cUnit The unit for this variable
*/
void WriteRadSolid(BODY *body,CONTROL *control,OUTPUT *output,SYSTEM *system,UNITS *units,UPDATE *update,int iBody,double *dTmp,char cUnit[]) {
  *dTmp = body[iBody].dRadSolid;
  if (!output->bDoNeg[iBody]) {
    // Standard path: convert to the user's length unit system.
    *dTmp /= fdUnitsLength(units->iLength);
    fsUnitsLength(units->iLength,cUnit);
  } else {
    // Negative-option path: custom conversion factor and unit label.
    *dTmp *= output->dNeg;
    strcpy(cUnit,output->cNeg);
  }
}
/**
Logs the XUV flux received by the planet.

@param body A pointer to the current BODY instance
@param control A pointer to the current CONTROL instance
@param output A pointer to the current OUTPUT instance
@param system A pointer to the current SYSTEM instance
@param units A pointer to the current UNITS instance
@param update A pointer to the current UPDATE instance
@param iBody The current body Number
@param dTmp Temporary variable used for unit conversions
@param cUnit The unit for this variable
*/
void WriteFXUV(BODY *body,CONTROL *control,OUTPUT *output,SYSTEM *system,UNITS *units,UPDATE *update,int iBody,double *dTmp,char cUnit[]) {
  *dTmp = body[iBody].dFXUV;
  if (!output->bDoNeg[iBody]) {
    // Standard path: flux is reported in SI.
    strcpy(cUnit,"W/m^2");
  } else {
    // Negative-option path: custom conversion factor and unit label.
    *dTmp *= output->dNeg;
    strcpy(cUnit,output->cNeg);
  }
}
/**
Set up stuff to be logged for atmesc.
@param output A pointer to the current OUTPUT instance
@param fnWrite A pointer to the function that does the logging
*/
/*
 Register every atmesc output column: its name, description, negative-option
 unit label and conversion factor, column count, owning module bit, and the
 write callback. Fix: OUT_FXUV was initialized twice in this function — an
 earlier block ("XUVFlux", erg/cm^2/s, dNeg = 1e3) was completely overwritten
 by the later one; the dead duplicate has been removed (behavior unchanged).
*/
void InitializeOutputAtmEsc(OUTPUT *output,fnWriteOutput fnWrite[]) {
  /* Surface water inventory; negative option is terrestrial oceans (TO). */
  sprintf(output[OUT_SURFACEWATERMASS].cName,"SurfWaterMass");
  sprintf(output[OUT_SURFACEWATERMASS].cDescr,"Surface Water Mass");
  sprintf(output[OUT_SURFACEWATERMASS].cNeg,"TO");
  output[OUT_SURFACEWATERMASS].bNeg = 1;
  output[OUT_SURFACEWATERMASS].dNeg = 1./TOMASS;
  output[OUT_SURFACEWATERMASS].iNum = 1;
  output[OUT_SURFACEWATERMASS].iModuleBit = ATMESC;
  fnWrite[OUT_SURFACEWATERMASS] = &WriteSurfaceWaterMass;

  /* Total planetary radius; negative option is Earth radii. */
  sprintf(output[OUT_PLANETRADIUS].cName,"PlanetRadius");
  sprintf(output[OUT_PLANETRADIUS].cDescr,"Planet Radius");
  sprintf(output[OUT_PLANETRADIUS].cNeg,"Earth Radii");
  output[OUT_PLANETRADIUS].bNeg = 1;
  output[OUT_PLANETRADIUS].dNeg = 1./REARTH;
  output[OUT_PLANETRADIUS].iNum = 1;
  output[OUT_PLANETRADIUS].iModuleBit = ATMESC;
  fnWrite[OUT_PLANETRADIUS] = &WritePlanetRadius;

  /* Atmospheric oxygen; negative option labeled bars (dNeg = 1). */
  sprintf(output[OUT_OXYGENMASS].cName,"OxygenMass");
  sprintf(output[OUT_OXYGENMASS].cDescr,"Oxygen Mass");
  sprintf(output[OUT_OXYGENMASS].cNeg,"bars");
  output[OUT_OXYGENMASS].bNeg = 1;
  output[OUT_OXYGENMASS].dNeg = 1;
  output[OUT_OXYGENMASS].iNum = 1;
  output[OUT_OXYGENMASS].iModuleBit = ATMESC;
  fnWrite[OUT_OXYGENMASS] = &WriteOxygenMass;

  /* Oxygen absorbed by the mantle. */
  sprintf(output[OUT_OXYGENMANTLEMASS].cName,"OxygenMantleMass");
  sprintf(output[OUT_OXYGENMANTLEMASS].cDescr,"Mass of Oxygen in Mantle");
  sprintf(output[OUT_OXYGENMANTLEMASS].cNeg,"bars");
  output[OUT_OXYGENMANTLEMASS].bNeg = 1;
  output[OUT_OXYGENMANTLEMASS].dNeg = 1;
  output[OUT_OXYGENMANTLEMASS].iNum = 1;
  output[OUT_OXYGENMANTLEMASS].iModuleBit = ATMESC;
  fnWrite[OUT_OXYGENMANTLEMASS] = &WriteOxygenMantleMass;

  /* Runaway-greenhouse semi-major axis limit; negative option is AU. */
  sprintf(output[OUT_RGLIMIT].cName,"RGLimit");
  sprintf(output[OUT_RGLIMIT].cDescr,"Runaway Greenhouse Semi-Major Axis");
  sprintf(output[OUT_RGLIMIT].cNeg,"AU");
  output[OUT_RGLIMIT].bNeg = 1;
  output[OUT_RGLIMIT].dNeg = 1. / AUM;
  output[OUT_RGLIMIT].iNum = 1;
  output[OUT_RGLIMIT].iModuleBit = ATMESC;
  fnWrite[OUT_RGLIMIT] = &WriteOxygenMixingRatio == NULL ? NULL : &WriteRGLimit; /* see note below */
  fnWrite[OUT_RGLIMIT] = &WriteRGLimit;

  /* Dimensionless upper-atmosphere quantities (no negative option). */
  sprintf(output[OUT_XO].cName,"XO");
  sprintf(output[OUT_XO].cDescr,"Atomic Oxygen Mixing Ratio in Upper Atmosphere");
  output[OUT_XO].bNeg = 0;
  output[OUT_XO].iNum = 1;
  output[OUT_XO].iModuleBit = ATMESC;
  fnWrite[OUT_XO] = &WriteOxygenMixingRatio;

  sprintf(output[OUT_ETAO].cName,"EtaO");
  sprintf(output[OUT_ETAO].cDescr,"Oxygen Eta Parameter (Luger and Barnes 2015)");
  output[OUT_ETAO].bNeg = 0;
  output[OUT_ETAO].iNum = 1;
  output[OUT_ETAO].iModuleBit = ATMESC;
  fnWrite[OUT_ETAO] = &WriteOxygenEta;

  sprintf(output[OUT_EPSH2O].cName,"AtmXAbsEffH2O");
  sprintf(output[OUT_EPSH2O].cDescr,"XUV Atmospheric Escape Efficiency for H2O");
  output[OUT_EPSH2O].bNeg = 0;
  output[OUT_EPSH2O].iNum = 1;
  output[OUT_EPSH2O].iModuleBit = ATMESC;
  fnWrite[OUT_EPSH2O] = &WriteAtmXAbsEffH2O;

  /* H/He envelope mass; negative option is Earth masses. */
  sprintf(output[OUT_ENVELOPEMASS].cName,"EnvelopeMass");
  sprintf(output[OUT_ENVELOPEMASS].cDescr,"Envelope Mass");
  sprintf(output[OUT_ENVELOPEMASS].cNeg,"Earth");
  output[OUT_ENVELOPEMASS].bNeg = 1;
  output[OUT_ENVELOPEMASS].dNeg = 1./MEARTH;
  output[OUT_ENVELOPEMASS].iNum = 1;
  output[OUT_ENVELOPEMASS].iModuleBit = ATMESC;
  fnWrite[OUT_ENVELOPEMASS] = &WriteEnvelopeMass;

  /* XUV absorption radius (Lehmer-Catling model). */
  sprintf(output[OUT_PLANETRADXUV].cName,"RadXUV");
  sprintf(output[OUT_PLANETRADXUV].cDescr,"XUV Radius separating hydro. dyn. escpape and equilibrium");
  sprintf(output[OUT_PLANETRADXUV].cNeg,"Earth Radii");
  output[OUT_PLANETRADXUV].bNeg = 1;
  output[OUT_PLANETRADXUV].dNeg = 1./REARTH;
  output[OUT_PLANETRADXUV].iNum = 1;
  output[OUT_PLANETRADXUV].iModuleBit = ATMESC;
  fnWrite[OUT_PLANETRADXUV] = &WritePlanetRadXUV;

  sprintf(output[OUT_DENVMASSDT].cName,"DEnvMassDt");
  sprintf(output[OUT_DENVMASSDT].cDescr,"Envelope Mass Loss Rate");
  sprintf(output[OUT_DENVMASSDT].cNeg,"kg/s");
  output[OUT_DENVMASSDT].bNeg = 1;
  output[OUT_DENVMASSDT].iNum = 1;
  output[OUT_DENVMASSDT].iModuleBit = ATMESC;
  fnWrite[OUT_DENVMASSDT] = &WriteDEnvMassDt;

  sprintf(output[OUT_THERMTEMP].cName,"ThermTemp");
  sprintf(output[OUT_THERMTEMP].cDescr,"Isothermal Atmospheric Temperature");
  sprintf(output[OUT_THERMTEMP].cNeg,"K");
  output[OUT_THERMTEMP].bNeg = 1;
  output[OUT_THERMTEMP].dNeg = 1; // default units are K.
  output[OUT_THERMTEMP].iNum = 1;
  output[OUT_THERMTEMP].iModuleBit = ATMESC;
  fnWrite[OUT_THERMTEMP] = &WriteThermTemp;

  sprintf(output[OUT_PRESSURF].cName,"PresSurf");
  sprintf(output[OUT_PRESSURF].cDescr,"Surface Pressure due to Atmosphere");
  sprintf(output[OUT_PRESSURF].cNeg,"Pa");
  output[OUT_PRESSURF].bNeg = 1;
  output[OUT_PRESSURF].dNeg = 1;
  output[OUT_PRESSURF].iNum = 1;
  output[OUT_PRESSURF].iModuleBit = ATMESC;
  fnWrite[OUT_PRESSURF] = &WritePresSurf;

  sprintf(output[OUT_PRESXUV].cName,"PresXUV");
  sprintf(output[OUT_PRESXUV].cDescr,"Pressure at base of Thermosphere");
  sprintf(output[OUT_PRESXUV].cNeg,"Pa");
  output[OUT_PRESXUV].bNeg = 1;
  output[OUT_PRESXUV].dNeg = 1;
  output[OUT_PRESXUV].iNum = 1;
  output[OUT_PRESXUV].iModuleBit = ATMESC;
  fnWrite[OUT_PRESXUV] = &WritePresXUV;

  sprintf(output[OUT_SCALEHEIGHT].cName,"ScaleHeight");
  sprintf(output[OUT_SCALEHEIGHT].cDescr,"Scaling factor in fdLehmerRadius");
  sprintf(output[OUT_SCALEHEIGHT].cNeg,"J s^2 / kg m");
  output[OUT_SCALEHEIGHT].bNeg = 1;
  output[OUT_SCALEHEIGHT].dNeg = 1;
  output[OUT_SCALEHEIGHT].iNum = 1;
  output[OUT_SCALEHEIGHT].iModuleBit = ATMESC;
  fnWrite[OUT_SCALEHEIGHT] = &WriteScaleHeight;

  sprintf(output[OUT_ATMGASCONST].cName,"AtmGasConst");
  sprintf(output[OUT_ATMGASCONST].cDescr,"Atmospheric Gas Constant");
  sprintf(output[OUT_ATMGASCONST].cNeg,"J / K kg");
  output[OUT_ATMGASCONST].bNeg = 1;
  output[OUT_ATMGASCONST].dNeg = 1;
  output[OUT_ATMGASCONST].iNum = 1;
  output[OUT_ATMGASCONST].iModuleBit = ATMESC;
  fnWrite[OUT_ATMGASCONST] = &WriteAtmGasConst;

  sprintf(output[OUT_RADSOLID].cName,"RadSolid");
  sprintf(output[OUT_RADSOLID].cDescr,"Radius to the solid surface");
  sprintf(output[OUT_RADSOLID].cNeg,"Earth Radii");
  output[OUT_RADSOLID].bNeg = 1;
  output[OUT_RADSOLID].dNeg = 1./REARTH;
  output[OUT_RADSOLID].iNum = 1;
  output[OUT_RADSOLID].iModuleBit = ATMESC;
  fnWrite[OUT_RADSOLID] = &WriteRadSolid;

  /* XUV flux (this is the surviving of the two former OUT_FXUV blocks). */
  sprintf(output[OUT_FXUV].cName,"FXUV");
  sprintf(output[OUT_FXUV].cDescr,"XUV Flux");
  sprintf(output[OUT_FXUV].cNeg,"W/m^2");
  output[OUT_FXUV].bNeg = 1;
  output[OUT_FXUV].dNeg = 1;
  output[OUT_FXUV].iNum = 1;
  output[OUT_FXUV].iModuleBit = ATMESC;
  fnWrite[OUT_FXUV] = &WriteFXUV;
}
/************ ATMESC Logging Functions **************/
/**
Log the global atmesc options.
\warning This routine currently does nothing!
@param control A pointer to the current CONTROL instance
@param fp A FILE pointer
*/
void LogOptionsAtmEsc(CONTROL *control, FILE *fp) {
/* Intentionally a no-op: atmesc currently logs no global options.
   The commented fprintf below is kept as a template for future use. */
/* Anything here?
fprintf(fp,"-------- ATMESC Options -----\n\n");
*/
}
/**
Log the global atmesc parameters.
\warning This routine currently does nothing!
@param body A pointer to the current BODY instance
@param control A pointer to the current CONTROL instance
@param output A pointer to the current OUTPUT instance
@param system A pointer to the current SYSTEM instance
@param update A pointer to the current UPDATE instance
@param fnWrite A pointer to the function doing the logging
@param fp A FILE pointer
*/
void LogAtmEsc(BODY *body,CONTROL *control,OUTPUT *output,SYSTEM *system,UPDATE *update,fnWriteOutput fnWrite[],FILE *fp,int iBody) {
/* Intentionally a no-op: atmesc has no system-level (body-independent)
   parameters to log. Body-specific logging happens in LogBodyAtmEsc.
   The commented loop below is kept as a template for future use. */
/* Anything here?
int iOut;
fprintf(fp,"\n----- ATMESC PARAMETERS ------\n");
for (iOut=OUTSTARTATMESC;iOut<OUTBODYSTARTATMESC;iOut++) {
if (output[iOut].iNum > 0)
WriteLogEntry(control,output[iOut],body,system,fnWrite[iOut],fp,update,0);
}
*/
}
/**
Log the body-specific atmesc parameters.
@param body A pointer to the current BODY instance
@param control A pointer to the current CONTROL instance
@param output A pointer to the current OUTPUT instance
@param system A pointer to the current SYSTEM instance
@param update A pointer to the current UPDATE instance
@param fnWrite A pointer to the function doing the logging
@param fp A FILE pointer
@param iBody The current BODY number
*/
void LogBodyAtmEsc(BODY *body,CONTROL *control,OUTPUT *output,SYSTEM *system,UPDATE *update,fnWriteOutput fnWrite[],FILE *fp,int iBody) {
  /* Write every registered atmesc output for body iBody to the log file. */
  int iOut;
  fprintf(fp,"----- ATMESC PARAMETERS (%s)------\n",body[iBody].cName);
  for (iOut = OUTSTARTATMESC; iOut < OUTENDATMESC; iOut++) {
    /* Skip outputs that were never registered (zero columns). */
    if (output[iOut].iNum <= 0)
      continue;
    WriteLogEntry(body,control,&output[iOut],system,update,fnWrite[iOut],fp,iBody);
  }
  // TODO: Log this the standard way
  fprintf(fp,"(RGDuration) Runaway Greenhouse Duration [years]: %.5e\n", body[iBody].dRGDuration / YEARSEC);
}
/**
Adds atmesc to the current array of MODULEs.
@param module A pointer to the current array of MODULE instances
@param iBody The current BODY number
@param iModule The current MODULE number
*/
void AddModuleAtmEsc(MODULE *module,int iBody,int iModule) {
/* Register atmesc's module id and its callback table entries for body
   iBody in module slot iModule. */
module->iaModule[iBody][iModule] = ATMESC;
module->fnCountHalts[iBody][iModule] = &CountHaltsAtmEsc;
module->fnReadOptions[iBody][iModule] = &ReadOptionsAtmEsc;
module->fnLogBody[iBody][iModule] = &LogBodyAtmEsc;
module->fnVerify[iBody][iModule] = &VerifyAtmEsc;
module->fnAssignDerivatives[iBody][iModule] = &AssignAtmEscDerivatives;
module->fnNullDerivatives[iBody][iModule] = &NullAtmEscDerivatives;
module->fnVerifyHalt[iBody][iModule] = &VerifyHaltAtmEsc;
module->fnInitializeUpdate[iBody][iModule] = &InitializeUpdateAtmEsc;
module->fnFinalizeUpdateSurfaceWaterMass[iBody][iModule] = &FinalizeUpdateSurfaceWaterMassAtmEsc;
module->fnFinalizeUpdateOxygenMass[iBody][iModule] = &FinalizeUpdateOxygenMassAtmEsc;
module->fnFinalizeUpdateOxygenMantleMass[iBody][iModule] = &FinalizeUpdateOxygenMantleMassAtmEsc;
module->fnFinalizeUpdateEnvelopeMass[iBody][iModule] = &FinalizeUpdateEnvelopeMassAtmEsc;
/* NOTE(review): the total-mass finalizer deliberately reuses the
   envelope-mass finalizer — presumably because mass loss is driven
   entirely by envelope escape; confirm against the update framework. */
module->fnFinalizeUpdateMass[iBody][iModule] = &FinalizeUpdateEnvelopeMassAtmEsc;
module->fnFinalizeUpdateRadius[iBody][iModule] = &FinalizeUpdateRadiusAtmEsc;
}
/************* ATMESC Functions ************/
/**
The rate of change of the surface water mass.
@param body A pointer to the current BODY instance
@param system A pointer to the current SYSTEM instance
@param iaBody An array of body indices. The current body is index 0.
*/
double fdDSurfaceWaterMassDt(BODY *body,SYSTEM *system,int *iaBody) {
  /* Rate of change of surface water mass. Water is lost only while the
     planet is in a runaway greenhouse and still has water to lose. */
  int iB = iaBody[0];
  if (!body[iB].bRunaway || body[iB].dSurfaceWaterMass <= 0)
    return 0.;
  // This takes care of both energy-limited and diffusion limited escape!
  return -(9. / (1 + 8 * body[iB].dOxygenEta)) * body[iB].dMDotWater;
}
/**
The rate of change of the oxygen mass in the atmosphere.
@param body A pointer to the current BODY instance
@param system A pointer to the current SYSTEM instance
@param iaBody An array of body indices. The current body is index 0.
*/
double fdDOxygenMassDt(BODY *body,SYSTEM *system,int *iaBody) {
  /* Rate of change of atmospheric oxygen mass. Oxygen builds up only
     during runaway water loss when it is NOT instantly absorbed by the
     surface (bInstantO2Sink routes it to the mantle instead). */
  int iB = iaBody[0];
  if (!body[iB].bRunaway || body[iB].bInstantO2Sink || body[iB].dSurfaceWaterMass <= 0)
    return 0.;
  /* Luger & Barnes (2015) diffusion-limited branch: above the crossover
     mass, O drag saturates at a constant rate. */
  if ((body[iB].iWaterLossModel == ATMESC_LB15) && (body[iB].dCrossoverMass >= 16 * ATOMMASS))
    return (320. * PI * BIGG * ATOMMASS * ATOMMASS * BDIFF * body[iB].dMass) / (KBOLTZ * THERMT);
  /* Otherwise (and for the exact model) the O production follows the
     water loss scaled by the eta parameter. */
  return (8 - 8 * body[iB].dOxygenEta) / (1 + 8 * body[iB].dOxygenEta) * body[iB].dMDotWater;
}
/**
The rate of change of the oxygen mass in the mantle.
@param body A pointer to the current BODY instance
@param system A pointer to the current SYSTEM instance
@param iaBody An array of body indices. The current body is index 0.
*/
double fdDOxygenMantleMassDt(BODY *body,SYSTEM *system,int *iaBody) {
  /* Rate of change of mantle oxygen mass: identical production rate to
     fdDOxygenMassDt, but applies only when bInstantO2Sink is set, i.e.
     the surface immediately absorbs the liberated oxygen. */
  int iB = iaBody[0];
  if (!body[iB].bRunaway || !body[iB].bInstantO2Sink || body[iB].dSurfaceWaterMass <= 0)
    return 0.;
  /* Luger and Barnes (2015) diffusion-limited branch. */
  if ((body[iB].iWaterLossModel == ATMESC_LB15) && (body[iB].dCrossoverMass >= 16 * ATOMMASS))
    return (320. * PI * BIGG * ATOMMASS * ATOMMASS * BDIFF * body[iB].dMass) / (KBOLTZ * THERMT);
  /* Exact / sub-crossover branch. */
  return (8 - 8 * body[iB].dOxygenEta) / (1 + 8 * body[iB].dOxygenEta) * body[iB].dMDotWater;
}
/**
The rate of change of the envelope mass.
@param body A pointer to the current BODY instance
@param system A pointer to the current SYSTEM instance
@param iaBody An array of body indices. The current body is index 0.
*/
double fdDEnvelopeMassDt(BODY *body,SYSTEM *system,int *iaBody) {
  /* Rate of change of the H/He envelope mass. */
  int iB = iaBody[0];
  // TODO: This needs to be moved. Ideally we'd just remove this equation from the matrix.
  if ((body[iB].dEnvelopeMass <= 0) || (body[iB].dAge > body[iB].dJeansTime))
    return 0;
  /* Lehmer & Catling (2017): energy-limited loss through the XUV radius. */
  if (body[iB].iPlanetRadiusModel == ATMESC_LEHMER17)
    return -body[iB].dAtmXAbsEffH * PI * body[iB].dFXUV * pow(body[iB].dRadXUV, 3.0) /
           (BIGG * (body[iB].dMass - body[iB].dEnvelopeMass));
  /* Default: reference hydrogen flux scaled by the H/H2O efficiency ratio. */
  return -body[iB].dFHRef * (body[iB].dAtmXAbsEffH / body[iB].dAtmXAbsEffH2O) *
         (4 * ATOMMASS * PI * body[iB].dRadius * body[iB].dRadius * body[iB].dXFrac * body[iB].dXFrac);
}
/**
This function does nothing in atmesc.
@param body A pointer to the current BODY instance
@param system A pointer to the current SYSTEM instance
@param update A pointer to the current UPDATE instance
@param iBody The current body index
@param iFoo An example of pretty lousy programming
*/
double fdSurfEnFluxAtmEsc(BODY *body,SYSTEM *system,UPDATE *update,int iBody,int iFoo) {
/* atmesc contributes no surface energy flux, but the multi-module
   framework requires every module to provide this callback. */
// This is silly, but necessary!
return 0;
}
/**
Returns the planet radius at the current time.
@param body A pointer to the current BODY instance
@param system A pointer to the current SYSTEM instance
@param iaBody An array of body indices. The current body is index 0.
*/
double fdPlanetRadius(BODY *body,SYSTEM *system,int *iaBody) {
  /* Planet radius at the current time, per the selected radius model.
     Side effect: for the Lehmer17 model, refreshes dPresSurf and dRadXUV. */
  int iB = iaBody[0];
  if (body[iB].iPlanetRadiusModel == ATMESC_LEHMER17) {
    body[iB].dPresSurf = fdLehmerPres(body[iB].dEnvelopeMass, body[iB].dGravAccel, body[iB].dRadSolid);
    body[iB].dRadXUV = fdLehmerRadius(body[iB].dRadSolid, body[iB].dPresXUV, body[iB].dScaleHeight, body[iB].dPresSurf);
  }
  if (body[iB].iPlanetRadiusModel == ATMESC_LOP12) {
    /* Lopez+12 grid can return NaN outside its domain; fall back to the
       current radius in that case. */
    double dRad = fdLopezRadius(body[iB].dMass, body[iB].dEnvelopeMass / body[iB].dMass, 1., body[iB].dAge, 0);
    if (isnan(dRad))
      return body[iB].dRadius;
    return dRad;
  }
  if (body[iB].iPlanetRadiusModel == ATMESC_PROXCENB)
    return fdProximaCenBRadius(body[iB].dEnvelopeMass / body[iB].dMass, body[iB].dAge, body[iB].dMass);
  /* Any other model: radius is held fixed. */
  return body[iB].dRadius;
}
/************* ATMESC Helper Functions ************/
/**
Computes the insolation.
@param body A pointer to the current BODY instance
@param iBody The current BODY index
@param iXUV Integer describing the XUV model
*/
double fdInsolation(BODY *body, int iBody, int iXUV) {
  /* Orbit-averaged flux received by body iBody. iXUV selects the XUV
     luminosity instead of the bolometric one. */
  double dFlux;
  if (body[iBody].bBinary && body[iBody].iBodyType == 0) {
    /* Circumbinary planet: use the exact two-star flux. */
    if (iXUV)
      dFlux = fndFluxExactBinary(body,iBody,body[0].dLXUV,body[1].dLXUV);
    else
      dFlux = fndFluxExactBinary(body,iBody,body[0].dLuminosity,body[1].dLuminosity);
  } else {
    /* Single host star (body 0): L / (4 pi a^2 sqrt(1 - e^2)). */
    double dLum = iXUV ? body[0].dLXUV : body[0].dLuminosity;
    dFlux = dLum / (4 * PI * pow(body[iBody].dSemi, 2) *
            pow((1 - body[iBody].dEcc * body[iBody].dEcc), 0.5));
  }
  return dFlux;
}
/**
Computes whether or not water is escaping.
@param body A pointer to the current BODY instance
@param iBody The current BODY index
*/
int fbDoesWaterEscape(BODY *body, int iBody) {
  /* Returns 1 if surface water is currently escaping, 0 otherwise.
     The checks below are ORDER-SENSITIVE and must run in sequence. */
  // TODO: The checks below need to be moved. Ideally we'd
  // just remove this equation from the matrix if the
  // escape conditions are not met.

  // 1. A remaining hydrogen envelope shields the water.
  if (body[iBody].dEnvelopeMass > 0) {
    // (But let's still check whether the RG phase has ended)
    if ((body[iBody].dRGDuration == 0.) && (fdInsolation(body, iBody, 0) < fdHZRG14(body[0].dLuminosity, body[0].dTemperature, body[iBody].dEcc, body[iBody].dMass)))
      body[iBody].dRGDuration = body[iBody].dAge;
    return 0;
  }

  // 2. Inside the runaway-greenhouse flux? Otherwise the cold trap
  // prevents water loss.
  // NOTE: The RG flux limit below is calculated based on body zero's
  // spectrum! The Kopparapu+14 limit is for a single star only. This
  // approximation for a binary is only valid if the two stars have
  // similar spectral types, or if body zero dominates the flux.
  if (fdInsolation(body, iBody, 0) < fdHZRG14(body[0].dLuminosity, body[0].dTemperature, body[iBody].dEcc, body[iBody].dMass)) {
    if (body[iBody].dRGDuration == 0.)
      body[iBody].dRGDuration = body[iBody].dAge;
    return 0;
  }

  // 3. Is there still water to be lost?
  if (body[iBody].dSurfaceWaterMass <= 0)
    return 0;

  // 4. Are we in the ballistic (Jeans) escape limit?
  if (body[iBody].dAge > body[iBody].dJeansTime)
    return 0;

  return 1;
}
/**
Computes the atomic oxygen mixing ratio in the hydrodynamic flow.
@param dSurfaceWaterMass The amount of water in the atmosphere
@param dOxygenMass The amount of oxygen in the atmosphere
*/
double fdAtomicOxygenMixingRatio(double dSurfaceWaterMass, double dOxygenMass) {
  /* Mixing ratio X_O of atomic oxygen in the upper atmosphere,
     assuming the atmosphere is well-mixed up to the photolysis layer. */
  double dNO2 = dOxygenMass / (32 * ATOMMASS);       /* O2 molecule count */
  double dNH2O = dSurfaceWaterMass / (18 * ATOMMASS); /* H2O molecule count */
  if (dNH2O > 0)
    return 1. / (1 + 1. / (0.5 + dNO2 / dNH2O));
  /* No water left: the flow is pure oxygen if any remains. */
  return (dNO2 > 0) ? 1. : 0.;
}
/**
Performs a simple log-linear fit to the Kopparapu et al. (2014) mass-dependent
runaway greenhouse limit.
\warning Something is wrong with this linear fit in the first 5 Myr or so, as it diverges.
@param dLuminosity The stellar luminosity
@param dTeff The stellar effective temperature
@param dEcc The planet's eccentricity
@param dPlanetMass The planet mass
*/
double fdHZRG14(double dLuminosity, double dTeff, double dEcc, double dPlanetMass) {
  /* Log-linear fit to the Kopparapu et al. (2014) mass-dependent runaway
     greenhouse limit, returned as a flux.
     WARNING: the fit diverges during the first ~5 Myr. */
  int iMass;
  double daSeff[3];
  double daFit[2];
  double dTstar = dTeff - 5780;
  /* Fit grid at log10(M/Mearth) = -1, 0, ~0.7 with Kopparapu+14 coeffs. */
  double daLogMP[3] = {-1.0, 0., 0.69897};
  double daSeffSun[3] = {0.99, 1.107, 1.188};
  double daA[3] = {1.209e-4, 1.332e-4, 1.433e-4};
  double daB[3] = {1.404e-8, 1.58e-8, 1.707e-8};
  double daC[3] = {-7.418e-12, -8.308e-12, -8.968e-12};
  double daD[3] = {-1.713e-15, -1.931e-15, -2.084e-15};
  for (iMass = 0; iMass < 3; iMass++) {
    daSeff[iMass] = daSeffSun[iMass] + daA[iMass]*dTstar + daB[iMass]*dTstar*dTstar
                    + daC[iMass]*pow(dTstar,3) + daD[iMass]*pow(dTstar,4);
  }
  /* Interpolate the effective flux linearly in log planet mass. */
  fvLinearFit(daLogMP,daSeff,3,daFit);
  return (daFit[0]*log10(dPlanetMass/MEARTH) + daFit[1]) * LSUN / (4 * PI * AUM * AUM);
}
/**
Computes the XUV absorption efficiency for a water vapor atmosphere
based on a fit to the figure in Bolmont et al. (2017).
@param dFXUV The XUV flux incident on the planet.
*/
double fdXUVEfficiencyBolmont2016(double dFXUV) {
// Based on a piecewise polynomial fit to Figure 2
// in Bolmont et al. (2017), the XUV escape efficiency
// as a function of XUV flux for the TRAPPIST-1 planets.
// Polynomial coefficients
double a0 = 1.49202;
double a1 = 5.57875;
double a2 = 2.27482;
double b0 = 0.59182134;
double b1 = -0.36140798;
double b2 = -0.04011933;
double b3 = -0.8988;
double c0 = -0.00441536;
double c1 = -0.03068399;
double c2 = 0.04946948;
double c3 = -0.89880083;
// Convert to erg/cm^2/s and take the log
double x = log10(dFXUV * 1.e3);
double y;
// Piecewise polynomial fit
if ((x >= -2) && (x < -1))
y = pow(10, a0 * x * x + a1 * x + a2);
else if ((x >= -1) && (x < 0))
y = pow(10, b0 * x * x * x + b1 * x * x + b2 * x + b3);
else if ((x >= 0) && (x <= 5))
y = pow(10, c0 * x * x * x + c1 * x * x + c2 * x + c3);
else
y = 0;
return y;
}
/**
Performs a really simple linear least-squares fit on data.
@param x The independent coordinates
@param y The dependent coordinates
@param iLen The length of the arrays
@param daCoeffs The slope and the intercept of the fit
*/
void fvLinearFit(double *x, double *y, int iLen, double *daCoeffs){
  /* Ordinary least-squares fit y(x) = m x + b.
     Writes daCoeffs[0] = slope m, daCoeffs[1] = intercept b.
     See http://en.wikipedia.org/wiki/Simple_linear_regression */
  double dSumX = 0, dSumY = 0;
  double dCov = 0, dVar = 0;
  int i;
  for (i = 0; i < iLen; i++) {
    dSumX += x[i];
    dSumY += y[i];
  }
  {
    double dMeanX = dSumX / iLen;
    double dMeanY = dSumY / iLen;
    for (i = 0; i < iLen; i++) {
      dCov += (x[i] - dMeanX) * (y[i] - dMeanY);
      dVar += (x[i] - dMeanX) * (x[i] - dMeanX);
    }
    daCoeffs[0] = dCov / dVar;            // Slope
    daCoeffs[1] = dMeanY - daCoeffs[0] * dMeanX; // Intercept
  }
}
|
package kgo
import (
"bytes"
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"encoding/binary"
"encoding/hex"
"fmt"
"hash"
"os"
"path/filepath"
"reflect"
"regexp"
"strconv"
"strings"
"unicode"
"unicode/utf8"
"unsafe"
)
// md5Str returns the hex-encoded MD5 digest of str.
// When 0 < length < 32, only the first length hex characters are returned;
// otherwise the full 32-character digest is returned.
func md5Str(str []byte, length uint8) []byte {
	sum := md5.Sum(str)
	encoded := make([]byte, hex.EncodedLen(len(sum)))
	hex.Encode(encoded, sum[:])
	if length > 0 && length < 32 {
		return encoded[:length]
	}
	return encoded
}
// shaXStr returns the hex-encoded SHA-x digest of str, where x selects the
// algorithm: 1 (SHA-1), 256 (SHA-256) or 512 (SHA-512).
// Panics on any other x.
// Fix: removed the redundant `break` statements — Go switch cases do not
// fall through, so explicit breaks are noise.
func shaXStr(str []byte, x uint16) []byte {
	var h hash.Hash
	switch x {
	case 1:
		h = sha1.New()
	case 256:
		h = sha256.New()
	case 512:
		h = sha512.New()
	default:
		panic("[shaXStr] x must be in [1, 256, 512]")
	}
	h.Write(str)
	digest := h.Sum(nil)
	res := make([]byte, hex.EncodedLen(len(digest)))
	hex.Encode(res, digest)
	return res
}
// isArrayOrSlice reports how data relates to array/slice kinds.
// chkType selects what to accept: 1 = arrays only, 2 = slices only,
// 3 = either. Returns the length (>= 0) on a match, -1 otherwise.
// Panics when chkType is out of range.
// Fix: corrected the panic-message typos ("muset" -> "must", "it`s" -> "it's").
func isArrayOrSlice(data interface{}, chkType uint8) int {
	if chkType != 1 && chkType != 2 && chkType != 3 {
		panic(fmt.Sprintf("[isArrayOrSlice] chkType value must be in (1, 2, 3), but it's %d", chkType))
	}
	res := -1
	val := reflect.ValueOf(data)
	switch val.Kind() {
	case reflect.Array:
		if chkType == 1 || chkType == 3 {
			res = val.Len()
		}
	case reflect.Slice:
		if chkType == 2 || chkType == 3 {
			res = val.Len()
		}
	}
	return res
}
// isMap reports whether data is a map.
func isMap(data interface{}) bool {
	v := reflect.ValueOf(data)
	return v.Kind() == reflect.Map
}
// getEndian 获取系统字节序类型,小端返回binary.LittleEndian,大端返回binary.BigEndian .
func getEndian() binary.ByteOrder {
var nativeEndian binary.ByteOrder = binary.BigEndian
buf := [2]byte{}
*(*uint16)(unsafe.Pointer(&buf[0])) = uint16(0xABCD)
switch buf {
case [2]byte{0xCD, 0xAB}:
nativeEndian = binary.LittleEndian
//case [2]byte{0xAB, 0xCD}:
// nativeEndian = binary.BigEndian
}
return nativeEndian
}
// isLittleEndian 系统字节序类型是否小端存储.
func isLittleEndian() bool {
var i int32 = 0x01020304
// 将int32类型的指针转换为byte类型的指针
u := unsafe.Pointer(&i)
pb := (*byte)(u)
// 取得pb位置对应的值
b := *pb
// 由于b是byte类型的,最多保存8位,那么只能取得开始的8位
// 小端: 04 (03 02 01)
// 大端: 01 (02 03 04)
return (b == 0x04)
}
// isInt reports whether val holds an integer value: any built-in
// (un)signed integer type, or a non-empty string that strconv.Atoi accepts.
func isInt(val interface{}) bool {
	switch v := val.(type) {
	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
		return true
	case string:
		if v == "" {
			return false
		}
		_, err := strconv.Atoi(v)
		return err == nil
	default:
		return false
	}
}
// isFloat reports whether val holds a floating-point value: float32/float64,
// or a non-empty string matching the package-level RegFloat pattern.
func isFloat(val interface{}) bool {
	switch v := val.(type) {
	case float32, float64:
		return true
	case string:
		return v != "" && RegFloat.MatchString(v)
	}
	return false
}
// isNumeric reports whether val holds a numeric value: any built-in
// integer or float type, or a non-empty string that strconv.ParseFloat
// accepts.
func isNumeric(val interface{}) bool {
	switch v := val.(type) {
	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64,
		float32, float64:
		return true
	case string:
		if v == "" {
			return false
		}
		_, err := strconv.ParseFloat(v, 64)
		return err == nil
	default:
		return false
	}
}
// numeric2Float converts val to float64. Accepts every built-in integer
// and float type plus numeric strings (via strconv.ParseFloat).
// For any other type it returns (0, nil) — err is only set by a failed
// string parse, preserving the original contract.
// Improvement: use a bound type switch (`switch v := val.(type)`) instead
// of re-asserting the value inside every case.
func numeric2Float(val interface{}) (res float64, err error) {
	switch v := val.(type) {
	case int:
		res = float64(v)
	case int8:
		res = float64(v)
	case int16:
		res = float64(v)
	case int32:
		res = float64(v)
	case int64:
		res = float64(v)
	case uint:
		res = float64(v)
	case uint8:
		res = float64(v)
	case uint16:
		res = float64(v)
	case uint32:
		res = float64(v)
	case uint64:
		res = float64(v)
	case float32:
		res = float64(v)
	case float64:
		res = v
	case string:
		res, err = strconv.ParseFloat(v, 64)
	}
	return
}
// arrayValues returns all values in an array, slice or map.
// When filterNil is true, empty elements (nil or values formatting to "")
// are dropped; otherwise every element is kept. Panics on other types.
func arrayValues(arr interface{}, filterNil bool) []interface{} {
	keep := func(item interface{}) bool {
		if !filterNil {
			return true
		}
		return item != nil && fmt.Sprintf("%v", item) != ""
	}
	var res []interface{}
	val := reflect.ValueOf(arr)
	switch val.Kind() {
	case reflect.Array, reflect.Slice:
		for i := 0; i < val.Len(); i++ {
			if item := val.Index(i).Interface(); keep(item) {
				res = append(res, item)
			}
		}
	case reflect.Map:
		for _, k := range val.MapKeys() {
			if item := val.MapIndex(k).Interface(); keep(item) {
				res = append(res, item)
			}
		}
	default:
		panic("[arrayValues] arr type must be array, slice or map")
	}
	return res
}
// getTrimMask 去除mask字符.
func getTrimMask(characterMask []string) string {
var mask string
if len(characterMask) == 0 {
mask = " \t\n\r\v\f\x00 "
} else {
mask = strings.Join(characterMask, "")
}
return mask
}
// reflectPtr 获取反射的指向.
func reflectPtr(r reflect.Value) reflect.Value {
// 如果是指针,则获取其所指向的元素
if r.Kind() == reflect.Ptr {
r = r.Elem()
}
return r
}
// creditChecksum computes the check character of a Chinese resident ID
// number: sum(a_i * W_i) mod 11 mapped through the standard code table.
// id is the full ID string; only the characters before the last position
// contribute to the sum.
func creditChecksum(id string) byte {
	// Weighting factors for the first 17 digits.
	weights := []int{7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2}
	// Check characters indexed by the weighted sum mod 11.
	codes := []byte{'1', '0', 'X', '9', '8', '7', '6', '5', '4', '3', '2'}
	total := 0
	for idx, ch := range id[:len(id)-1] {
		digit, _ := strconv.Atoi(string(ch))
		total += digit * weights[idx]
	}
	return codes[total%11]
}
// compareConditionMap returns arr when every entry of condition is present
// in arr (a map) with a deep-equal value; otherwise nil. Non-map arr
// always yields nil. An empty condition matches any map.
func compareConditionMap(condition map[string]interface{}, arr interface{}) (res interface{}) {
	val := reflect.ValueOf(arr)
	if val.Kind() != reflect.Map {
		return
	}
	matched := 0
	for _, key := range val.MapKeys() {
		want, exists := condition[key.String()]
		if exists && reflect.DeepEqual(val.MapIndex(key).Interface(), want) {
			matched++
		}
	}
	if matched == len(condition) {
		res = arr
	}
	return
}
// getMethod 获取对象的方法.
func getMethod(t interface{}, method string) reflect.Value {
m, b := reflect.TypeOf(t).MethodByName(method)
if !b {
return reflect.ValueOf(nil)
}
return m.Func
}
// ValidFunc verifies that f is a function and that args match its arity
// and parameter kinds, returning the callable Value and the prepared
// argument Values, or a descriptive error.
// NOTE(review): an untyped-nil arg for a non-interface parameter will make
// reflect.TypeOf(args[i]) return nil and panic on .Kind(); callers must
// not pass nil args — confirm before hardening.
// Fix: corrected the ordinal typo in the error message ("-td" -> "-th").
func ValidFunc(f interface{}, args ...interface{}) (vf reflect.Value, vargs []reflect.Value, err error) {
	vf = reflect.ValueOf(f)
	if vf.Kind() != reflect.Func {
		return reflect.ValueOf(nil), nil, fmt.Errorf("[validFunc] %v is not the function", f)
	}
	tf := vf.Type()
	_len := len(args)
	if tf.NumIn() != _len {
		return reflect.ValueOf(nil), nil, fmt.Errorf("[validFunc] %d number of the argument is incorrect", _len)
	}
	vargs = make([]reflect.Value, _len)
	for i := 0; i < _len; i++ {
		typ := tf.In(i).Kind()
		if (typ != reflect.Interface) && (typ != reflect.TypeOf(args[i]).Kind()) {
			return reflect.ValueOf(nil), nil, fmt.Errorf("[validFunc] %d-th argument`s type is incorrect", i+1)
		}
		vargs[i] = reflect.ValueOf(args[i])
	}
	return vf, vargs, nil
}
// CallFunc dynamically invokes f with args (after ValidFunc validation)
// and returns the call results as a []interface{}.
func CallFunc(f interface{}, args ...interface{}) (results []interface{}, err error) {
	vf, vargs, verr := ValidFunc(f, args...)
	if verr != nil {
		return nil, verr
	}
	ret := vf.Call(vargs)
	results = make([]interface{}, len(ret))
	for i, rv := range ret {
		results[i] = rv.Interface()
	}
	return results, nil
}
// camelCaseToLowerCase converts a camel-case string to lower case, joining
// word boundaries with connector (e.g. `HTTPServer` -> `http_server` for
// connector '_'). Existing '-', '_' and whitespace runes are normalized to
// connector; invalid UTF-8 runes are skipped.
func camelCaseToLowerCase(str string, connector rune) string {
if len(str) == 0 {
return ""
}
buf := &bytes.Buffer{}
// prev: previously consumed rune; r0: current rune; r1: lookbehind used
// inside the all-caps scan below. size: byte width of the decoded rune.
var prev, r0, r1 rune
var size int
// Seed prev with the connector so no connector is emitted at position 0.
r0 = connector
for len(str) > 0 {
prev = r0
r0, size = utf8.DecodeRuneInString(str)
str = str[size:]
switch {
case r0 == utf8.RuneError:
continue
case unicode.IsUpper(r0):
// Starting a new word: separate it from the previous one unless we
// are at a boundary already (connector or digit just emitted).
if prev != connector && !unicode.IsNumber(prev) {
buf.WriteRune(connector)
}
buf.WriteRune(unicode.ToLower(r0))
if len(str) == 0 {
break
}
r0, size = utf8.DecodeRuneInString(str)
str = str[size:]
if !unicode.IsUpper(r0) {
buf.WriteRune(r0)
break
}
// find next non-upper-case character and insert connector properly.
// it's designed to convert `HTTPServer` to `http_server`.
// if there are more than 2 adjacent upper case characters in a word,
// treat them as an abbreviation plus a normal word.
for len(str) > 0 {
r1 = r0
r0, size = utf8.DecodeRuneInString(str)
str = str[size:]
if r0 == utf8.RuneError {
buf.WriteRune(unicode.ToLower(r1))
break
}
if !unicode.IsUpper(r0) {
if isCaseConnector(r0) {
r0 = connector
buf.WriteRune(unicode.ToLower(r1))
} else if unicode.IsNumber(r0) {
// treat a number as an upper case rune
// so that both `http2xx` and `HTTP2XX` can be converted to `http_2xx`.
buf.WriteRune(unicode.ToLower(r1))
buf.WriteRune(connector)
buf.WriteRune(r0)
} else {
buf.WriteRune(connector)
buf.WriteRune(unicode.ToLower(r1))
buf.WriteRune(r0)
}
break
}
buf.WriteRune(unicode.ToLower(r1))
}
// Flush the final rune when the string ended inside the scan above.
if len(str) == 0 || r0 == connector {
buf.WriteRune(unicode.ToLower(r0))
}
case unicode.IsNumber(r0):
// Digits start a new "word" unless preceded by a digit or connector.
if prev != connector && !unicode.IsNumber(prev) {
buf.WriteRune(connector)
}
buf.WriteRune(r0)
default:
// Lower-case and other runes pass through; existing separators are
// normalized to the chosen connector.
if isCaseConnector(r0) {
r0 = connector
}
buf.WriteRune(r0)
}
}
return buf.String()
}
// isCaseConnector reports whether r acts as a word separator for case
// conversion: '-', '_' or any Unicode space.
func isCaseConnector(r rune) bool {
	switch {
	case r == '-', r == '_':
		return true
	default:
		return unicode.IsSpace(r)
	}
}
// getPidByInode resolves the PID owning the socket with the given inode by
// scanning /proc/<pid>/fd symlinks (root privileges required). procDirs
// may pre-supply the fd paths; when empty they are globbed from /proc.
// Returns 0 when no owner is found.
func getPidByInode(inode string, procDirs []string) (pid int) {
	if len(procDirs) == 0 {
		procDirs, _ = filepath.Glob("/proc/[0-9]*/fd/[0-9]*")
	}
	re := regexp.MustCompile(inode)
	for _, fdPath := range procDirs {
		target, _ := os.Readlink(fdPath)
		if re.FindString(target) == "" {
			continue
		}
		// Path layout is /proc/<pid>/fd/<n>; element 2 is the pid.
		pid, _ = strconv.Atoi(strings.Split(fdPath, "/")[2])
		return pid
	}
	return pid
}
// getProcessPathByPid returns the executable path of the process with the
// given PID by reading the /proc/<pid>/exe symlink ("" on failure).
func getProcessPathByPid(pid int) string {
	target, _ := os.Readlink(fmt.Sprintf("/proc/%d/exe", pid))
	return target
}
// pkcs7Padding pads cipherText up to a multiple of blockSize.
// With isZero false the filler byte is the padding length (PKCS#7);
// with isZero true the filler is 0x00. Returns nil on empty input or a
// non-positive blockSize.
func pkcs7Padding(cipherText []byte, blockSize int, isZero bool) []byte {
	clen := len(cipherText)
	if cipherText == nil || clen == 0 || blockSize <= 0 {
		return nil
	}
	padding := blockSize - clen%blockSize
	filler := byte(padding)
	if isZero {
		filler = 0
	}
	return append(cipherText, bytes.Repeat([]byte{filler}, padding)...)
}
// pkcs7UnPadding strips PKCS#7 padding from origData.
// Returns nil when the input is empty, blockSize is non-positive, the
// length is not a blockSize multiple, or the padding byte is invalid.
func pkcs7UnPadding(origData []byte, blockSize int) []byte {
	olen := len(origData)
	if origData == nil || olen == 0 || blockSize <= 0 || olen%blockSize != 0 {
		return nil
	}
	// The final byte encodes how many padding bytes to drop.
	pad := int(origData[olen-1])
	if pad == 0 || pad > olen {
		return nil
	}
	return origData[:olen-pad]
}
// zeroPadding pads cipherText to a multiple of blockSize using 0x00 filler
// bytes (PKCS#7 layout with a zero filler); thin wrapper over pkcs7Padding.
func zeroPadding(cipherText []byte, blockSize int) []byte {
return pkcs7Padding(cipherText, blockSize, true)
}
// zeroUnPadding removes trailing 0x00 padding bytes from origData.
func zeroUnPadding(origData []byte) []byte {
	isZero := func(r rune) bool { return r == rune(0) }
	return bytes.TrimRightFunc(origData, isZero)
}
|
#!/bin/bash
# killtree PID [SIGNAL] — recursively signal a process tree, children
# first, then the given process. SIGNAL defaults to TERM.
killtree() {
    local _pid=$1
    # Fix: ${2:-TERM} also defaults when $2 is set but empty (the old
    # ${2-TERM} would run "kill -" on an empty argument).
    local _sig=${2:-TERM}
    local _child
    # $(...) is deliberately unquoted so each child pid becomes one word.
    for _child in $(ps -o pid --no-headers --ppid "${_pid}"); do
        killtree "${_child}" "${_sig}"
    done
    # Quote expansions to avoid word splitting / globbing surprises.
    kill -"${_sig}" "${_pid}"
}
# Entry point: require 1 or 2 arguments (pid, optional signal).
# Fixes: quoted "$@" and "$0" against word splitting; replaced the
# obsolescent `-o` test operator with two tests joined by `||`.
if [ "$#" -eq 0 ] || [ "$#" -gt 2 ]; then
    echo "Usage: $(basename "$0") <pid> [signal]"
    exit 1
fi
killtree "$@"
|
import fs from 'fs/promises';
import findProjectRoot from '../utils/find-project-root.js';
import writeIfNotExist from '../utils/write-if-not-exists.js';
// Boilerplate written to the generated initializer file. Kept as a single
// template literal; the commented inject() line shows the intended usage.
const initializerCode = `export function initialize(/* application */) {
  // application.inject('route', 'foo', 'service:foo');
}
export default {
  initialize
};
`;
/**
 * Generates an initializer module and its companion unit test under
 * `<projectRoot>/src/init/initializers`.
 *
 * @param {string} name            Initializer name (used for both files).
 * @param {string} [projectRoot]   Project root; auto-detected when omitted.
 * @param {string} applicationName App name used in the generated test's imports.
 */
export default async function(name, projectRoot, applicationName) {
  const root = projectRoot || (await findProjectRoot());
  const initializersDir = `${root}/src/init/initializers`;
  const targetBase = `${initializersDir}/${name}`;

  await fs.mkdir(initializersDir, { recursive: true });

  // Write the initializer and its test in parallel; existing files are kept.
  await Promise.all([
    writeIfNotExist(`${targetBase}.ts`, initializerCode, root),
    writeIfNotExist(`${targetBase}-test.ts`, getInitializerTestCode(name, applicationName), root)
  ]);
}
// Returns the source text of a QUnit unit test for the generated
// initializer. `name` is interpolated into the module title and import
// path; `applicationName` into the test-helpers import.
function getInitializerTestCode(name, applicationName) {
  return `import Application from '@ember/application';
import { module, test } from 'qunit';
import { run } from '@ember/runloop';

import { setupTest } from '${applicationName}/tests/helpers';
import { initialize } from './${name}';

module('Unit | Initializer | ${name}', function(hooks) {
  setupTest(hooks);

  hooks.beforeEach(function() {
    this.TestApplication = Application.extend();
    this.TestApplication.initializer({
      name: 'initializer under test',
      initialize
    });
    this.application = this.TestApplication.create({ autoboot: false });
  });

  hooks.afterEach(function() {
    run(this.application, 'destroy');
  });

  // Replace this with your real tests.
  test('it works', async function(assert) {
    await this.application.boot();

    assert.ok(true);
  });
});
`;
}
|
def sort_by_second_item(tuples):
    """Sorts a list of tuples by the second item in the tuples.

    Args:
        tuples: An iterable of tuples (each with at least two items).

    Returns:
        list: A new list sorted ascending by each tuple's second item;
        the input is left unmodified.
    """
    # operator.itemgetter(1) is the idiomatic (and slightly faster)
    # equivalent of ``lambda x: x[1]``; local import keeps the module's
    # top-level imports untouched.
    from operator import itemgetter
    return sorted(tuples, key=itemgetter(1))
<filename>speed-typer/main.py
import os
import pickle
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtMultimedia import QSoundEffect
from PyQt5.QtGui import QIcon, QFontDatabase
from pathlib import Path
from source_ui import main_window
from type_test import highscores, settings, statistics, type_test
# PATHS
# All assets and data files are resolved relative to this source file so the
# application works regardless of the current working directory.
BASE_FOLDER = Path(__file__).parents[0]
ASSETS_FOLDER = BASE_FOLDER / "assets"
DATA_FOLDER = BASE_FOLDER / "data"
ICON_PATH = ASSETS_FOLDER / "icon.png"
FONT_PATH = ASSETS_FOLDER / "InconsolataBold.ttf"
SOUND_FOLDER = ASSETS_FOLDER / "sounds"
DATA_FILE = DATA_FOLDER / "data.pkl"  # pickled user data/settings
class MainWindow(QtWidgets.QWidget, main_window.Ui_mainWindow):
    """Main menu window.

    Launches the typing-test, settings and statistics windows, and owns the
    persisted user data (``self.data``, pickled to DATA_FILE).
    """

    def __init__(self, *args, **kwargs):
        """Wires up buttons, loads persisted data, sound and font."""
        super().__init__(*args, **kwargs)
        self.setupUi(self)
        self.ICON = QIcon(str(ICON_PATH))
        self.setWindowIcon(self.ICON)

        # BUTTONS
        self.buttonStart.clicked.connect(self.on_clicked_start)
        self.buttonSettings.clicked.connect(self.on_clicked_settings)
        self.buttonStatistics.clicked.connect(self.on_clicked_statistics)
        self.buttonExit.clicked.connect(QtWidgets.QApplication.instance().quit)
        self.comboBoxSelectMode.currentIndexChanged.connect(self.on_change_mode)

        # HIGHSCORES HANDLER
        self.highscore = highscores.Highscores()
        self.update_highscores()

        # DATA AND SETTINGS
        # Load persisted data when available, otherwise fall back to defaults.
        if DATA_FILE.is_file():
            self.load_data_from_file()
        else:
            self.data = settings.DEFAULT_DATA
        self.comboBoxSelectMode.setCurrentIndex(self.data.get("selected_mode", 0))

        # SOUND
        self.set_key_sound(self.get_setting("sound_filename"))

        # FONT
        self.inconsolata_bold = self.load_custom_font(str(FONT_PATH))
        # Stylesheet is set in the main program after instantiation

    # Button methods
    def on_clicked_start(self) -> None:
        """Opens a typing window for the currently selected mode and hides
        the main menu."""
        self.make_mode_window(str(self.comboBoxSelectMode.currentText()))
        self.show_window(self.mode_window, self.isMaximized())
        self.mode_window.setStyleSheet(self.get_setting("stylesheet"))
        self.mode_window.set_colours(self.get_setting("rich_text_colours"))
        self.hide()

    def on_clicked_main_menu(self, window: QtWidgets.QWidget) -> None:
        """Returns from a child window back to this main menu, closing and
        discarding the child."""
        self.update_highscores()
        self.show_window(self, window.isMaximized())
        window.close()
        del window

    def on_clicked_settings(self) -> None:
        """Opens the settings window and hides the main menu."""
        self.make_settings_window()
        self.show_window(self.settings_window, self.isMaximized())
        self.settings_window.setStyleSheet(self.get_setting("stylesheet"))
        self.hide()

    def on_clicked_apply(self) -> None:
        """Executed when apply button in settings window is clicked."""
        self.data["settings"] = self.settings_window.get_settings()
        # Key sound
        self.set_key_sound(self.get_setting("sound_filename"))
        # Stylesheet
        self.settings_window.setStyleSheet(self.get_setting("stylesheet"))
        self.setStyleSheet(self.get_setting("stylesheet"))
        # Save
        self.save_data_to_file()

    def on_clicked_statistics(self) -> None:
        """Opens the statistics window and hides the main menu."""
        self.make_stats_window()
        self.show_window(self.stats_window, self.isMaximized())
        self.stats_window.setStyleSheet(self.get_setting("stylesheet"))
        self.hide()

    def on_clicked_reset_daily(self) -> None:
        """
        To be executed when 'Reset today's highscore' is pressed in the stats window.
        """
        self.highscore.delete_daily_highscore()
        self.update_highscores()
        self.update_stats_highscores()

    def on_clicked_reset_all_time(self) -> None:
        """
        To be executed when 'Reset all-time highscore' is pressed in the stats window.
        """
        self.highscore.delete_all_time_highscore()
        self.update_highscores()
        self.update_stats_highscores()

    def on_clicked_reset_all(self) -> None:
        """
        To be executed when 'Reset all highscores' is pressed in the stats window.
        """
        self.highscore.delete_all_highscores()
        self.update_highscores()
        self.update_stats_highscores()

    def on_change_mode(self):
        """
        Saves the selected mode to self.data and pickles self.data so the selection is
        remembered.
        """
        self.data["selected_mode"] = self.comboBoxSelectMode.currentIndex()
        self.save_data_to_file()

    # Helper Methods
    def get_setting(self, setting: str):
        """
        Convenience method for getting a specific setting from self.data, or a
        default value.
        """
        return self.data["settings"].get(
            setting, settings.DEFAULT_SETTINGS.get(setting)
        )

    def load_custom_font(self, font: str) -> int:
        """Adds custom font to QFontDatabase, and returns its corresponding font id."""
        return QFontDatabase.addApplicationFont(font)

    def show_window(self, window: QtWidgets.QWidget, fullscreen: bool) -> None:
        """
        Used to show windows, with the option to have them maximised provided.
        """
        window.show()
        if fullscreen:
            window.setWindowState(QtCore.Qt.WindowMaximized)

    def make_mode_window(self, mode: str) -> None:
        """Creates the typing-test window for the given mode and wires its
        main-menu button back to this window."""
        self.mode_window = type_test.TypingWindow(self.highscore)
        self.mode_window.set_mode(mode)
        self.mode_window.setWindowIcon(self.ICON)
        self.mode_window.buttonMainMenu.clicked.connect(
            lambda: self.on_clicked_main_menu(self.mode_window)
        )
        # Sets key sound if enabled
        if self.get_setting("play_sound"):
            self.mode_window.set_key_sound(self.key_sound)

    def make_settings_window(self) -> None:
        """Creates the settings window and syncs its widgets with the
        currently stored settings."""
        self.settings_window = settings.SettingsWindow()
        self.settings_window.setWindowIcon(self.ICON)
        self.settings_window.buttonMainMenu.clicked.connect(
            lambda: self.on_clicked_main_menu(self.settings_window)
        )
        self.settings_window.buttonApply.clicked.connect(self.on_clicked_apply)
        # Keystroke sound toggle
        if self.get_setting("play_sound"):
            self.settings_window.toggleKeystrokeSound.setChecked(True)
        # Dark mode toggle
        if self.get_setting("dark_mode"):
            self.settings_window.toggleDarkMode.setChecked(True)
        self.set_settings_sounds_options()
        self.set_selected_sound_option(self.get_setting("sound_filename"))

    def make_stats_window(self) -> None:
        """Creates the statistics window, fills its labels and graph, and
        connects its buttons."""
        self.stats_window = statistics.StatsWindow()
        self.stats_window.setWindowIcon(self.ICON)
        # Update labels
        self.update_stats_highscores()
        self.update_stats_days_ago()
        # Set up graph
        self.stats_window.set_up_graph(
            self.highscore.get_stats_dailies(), self.get_setting("graph_colours")
        )
        # Connect buttons
        self.stats_window.buttonMainMenu.clicked.connect(
            lambda: self.on_clicked_main_menu(self.stats_window)
        )
        self.stats_window.buttonResetDaily.clicked.connect(self.on_clicked_reset_daily)
        self.stats_window.buttonResetAllTime.clicked.connect(
            self.on_clicked_reset_all_time
        )
        self.stats_window.buttonResetAll.clicked.connect(self.on_clicked_reset_all)

    def update_highscores(self) -> None:
        """Caches today's and the all-time WPM highscores on the instance."""
        self.today_wpm, self.all_time_wpm = self.highscore.get_wpm()

    def save_data_to_file(self) -> None:
        """Pickles self.data into a file in the data folder."""
        with open(DATA_FILE, "wb") as data_pickle:
            pickle.dump(self.data, data_pickle)

    def load_data_from_file(self) -> None:
        """Sets self.data to the values saved on the data.pkl file."""
        with open(DATA_FILE, "rb") as data_pickle:
            self.data = pickle.load(data_pickle)

    def get_sounds_list(self) -> list:
        """Returns a list of the sound files present in the sounds folder."""
        return os.listdir(SOUND_FOLDER)

    def set_settings_sounds_options(self) -> None:
        """
        Sets up options for the dropdown menu to select keystroke sounds in the
        settings menu.
        """
        for sound_file in self.get_sounds_list():
            # Add sound file name to dropdown menu
            self.settings_window.comboSelectSound.addItem(sound_file)

    def find_sound_file_index(self, sound_file: str) -> int:
        """
        Returns the index of the given file name within the settings window
        comboSelectSound object.
        """
        return self.settings_window.comboSelectSound.findText(
            sound_file, QtCore.Qt.MatchFixedString
        )

    def set_selected_sound_option(self, sound_file: str) -> None:
        """
        Sets the selected option for sound file from the settings window's
        comboSelectSound object to the given sound file name.
        """
        index: int = self.find_sound_file_index(sound_file)
        if index >= 0:
            self.settings_window.comboSelectSound.setCurrentIndex(index)

    def set_key_sound(self, sound_file: str) -> None:
        """
        Sets the given sound file to a QSoundEffect object which will be played on each
        keystroke in the mode window.
        """
        self.key_sound_path = os.path.join(SOUND_FOLDER, sound_file)
        self.key_sound_url = QtCore.QUrl.fromLocalFile(self.key_sound_path)
        self.key_sound = QSoundEffect()
        self.key_sound.setSource(self.key_sound_url)
        self.key_sound.setVolume(0.5)
        self.key_sound.setLoopCount(1)

    def update_stats_highscores(self) -> None:
        """Updates highscores displayed in the stats window."""
        self.stats_window.labelTodayScore.setText(f"{self.today_wpm} WPM")
        self.stats_window.labelAllTimeScore.setText(f"{self.all_time_wpm} WPM")

    def update_stats_days_ago(self) -> None:
        """
        Updates the labelDaysAgo element in the stats window with the
        number of days since the all-time highscore was set.
        """
        self.stats_window.update_days_ago(self.highscore.days_since_set())
if __name__ == "__main__":
    # Entry point: create the Qt application and the main menu window.
    app = QtWidgets.QApplication([])
    window = MainWindow()
    window.show()
    # Stylesheet must be changed after window is shown
    window.setStyleSheet(window.get_setting("stylesheet"))
    app.exec_()
|
import { NgModule } from '@angular/core';
import { CommonModule } from '@angular/common';
import { ToggleComponent } from './toggle/toggle.component';
import { ToggleDirective } from './toggle/toggle.directive';
import { IndexComponent } from './index/index.component';
import { FormsModule } from '@angular/forms';
import { NgZorroAntdModule } from 'ng-zorro-antd';
/**
 * Angular feature module bundling the toggle component/directive and the
 * index component; only IndexComponent is exported for outside use.
 */
@NgModule({
  imports: [CommonModule, FormsModule, NgZorroAntdModule],
  declarations: [ToggleComponent, ToggleDirective, IndexComponent],
  exports: [IndexComponent],
})
export class CustomExtendsCompModule {}
|
using System;
/// <summary>
/// Holds install-related state for the application: the package source and
/// whether it is already installed (looked up in the registry once, at
/// construction).
/// </summary>
public class SoftwareInfo
{
    private string source;

    /// <summary>Package source; setting it raises a change notification.</summary>
    public string Source
    {
        get { return source; }
        set { source = value; OnPropertyChanged("Source"); }
    }

    // NOTE(review): evaluated once per instance at construction — it does not
    // refresh if the registry changes afterwards; confirm that is intended.
    public bool HasInstalled { get; set; } = RegeditUtility.SearchItemRegEdit(ApplicationParameters.UninstallRegeditPath, ApplicationParameters.DisplayName);

    // NOTE(review): hand-rolled notification — the class does not implement
    // INotifyPropertyChanged, so data binding will not observe these changes.
    private void OnPropertyChanged(string propertyName)
    {
        // Implement the logic for property change notification
        Console.WriteLine($"Property {propertyName} has changed.");
    }
}
/// <summary>Helper for querying the Windows Registry.</summary>
public static class RegeditUtility
{
    /// <summary>
    /// Searches the registry subtree <paramref name="path"/> for an entry
    /// matching <paramref name="displayName"/>. Currently a stub that
    /// always reports "not installed".
    /// </summary>
    public static bool SearchItemRegEdit(string path, string displayName)
    {
        // Implement the logic to search the Windows Registry for the installation status
        return false; // Placeholder return value
    }
}
/// <summary>Registry-lookup constants used by the install check.</summary>
public static class ApplicationParameters
{
    /// <summary>Registry subtree listing installed programs.</summary>
    public static string UninstallRegeditPath { get; } = "Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall";

    /// <summary>Display name to look for under the uninstall key.</summary>
    public static string DisplayName { get; } = "YourSoftwareDisplayName";
}
#!/usr/bin/env bash
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Abort on the first failing command.
set -e

# True when $1 is an absolute POSIX path or a Windows drive path (e.g. C:/...).
function is_absolute {
  [[ "$1" = /* ]] || [[ "$1" =~ ^[a-zA-Z]:[/\\].* ]]
}

# Prints $1 as an absolute path, resolving relative paths against $PWD.
function real_path() {
  is_absolute "$1" && echo "$1" || echo "$PWD/${1#./}"
}
# cp_external <src_dir> <dest_dir>
# Copies external (non-TensorFlow) non-Python files from a runfiles tree
# into the wheel staging area, skipping the CUDA/TensorRT/syslib config
# trees, then restores the one CUDA config header that is still needed.
function cp_external() {
  local src_dir=$1
  local dest_dir=$2
  pushd .
  cd "$src_dir"
  for f in `find . ! -type d ! -name '*.py' ! -path '*local_config_cuda*' ! -path '*local_config_tensorrt*' ! -path '*local_config_syslibs*' ! -path '*org_tensorflow*'`; do
    mkdir -p "${dest_dir}/$(dirname ${f})"
    cp "${f}" "${dest_dir}/$(dirname ${f})/"
  done
  popd
  # cuda_config.h was excluded above but is required for compiling user ops.
  mkdir -p "${dest_dir}/local_config_cuda/cuda/cuda/"
  cp "${src_dir}/local_config_cuda/cuda/cuda/cuda_config.h" "${dest_dir}/local_config_cuda/cuda/cuda/"
}
# Lower-cased kernel name, e.g. "linux", "darwin", "msys_nt-10.0".
PLATFORM="$(uname -s | tr 'A-Z' 'a-z')"

# Succeeds when running under a Windows POSIX layer (cygwin/mingw/msys).
function is_windows() {
  # FIX: return the test's own exit status directly instead of the
  # redundant `if cond; then true; else false; fi` indirection.
  [[ "${PLATFORM}" =~ (cygwin|mingw32|mingw64|msys)_nt* ]]
}
# prepare_src <dir>
# Stages the TensorFlow Python sources, external headers, protobuf headers,
# Eigen and packaging metadata from the bazel runfiles tree into <dir> so a
# wheel can be built from it. Must be run from the root of the build tree.
function prepare_src() {
  if [ $# -lt 1 ] ; then
    echo "No destination dir provided"
    exit 1
  fi

  TMPDIR="$1"
  mkdir -p "$TMPDIR"
  EXTERNAL_INCLUDES="${TMPDIR}/tensorflow/include/external"

  echo $(date) : "=== Preparing sources in dir: ${TMPDIR}"

  if [ ! -d bazel-bin/tensorflow ]; then
    echo "Could not find bazel-bin. Did you run from the root of the build tree?"
    exit 1
  fi

  if is_windows; then
    # On Windows the runfiles tree only exists inside the zipped python
    # binary, so unzip it first to get a copyable directory layout.
    rm -rf ./bazel-bin/tensorflow/tools/pip_package/simple_console_for_window_unzip
    mkdir -p ./bazel-bin/tensorflow/tools/pip_package/simple_console_for_window_unzip
    echo "Unzipping simple_console_for_windows.zip to create runfiles tree..."
    unzip -o -q ./bazel-bin/tensorflow/tools/pip_package/simple_console_for_windows.zip -d ./bazel-bin/tensorflow/tools/pip_package/simple_console_for_window_unzip
    echo "Unzip finished."
    # runfiles structure after unzip the python binary
    cp -R \
      bazel-bin/tensorflow/tools/pip_package/simple_console_for_window_unzip/runfiles/org_tensorflow/tensorflow \
      "${TMPDIR}"
    cp_external \
      bazel-bin/tensorflow/tools/pip_package/simple_console_for_window_unzip/runfiles \
      "${EXTERNAL_INCLUDES}/"
    RUNFILES=bazel-bin/tensorflow/tools/pip_package/simple_console_for_window_unzip/runfiles/org_tensorflow
  else
    RUNFILES=bazel-bin/tensorflow/tools/pip_package/build_pip_package.runfiles/org_tensorflow
    if [ -d bazel-bin/tensorflow/tools/pip_package/build_pip_package.runfiles/org_tensorflow/external ]; then
      # Old-style runfiles structure (--legacy_external_runfiles).
      cp -R \
        bazel-bin/tensorflow/tools/pip_package/build_pip_package.runfiles/org_tensorflow/tensorflow \
        "${TMPDIR}"
      cp_external \
        bazel-bin/tensorflow/tools/pip_package/build_pip_package.runfiles/org_tensorflow/external \
        "${EXTERNAL_INCLUDES}"
      # Copy MKL libs over so they can be loaded at runtime
      so_lib_dir=$(ls $RUNFILES | grep solib) || true
      if [ -n "${so_lib_dir}" ]; then
        mkl_so_dir=$(ls ${RUNFILES}/${so_lib_dir} | grep mkl) || true
        if [ -n "${mkl_so_dir}" ]; then
          mkdir "${TMPDIR}/${so_lib_dir}"
          cp -R ${RUNFILES}/${so_lib_dir}/${mkl_so_dir} "${TMPDIR}/${so_lib_dir}"
        fi
      fi
    else
      # New-style runfiles structure (--nolegacy_external_runfiles).
      cp -R \
        bazel-bin/tensorflow/tools/pip_package/build_pip_package.runfiles/org_tensorflow/tensorflow \
        "${TMPDIR}"
      cp_external \
        bazel-bin/tensorflow/tools/pip_package/build_pip_package.runfiles \
        "${EXTERNAL_INCLUDES}"
      # Copy MKL libs over so they can be loaded at runtime
      so_lib_dir=$(ls $RUNFILES | grep solib) || true
      if [ -n "${so_lib_dir}" ]; then
        mkl_so_dir=$(ls ${RUNFILES}/${so_lib_dir} | grep mkl) || true
        if [ -n "${mkl_so_dir}" ]; then
          mkdir "${TMPDIR}/${so_lib_dir}"
          cp -R ${RUNFILES}/${so_lib_dir}/${mkl_so_dir} "${TMPDIR}/${so_lib_dir}"
        fi
      fi
    fi
  fi

  # protobuf pip package doesn't ship with header files. Copy the headers
  # over so user defined ops can be compiled.
  mkdir -p ${TMPDIR}/google
  mkdir -p ${TMPDIR}/third_party
  pushd ${RUNFILES%org_tensorflow} > /dev/null
  for header in $(find protobuf_archive -type f,l \( -name "*.h" -o -name "*.inc" \)); do
    mkdir -p "${TMPDIR}/google/$(dirname ${header})"
    cp "$header" "${TMPDIR}/google/$(dirname ${header})/"
  done
  popd > /dev/null
  cp -R $RUNFILES/third_party/eigen3 ${TMPDIR}/third_party

  # Packaging metadata for setup.py.
  cp tensorflow/tools/pip_package/MANIFEST.in ${TMPDIR}
  cp tensorflow/tools/pip_package/README ${TMPDIR}
  cp tensorflow/tools/pip_package/setup.py ${TMPDIR}

  # Remove the framework shared libraries from the staged tree.
  rm -f ${TMPDIR}/tensorflow/libtensorflow_framework.so
  rm -f ${TMPDIR}/tensorflow/libtensorflow_framework.so.[0-9].*
}
# build_wheel <srcdir> <destdir> [pkg_name_flag]
# Runs `setup.py bdist_wheel` inside <srcdir> and copies the resulting
# wheel(s) into <destdir>.
function build_wheel() {
  if [ $# -lt 2 ] ; then
    echo "No src and dest dir provided"
    exit 1
  fi

  TMPDIR="$1"
  DEST="$2"
  PKG_NAME_FLAG="$3"

  # Before we leave the top-level directory, make sure we know how to
  # call python.
  if [[ -e tools/python_bin_path.sh ]]; then
    source tools/python_bin_path.sh
  fi

  pushd ${TMPDIR} > /dev/null
  rm -f MANIFEST
  echo $(date) : "=== Building wheel"
  "${PYTHON_BIN_PATH:-python}" setup.py bdist_wheel ${PKG_NAME_FLAG} >/dev/null
  mkdir -p ${DEST}
  cp dist/* ${DEST}
  popd > /dev/null
  echo $(date) : "=== Output wheel file is in: ${DEST}"
}
# Prints command-line help and exits with status 1.
function usage() {
  echo "Usage:"
  echo "$0 [--src srcdir] [--dst dstdir] [options]"
  echo "$0 dstdir [options]"
  echo ""
  echo "    --src                 prepare sources in srcdir"
  echo "                              will use temporary dir if not specified"
  echo ""
  echo "    --dst                 build wheel in dstdir"
  echo "                              if dstdir is not set do not build, only prepare sources"
  echo ""
  echo "  Options:"
  echo "    --project_name <name> set project name to name"
  echo "    --gpu                 build tensorflow_gpu"
  echo "    --gpudirect           build tensorflow_gpudirect"
  echo "    --nightly_flag        build tensorflow nightly"
  echo ""
  exit 1
}
# Parses command-line flags, stages the sources, and (when a destination is
# given) builds the wheel. See usage() for the accepted flags.
function main() {
  PKG_NAME_FLAG=""
  PROJECT_NAME=""
  GPU_BUILD=0
  NIGHTLY_BUILD=0
  SRCDIR=""
  DSTDIR=""
  CLEANSRC=1
  while true; do
    if [[ "$1" == "--help" ]]; then
      usage
      exit 1
    elif [[ "$1" == "--nightly_flag" ]]; then
      NIGHTLY_BUILD=1
    elif [[ "$1" == "--gpu" ]]; then
      GPU_BUILD=1
    elif [[ "$1" == "--gpudirect" ]]; then
      PKG_NAME_FLAG="--project_name tensorflow_gpudirect"
    elif [[ "$1" == "--project_name" ]]; then
      shift
      if [[ -z "$1" ]]; then
        break
      fi
      PROJECT_NAME="$1"
    elif [[ "$1" == "--src" ]]; then
      shift
      # FIX: quote "$1" so paths containing spaces survive real_path.
      SRCDIR="$(real_path "$1")"
      # A user-supplied source dir is never deleted on exit.
      CLEANSRC=0
    elif [[ "$1" == "--dst" ]]; then
      shift
      DSTDIR="$(real_path "$1")"
    else
      # Bare positional argument is treated as the destination dir.
      DSTDIR="$(real_path "$1")"
    fi
    shift

    if [[ -z "$1" ]]; then
      break
    fi
  done

  if [[ -z "$DSTDIR" ]] && [[ -z "$SRCDIR" ]]; then
    echo "No destination dir provided"
    usage
    exit 1
  fi

  if [[ -z "$SRCDIR" ]]; then
    # make temp srcdir if none set
    SRCDIR="$(mktemp -d -t tmp.XXXXXXXXXX)"
  fi

  prepare_src "$SRCDIR"

  if [[ -z "$DSTDIR" ]]; then
    # only want to prepare sources
    exit
  fi

  # --project_name wins over the --gpu/--nightly_flag derived names.
  if [[ -n ${PROJECT_NAME} ]]; then
    PKG_NAME_FLAG="--project_name ${PROJECT_NAME}"
  elif [[ ${NIGHTLY_BUILD} == "1" && ${GPU_BUILD} == "1" ]]; then
    PKG_NAME_FLAG="--project_name tf_nightly_gpu"
  elif [[ ${NIGHTLY_BUILD} == "1" ]]; then
    PKG_NAME_FLAG="--project_name tf_nightly"
  elif [[ ${GPU_BUILD} == "1" ]]; then
    PKG_NAME_FLAG="--project_name tensorflow_gpu"
  fi

  build_wheel "$SRCDIR" "$DSTDIR" "$PKG_NAME_FLAG"

  if [[ $CLEANSRC -ne 0 ]]; then
    # FIX: remove the directory this function created (SRCDIR) instead of
    # relying on the TMPDIR global leaked out of prepare_src/build_wheel.
    rm -rf "${SRCDIR}"
  fi
}

main "$@"
|
""" Functions for custom metrics.
"""
import datasets
import numpy as np
from sklearn.metrics import recall_score, f1_score, log_loss, precision_score
# Metadata strings surfaced through datasets.MetricInfo; intentionally blank
# for this in-house metric.
_CITATION = """
"""

_DESCRIPTION = """\
"""

_KWARGS_DESCRIPTION = """
"""
def simple_accuracy(preds, labels):
    """Calculate accuracy.

    Args:
        preds: Predictions (array-like).
        labels: Labels (array-like, same length as ``preds``).

    Returns:
        float: Fraction of positions where ``preds == labels``.
    """
    # FIX: coerce to numpy arrays so plain Python lists work too — the
    # original relied on ``(preds == labels)`` being an array, which only
    # holds when at least one operand is already a numpy array. Also fixes
    # the docstring's "Lables" typo and the wrong "int" return type.
    preds = np.asarray(preds)
    labels = np.asarray(labels)
    return (preds == labels).mean()
def prob2label(prod):
    """Threshold probabilities at 0.5.

    Args:
        prod: Probability of prediction (confidence); scalar or array.

    Returns:
        Boolean result of ``prod > 0.5`` (not literal 0/1 ints; numpy and
        sklearn treat the booleans as 0/1 downstream).
    """
    return (prod > 0.5)
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class custom_metric(datasets.Metric):
    """Create Custom Metric for huggingface. Computes F1, Accuracy, Recall,
    Precision and Log Loss.
    """

    def _info(self):
        # Input schema: one float prediction (probability) and one float
        # reference label per example; batches arrive as numpy arrays.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float32"),
                    "references": datasets.Value("float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        """Compute F1, Accuracy, Recall, Precision and Log Loss.

        Args:
            predictions: Predictions of Model (in probability)
            references: Correct Labels

        Returns:
            dict: F1, Accuracy, Recall, Precision and Log Loss
        """
        # References arrive as floats (see _info); cast to ints for sklearn.
        references = [int(single_labels) for single_labels in references]
        # Binarize probabilities at 0.5; log_loss uses the raw probabilities.
        predictions_label = prob2label(predictions)
        return {"accuracy": simple_accuracy(predictions_label, references),
                "recall": recall_score(references, predictions_label),
                "precision": precision_score(references, predictions_label),
                "f1": f1_score(references, predictions_label),
                "log_loss": log_loss(references, predictions, labels=[0, 1])}
|
<reponame>dk123sw/hybrid-Development
package com.example.jingbin.webviewstudy.utils;
import android.media.AudioFormat;
import android.media.AudioRecord;
import android.os.CountDownTimer;
import android.widget.Toast;
import com.example.jingbin.webviewstudy.WebViewActivity;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
/**
 * Countdown timer that repeatedly tries to convert a PCM recording at
 * {@code path + ".pcm"} into a WAV file at {@code path + ".wav"}.
 *
 * Fixes vs. the original:
 * - pcmToWave wrote the whole buffer on every read, so the final
 *   partially-filled read appended garbage bytes to the WAV; now only the
 *   bytes actually read are written.
 * - Streams leaked when an exception occurred mid-copy; now closed via
 *   try-with-resources.
 * - onFinish dereferenced {@code mActivity}, which is never assigned,
 *   causing a NullPointerException; now guarded.
 */
public class TimeCount extends CountDownTimer {

    /** PCM sample rate in Hz of the recorded input. */
    private int mSampleRateInHZ = 16000;
    /** Base file path; ".pcm"/".wav" suffixes are appended. */
    private String path;
    /** Toast context for onFinish; NOTE(review): never assigned anywhere in this class. */
    private WebViewActivity mActivity;

    /**
     * @param millisInFuture    total countdown duration in milliseconds
     * @param countDownInterval tick interval in milliseconds
     * @param path              base file path without extension
     */
    public TimeCount(long millisInFuture, long countDownInterval, String path) {
        super(millisInFuture, countDownInterval);
        this.path = path;
    }

    @Override
    public void onTick(long millisUntilFinished) {
        // On every tick, convert the capture to WAV once the PCM file exists.
        File f = new File(path + ".pcm");
        if (f.exists()) {
            pcmToWave(path + ".pcm", path + ".wav");
        }
    }

    @Override
    public void onFinish() {
        // Guard: mActivity may be null (it is never assigned in this class),
        // which previously crashed with a NullPointerException here.
        if (mActivity != null) {
            Toast.makeText(mActivity, "转换wav失败,请手动转换", Toast.LENGTH_SHORT).show();
        }
    }

    /**
     * Wraps the raw PCM data from {@code inFileName} in a 44-byte WAV (RIFF)
     * header and writes the result to {@code outFileName}.
     */
    private void pcmToWave(String inFileName, String outFileName) {
        int mRecorderBufferSize = AudioRecord.getMinBufferSize(mSampleRateInHZ, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
        long longSampleRate = mSampleRateInHZ;
        int channels = 1; // mono capture; use 2 for stereo
        long byteRate = 16 * longSampleRate * channels / 8; // bytes/sec = bits * rate * channels / 8
        byte[] data = new byte[mRecorderBufferSize];
        try (FileInputStream in = new FileInputStream(inFileName);
             FileOutputStream out = new FileOutputStream(outFileName)) {
            long totalAudioLen = in.getChannel().size();
            long totalDataLen = totalAudioLen + 36; // RIFF chunk size = data + header - 8
            writeWaveFileHeader(out, totalAudioLen, totalDataLen, longSampleRate, channels, byteRate);
            int read;
            while ((read = in.read(data)) != -1) {
                // Write only the bytes actually read; the original wrote the
                // full buffer each time, corrupting the tail of the file.
                out.write(data, 0, read);
            }
        } catch (IOException e) {
            // FileNotFoundException is an IOException, so one catch suffices.
            e.printStackTrace();
        }
    }

    /*
     * A WAV file is a RIFF container: a RIFF/WAVE chunk, an "fmt " chunk
     * describing the PCM stream, and a "data" chunk holding the samples
     * (the optional "fact" chunk is omitted here).
     */
    private void writeWaveFileHeader(FileOutputStream out, long totalAudioLen, long totalDataLen, long longSampleRate,
                                     int channels, long byteRate) throws IOException {
        byte[] header = new byte[44];
        header[0] = 'R'; // RIFF
        header[1] = 'I';
        header[2] = 'F';
        header[3] = 'F';
        header[4] = (byte) (totalDataLen & 0xff); // chunk size, little-endian
        header[5] = (byte) ((totalDataLen >> 8) & 0xff);
        header[6] = (byte) ((totalDataLen >> 16) & 0xff);
        header[7] = (byte) ((totalDataLen >> 24) & 0xff);
        header[8] = 'W'; // WAVE
        header[9] = 'A';
        header[10] = 'V';
        header[11] = 'E';
        // fmt chunk
        header[12] = 'f'; // 'fmt '
        header[13] = 'm';
        header[14] = 't';
        header[15] = ' ';
        header[16] = 16; // 4 bytes: size of 'fmt ' chunk
        header[17] = 0;
        header[18] = 0;
        header[19] = 0;
        header[20] = 1; // audio format = 1 (PCM)
        header[21] = 0;
        header[22] = (byte) channels; // channel count
        header[23] = 0;
        header[24] = (byte) (longSampleRate & 0xff); // sample rate, little-endian
        header[25] = (byte) ((longSampleRate >> 8) & 0xff);
        header[26] = (byte) ((longSampleRate >> 16) & 0xff);
        header[27] = (byte) ((longSampleRate >> 24) & 0xff);
        header[28] = (byte) (byteRate & 0xff); // byte rate = rate * channels * bits / 8
        header[29] = (byte) ((byteRate >> 8) & 0xff);
        header[30] = (byte) ((byteRate >> 16) & 0xff);
        header[31] = (byte) ((byteRate >> 24) & 0xff);
        // Block align = channels * bits per sample / 8.
        // FIX: derive from `channels` instead of hardcoding 1, keeping it
        // consistent with header[22] (behavior unchanged for mono).
        header[32] = (byte) (channels * 16 / 8);
        header[33] = 0;
        header[34] = 16; // bits per sample
        header[35] = 0;
        // data chunk
        header[36] = 'd'; // data
        header[37] = 'a';
        header[38] = 't';
        header[39] = 'a';
        header[40] = (byte) (totalAudioLen & 0xff); // payload size, little-endian
        header[41] = (byte) ((totalAudioLen >> 8) & 0xff);
        header[42] = (byte) ((totalAudioLen >> 16) & 0xff);
        header[43] = (byte) ((totalAudioLen >> 24) & 0xff);
        out.write(header, 0, 44);
    }
}
|
/**
 * Created by hfutlj on 2018/5/10.
 * Command-pattern demo class; expected output:
 * 做烤鸡翅
 * 做烤鱼
 * 做烤鱼
 */
public class Test {
    public static void main(String[] args) {
        // Hire the cook (the receiver) before opening the restaurant.
        Cook chef = new Cook();

        // Two concrete commands — the two dishes customers can order.
        AbstractOrder wingsOrder = new DoChickenWingOrder();
        AbstractOrder fishOrder = new DoFishOrder();

        // Both dishes are prepared by the same cook.
        wingsOrder.setCook(chef);
        fishOrder.setCook(chef);

        // The waiter (the invoker) collects and relays the orders.
        Waiter server = new Waiter();

        // Open for business: customers place their orders.
        server.addOrder(wingsOrder);
        server.addOrder(fishOrder);
        server.addOrder(fishOrder);
        server.executeOrder();
    }
}
|
<gh_stars>0
package com.alipay.api.response;
import java.util.List;
import com.alipay.api.internal.mapping.ApiField;
import com.alipay.api.internal.mapping.ApiListField;
import com.alipay.api.domain.PromoAccessBaseCatalogVO;
import com.alipay.api.domain.PromoContentApiSchemaVO;
import com.alipay.api.AlipayResponse;
/**
* ALIPAY API: alipay.open.app.service.promotemplate.query response.
*
* @author auto create
* @since 1.0, 2021-10-22 14:25:30
*/
public class AlipayOpenAppServicePromotemplateQueryResponse extends AlipayResponse {

    private static final long serialVersionUID = 5496895517466281694L;

    /** Catalogs admitted to the booth. */
    @ApiListField("promo_access_catalog_vos")
    @ApiField("promo_access_base_catalog_v_o")
    private List<PromoAccessBaseCatalogVO> promoAccessCatalogVos;

    /** Description of the booth. */
    @ApiField("promo_booth_desc")
    private String promoBoothDesc;

    /** See https://opendocs.alipay.com/mini/operation/as2i2u#FAQ */
    @ApiField("promo_booth_desc_url")
    private String promoBoothDescUrl;

    /**
     * Allowed entity-count type for the booth:
     * SINGLE_SERVICE ("single service"),
     * MULTI_SERVICE_OF_ONE_CAT ("multiple services under one catalog").
     */
    @ApiField("promo_booth_entity_num_type")
    private String promoBoothEntityNumType;

    /**
     * Allowed entity type for the booth:
     * STD_SERVICE ("standardized service"), MINI_APP ("mini program").
     */
    @ApiField("promo_booth_entity_type")
    private String promoBoothEntityType;

    /** Booth ID. */
    @ApiField("promo_booth_id")
    private String promoBoothId;

    /** Booth name. */
    @ApiField("promo_booth_name")
    private String promoBoothName;

    /** Content schemas. */
    @ApiListField("promo_content_api_schema_vos")
    @ApiField("promo_content_api_schema_v_o")
    private List<PromoContentApiSchemaVO> promoContentApiSchemaVos;

    public void setPromoAccessCatalogVos(List<PromoAccessBaseCatalogVO> promoAccessCatalogVos) {
        this.promoAccessCatalogVos = promoAccessCatalogVos;
    }

    public List<PromoAccessBaseCatalogVO> getPromoAccessCatalogVos() {
        return this.promoAccessCatalogVos;
    }

    public void setPromoBoothDesc(String promoBoothDesc) {
        this.promoBoothDesc = promoBoothDesc;
    }

    public String getPromoBoothDesc() {
        return this.promoBoothDesc;
    }

    public void setPromoBoothDescUrl(String promoBoothDescUrl) {
        this.promoBoothDescUrl = promoBoothDescUrl;
    }

    public String getPromoBoothDescUrl() {
        return this.promoBoothDescUrl;
    }

    public void setPromoBoothEntityNumType(String promoBoothEntityNumType) {
        this.promoBoothEntityNumType = promoBoothEntityNumType;
    }

    public String getPromoBoothEntityNumType() {
        return this.promoBoothEntityNumType;
    }

    public void setPromoBoothEntityType(String promoBoothEntityType) {
        this.promoBoothEntityType = promoBoothEntityType;
    }

    public String getPromoBoothEntityType() {
        return this.promoBoothEntityType;
    }

    public void setPromoBoothId(String promoBoothId) {
        this.promoBoothId = promoBoothId;
    }

    public String getPromoBoothId() {
        return this.promoBoothId;
    }

    public void setPromoBoothName(String promoBoothName) {
        this.promoBoothName = promoBoothName;
    }

    public String getPromoBoothName() {
        return this.promoBoothName;
    }

    public void setPromoContentApiSchemaVos(List<PromoContentApiSchemaVO> promoContentApiSchemaVos) {
        this.promoContentApiSchemaVos = promoContentApiSchemaVos;
    }

    public List<PromoContentApiSchemaVO> getPromoContentApiSchemaVos() {
        return this.promoContentApiSchemaVos;
    }
}
|
<filename>src/pages/basePage.tsx
import React, { Component } from 'react';
import { connect } from 'react-redux';
import { Route, Switch } from 'react-router';
import { bindActionCreators } from 'redux';
import Footer from '../components/footer';
import Header from '../components/header';
import Loading from '../components/Loading';
import IAppState from '../interfaces/IAppState';
import RouteConstants from './../constants/routeConstants';
const ContactPage = React.lazy(() => import('./contactPage'));
const HomePage = React.lazy(() => import('./homePage'));
const SignUpPage = React.lazy(() => import('./signUpPage'));
const CheckListPage = React.lazy(() => import('./checkListPage'));
const LoginPage = React.lazy(() => import('./loginPage'));
// Props derived from the redux store (currently none).
interface StateProps {
}

// Action creators bound to dispatch (currently none). Declared as a class —
// not an interface — so `new DispatchProps()` can be spread into
// bindActionCreators below.
class DispatchProps {
}
/**
 * Top-level page layout: a header and footer around a route switch whose
 * pages are lazy-loaded, each wrapped in Suspense with a Loading fallback.
 * Routes are matched in the order listed; homeRoute comes last.
 */
export class BasePageComponent extends Component<StateProps & DispatchProps, any>
{
    render() {
        return (
            <div className="BasePage">
                <Header />
                <Switch>
                    <Route path={RouteConstants.loginRoute}>
                        <React.Suspense fallback={<Loading />}>
                            <LoginPage />
                        </React.Suspense>
                    </Route>
                    <Route path={RouteConstants.checkListRoute}>
                        <React.Suspense fallback={<Loading />}>
                            <CheckListPage />
                        </React.Suspense>
                    </Route>
                    <Route path={RouteConstants.contactRoute}>
                        <React.Suspense fallback={<Loading />}>
                            <ContactPage />
                        </React.Suspense>
                    </Route>
                    <Route path={RouteConstants.signUpRoute}>
                        <React.Suspense fallback={<Loading />}>
                            <SignUpPage />
                        </React.Suspense>
                    </Route>
                    <Route path={RouteConstants.homeRoute}>
                        <React.Suspense fallback={<Loading />}>
                            <HomePage />
                        </React.Suspense>
                    </Route>
                </Switch>
                <Footer />
            </div>
        );
    }
}
function connectStateToProps(state: IAppState): StateProps {
return {
};
}
function connectDispatchToProps(dispatch: any): DispatchProps {
return bindActionCreators({ ...new DispatchProps() }, dispatch);
}
let BasePage = connect(connectStateToProps, connectDispatchToProps)(BasePageComponent);
export default BasePage |
package com.github.open96.jypm.fxml;
import com.github.open96.jypm.playlist.PlaylistManager;
import com.github.open96.jypm.playlist.pojo.Playlist;
import com.github.open96.jypm.thread.TASK_TYPE;
import com.github.open96.jypm.thread.ThreadManager;
import javafx.application.Platform;
import javafx.collections.ObservableList;
import javafx.fxml.FXML;
import javafx.fxml.Initializable;
import javafx.scene.control.ListView;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.net.URL;
import java.util.ResourceBundle;
/**
* Controller for the list of managed playlists that are displayed in center of the main window in application.
*/
public class RootListViewController implements Initializable {

    // Backing list for the ListView; obtained from (and mutated by) PlaylistManager.
    private static ObservableList<Playlist> playlistObservableList;
    private static final Logger LOG = LogManager.getLogger(RootListViewController.class.getName());

    @FXML
    private ListView<Playlist> listView;

    public RootListViewController() {
        LOG.debug("Loading playlists in controller...");
        //Fill ObservableList with playlists that should be displayed for user in UI
        playlistObservableList = PlaylistManager
                .getInstance()
                .getPlaylists();
        LOG.debug("Finished loading playlists in controller, succesfully loaded " +
                playlistObservableList.size() + " playlists into the ListView");
    }

    @Override
    public void initialize(URL url, ResourceBundle resourceBundle) {
        //Make ListView display elements from playlistObservableList
        listView.setItems(playlistObservableList);
        //Make sure ListView will display them in from now objects from RootListCellController class
        listView.setCellFactory(playlistListView -> new RootListCellController());
        startUIUpdaterThread();
    }

    /**
     * Spawns a background task that polls the playlist list every 255 ms and
     * forces the ListView to redraw when the list's size changes.
     * NOTE(review): a ListChangeListener on the ObservableList would avoid
     * this busy polling — polling may be deliberate here; confirm before
     * changing. Size-based change detection also misses in-place edits.
     */
    private void startUIUpdaterThread() {
        ThreadManager.getInstance().sendVoidTask(new Thread(() -> {
            try {
                int lastKnownObservableListSize = playlistObservableList.size();
                while (ThreadManager.getExecutionPermission()) {
                    if (playlistObservableList.size() != lastKnownObservableListSize) {
                        Platform.runLater(() -> {
                            // Re-setting the items forces a full cell refresh.
                            listView.setItems(null);
                            listView.setItems(playlistObservableList);
                        });
                        lastKnownObservableListSize = playlistObservableList.size();
                    }
                    Thread.sleep(255);
                }
            } catch (InterruptedException e) {
                LOG.error("Thread sleep has been interrupted");
            }
        }), TASK_TYPE.UI);
    }
}
|
class ParseRunJob < ApplicationJob
  # Adding or deleting a job? Reflect the change in the QUEUES environment variable in docker-compose.yml and
  # docker-compose-production.yml.
  queue_as :parse_run

  # Parses a run into the database and notifies subscribers over the run's
  # websocket channel; broadcasts a failure message when parsing is impossible.
  def perform(run)
    return if run.nil?

    begin
      run.parse_into_db
      raise UnparsedRun::UnparsableRun unless run.parsed?

      Api::V4::RunChannel.broadcast_to(run, Api::V4::WebsocketMessage.new('run_parsed', message: 'Run parsed').to_h)
    rescue UnparsedRun::UnparsableRun, UnparsedRun::RunFileMissing
      Api::V4::RunChannel.broadcast_to(
        run,
        Api::V4::WebsocketMessage.new('cant_parse_run', message: 'Run cannot be parsed').to_h
      )
    end
  end
end
|
<filename>src/actions/inputs.js
import { ADD_INPUT } from "../constants/actionTypes";
// Thunk action creator: records the selected location/time filters
// (state, district, block, year) in the store via an ADD_INPUT action.
export const updateInputDetails = (state, district, block, year) => (dispatch) => {
  dispatch({
    type: ADD_INPUT,
    state,
    district,
    block,
    year,
  });
};
|
import org.junit.*;
public class OneFail {
    // Always-failing test: asserts `false`.
    // NOTE(review): the class name "OneFail" suggests this is a deliberate
    // fixture (e.g. sample input for a test-report consumer) — confirm the
    // intent before "fixing" the assertion.
    @Test public void pass() {
        Assert.assertTrue(false);
    }
}
|
<reponame>test-summary/action
import * as chai from "chai"
import chaiAsPromised from 'chai-as-promised'
import { expect } from "chai"
import { TestStatus, parseFile } from "../src/test_parser"
chai.use(chaiAsPromised)
// Fixture directories containing sample TAP and JUnit report files.
const tapResourcePath = `${__dirname}/resources/tap`
const junitResourcePath = `${__dirname}/resources/junit`

// Note: the describe callback must be synchronous — mocha ignores a promise
// returned from a suite callback; tests register synchronously anyway.
describe("file", () => {
    it("identifies common tap", async () => {
        const result = await parseFile(`${tapResourcePath}/01-common.tap`)
        expect(result.counts.passed).to.eql(6)
        expect(result.counts.failed).to.eql(0)
        expect(result.counts.skipped).to.eql(0)
    })
    it("identifies creative liberties tap", async () => {
        const result = await parseFile(`${tapResourcePath}/07-creative-liberties.tap`)
        expect(result.counts.passed).to.eql(9)
        expect(result.counts.failed).to.eql(0)
        expect(result.counts.skipped).to.eql(0)
    })
    it("identifies node-tap output", async () => {
        const result = await parseFile(`${tapResourcePath}/09-node-tap.tap`)
        expect(result.counts.passed).to.eql(4)
        expect(result.counts.failed).to.eql(4)
        expect(result.counts.skipped).to.eql(2)
    })
    it("rejects invalid tap file", async () => {
        // chai-as-promised assertions are themselves promises: they must be
        // awaited (or returned), otherwise a rejection/failure is silently
        // swallowed and the test passes spuriously.
        await expect(parseFile(`${tapResourcePath}/10-results-after-trailer.tap`)).to.be.rejectedWith(Error)
    })
    it("identifies common junit", async () => {
        const result = await parseFile(`${junitResourcePath}/01-common.xml`)
        expect(result.counts.passed).to.eql(7)
        expect(result.counts.failed).to.eql(1)
        expect(result.counts.skipped).to.eql(0)
    })
    it("identifies example junit", async () => {
        const result = await parseFile(`${junitResourcePath}/02-example.xml`)
        expect(result.counts.passed).to.eql(21)
        expect(result.counts.failed).to.eql(9)
        expect(result.counts.skipped).to.eql(0)
    })
    it("identifies junit", async () => {
        const result = await parseFile(`${junitResourcePath}/03-junit.xml`)
        expect(result.counts.passed).to.eql(4)
        expect(result.counts.failed).to.eql(4)
        expect(result.counts.skipped).to.eql(2)
    })
})
|
#!/usr/bin/env bats
# Copyright 2017 tsuru authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Each test starts from a clean application directory owned by the app user.
setup() {
    rm -rf /home/application/current && mkdir /home/application/current
    chown ubuntu /home/application/current
}

# With no tsuru.yaml, the deploy falls back to PHP 5.6 + apache mod_php.
@test "using default php(5.6) + apache-mod-php" {
    run /var/lib/tsuru/deploy
    run cat /home/application/current/Procfile
    [ "$status" -eq 0 ]
    [ "$output" == 'web: /bin/bash -lc "sudo -E /usr/sbin/apache2 -d /etc/apache2 -k start -DNO_DETACH "' ]
}

@test "using php7.1 + apache-mod-php" {
    cat >/home/application/current/tsuru.yaml <<EOF
php:
  version: 7.1
EOF
    run /var/lib/tsuru/deploy
    run ls /etc/apache2/mods-enabled/php*.conf
    [ "$status" -eq 0 ]
    [[ "$output" == "/etc/apache2/mods-enabled/php7.1.conf" ]]
}

# An unknown version silently falls back to the platform default (5.6).
@test "using invalid version backs to default_version" {
    cat >/home/application/current/tsuru.yaml <<EOF
php:
  version: 1000
EOF
    run /var/lib/tsuru/deploy
    run ls /etc/apache2/mods-enabled/php*.conf
    [ "$status" -eq 0 ]
    [[ "$output" == "/etc/apache2/mods-enabled/php5.6.conf" ]]
}

# Legacy interpreter name "fpm54" is still accepted; apache remains the
# default frontend and fpm 5.6 is used.
@test "using old fpm format and default frontend" {
    cat >/home/application/current/tsuru.yaml <<EOF
php:
  interpretor:
    name: fpm54
EOF
    run /var/lib/tsuru/deploy
    run cat /home/application/current/Procfile
    [ "$output" == 'web: /bin/bash -lc "sudo -E /usr/sbin/apache2 -d /etc/apache2 -k start && /usr/sbin/php-fpm5.6 --fpm-config /etc/php/5.6/fpm/php-fpm.conf "' ]
}

@test "php 7.0 using fpm and default frontend" {
    cat >/home/application/current/tsuru.yaml <<EOF
php:
  version: 7.0
  interpretor:
    name: fpm
EOF
    run /var/lib/tsuru/deploy
    run cat /home/application/current/Procfile
    [ "$output" == 'web: /bin/bash -lc "sudo -E /usr/sbin/apache2 -d /etc/apache2 -k start && /usr/sbin/php-fpm7.0 --fpm-config /etc/php/7.0/fpm/php-fpm.conf "' ]
}

@test "php 7.0 using fpm and nginx as frontend" {
    cat >/home/application/current/tsuru.yaml <<EOF
php:
  version: 7.0
  interpretor:
    name: fpm
  frontend:
    name: nginx
EOF
    run /var/lib/tsuru/deploy
    run cat /home/application/current/Procfile
    [ "$output" == 'web: /bin/bash -lc "sudo -E /usr/sbin/nginx && /usr/sbin/php-fpm7.0 --fpm-config /etc/php/7.0/fpm/php-fpm.conf "' ]
}

# Legacy "php5-mysql" extension name should be translated to the
# version-matching package (php7.1-mysql here).
@test "php 7.1 using fpm, apache2 as frontend and old php5-mysql module format" {
    cat >/home/application/current/tsuru.yaml <<EOF
php:
  version: 7.1
  interpretor:
    name: fpm
    options:
      extensions:
        - php5-mysql
  frontend:
    name: apache
EOF
    run /var/lib/tsuru/deploy
    run cat /home/application/current/Procfile
    [ "$output" == 'web: /bin/bash -lc "sudo -E /usr/sbin/apache2 -d /etc/apache2 -k start && /usr/sbin/php-fpm7.1 --fpm-config /etc/php/7.1/fpm/php-fpm.conf "' ]
    run bash -c 'dpkg -s php7.1-mysql | grep Status'
    [ "$output" == 'Status: install ok installed' ]
}

@test "install composer modules" {
    cat >/home/application/current/composer.json <<EOF
{
  "require": {
    "ehime/hello-world": "*"
  }
}
EOF
    run /var/lib/tsuru/deploy
    [ "$status" -eq 0 ]
    run su - ubuntu -c "cd /home/application/current && composer_phar show"
    match="ehime/hello-world .+"
    [[ $output =~ $match ]]
}

# The deploy should export the process environment into environment.conf for
# every installed fpm version, not just the configured one.
@test "generate environment.conf for all php-fpm versions" {
    cat >/home/application/current/tsuru.yaml <<EOF
php:
  version: 7.1
  interpretor:
    name: fpm
EOF
    export FOO=1
    export BAR=2
    run /var/lib/tsuru/deploy
    for version in 5.6 7.0 7.1; do
        run bash -c "egrep '(FOO|BAR)' /etc/php/${version}/fpm/environment.conf | tr '\n' ' '"
        [ "$output" = "env[BAR] = 2 env[FOO] = 1 " ]
    done
}

# Deploying each version should repoint the php/phar alternatives.
@test "update-alternatives for php and phar" {
    for version in 5.6 7.0 7.1; do
        cat >/home/application/current/tsuru.yaml <<EOF
php:
  version: ${version}
  interpretor:
    name: fpm
EOF
        run /var/lib/tsuru/deploy
        run bash -c "php --version | grep \"PHP ${version}\""
        [[ $output =~ ^PHP\ ${version}.+ ]]
        run bash -c "phar.phar version | grep 'PHP Version'"
        [[ $output =~ ^PHP\ Version:\ +${version}.+ ]]
        run bash -c "phar version | grep 'PHP Version'"
        [[ $output =~ ^PHP\ Version:\ +${version}.+ ]]
    done
}
|
import queryString from "query-string";
import * as Yup from "yup";
import { Box, Container, Input, Button, Text } from "@chakra-ui/react";
import { Formik, Field, Form } from "formik";
import { useHistory, useLocation } from "react-router-dom";
import useAuth from "../../common/hooks/useAuth";
import { _post } from "../../common/httpClient";
import decodeJWT from "../../common/utils/decodeJWT";
const ResetPasswordPage = () => {
let [, setAuth] = useAuth();
let history = useHistory();
let location = useLocation();
let { passwordToken } = queryString.parse(location.search);
let uai = decodeJWT(passwordToken).sub;
let showError = (meta) => {
return meta.touched && meta.error
? {
feedback: meta.error,
invalid: true,
}
: {};
};
let changePassword = async (values, { setStatus }) => {
try {
let { token } = await _post("/api/password/reset-password", { ...values, passwordToken });
setAuth(token);
history.push("/");
} catch (e) {
console.error(e);
setStatus({
error: (
<span>
Le lien est expiré ou invalide, merci de prendre contact avec un administrateur en précisant votre adresse
mail :
<br />
<a href="mailto:<EMAIL>"><EMAIL></a>
</span>
),
});
}
};
return (
<Box p={5} bg="#FAFAFA">
<Container border="1px solid #E0E5ED" bg="white" p={0} maxW="45ch">
<Box borderBottom="1px solid #E0E5ED" p={4}>
<Text fontSize="16px" ml={2}>
Changement du mot de passe pour le CFA {uai}
</Text>
</Box>
<Box mx={5} mt={5}>
<Formik
initialValues={{
newPassword: "",
}}
validationSchema={Yup.object().shape({
newPassword: Yup.string()
.required("Veuillez saisir un mot de passe")
.matches(
"^(?=.*[A-Za-z])(?=.*\\d)(?=.*[@$!%*#?&])[A-Za-z\\d@$!%*#?&]{8,}$",
"Le mot de passe doit contenir au moins 8 caractères, une lettre en majuscule, un chiffre et un caractère spécial"
),
})}
onSubmit={changePassword}
>
{({ status = {} }) => {
return (
<Form>
<Text textStyle="h6" fontSize="12px">
Nouveau mot de passe
</Text>
<Field name="newPassword">
{({ field, meta }) => {
return (
<Input type={"password"} placeholder="Votre mot de passe..." {...field} {...showError(meta)} />
);
}}
</Field>
<Button variant="primary" type={"submit"} fontSize="12px" fontWeight="700" mt={5}>
Réinitialiser le mot de passe
</Button>
<Box mb={5}>{status.error && <Text color="#cd201f">{status.error}</Text>}</Box>
</Form>
);
}}
</Formik>
</Box>
</Container>
</Box>
);
};
export default ResetPasswordPage;
|
<gh_stars>1-10
import csv
import glob
import time
from datetime import datetime, timedelta
from os import system, name
from time import sleep
import pandas as pd
import requests
import requests_cache
# Variables
api_key = ''  # Blockfrost project id; must be filled in before running
base_api = 'https://cardano-mainnet.blockfrost.io/api/v0/'
api_counter = 0    # uncached API calls made
cache_counter = 0  # responses served from the local cache
# Constants used to convert reward epochs into wall-clock dates (values as
# hard-coded here; the offsets are applied in the reward loop below).
shelley_start_epoch = 208
shelley_start_datetime = datetime(2020, 7, 29, 21, 44, 51)
# One *.wallet file per wallet: either a list of addresses or a single
# stake1u... stake key (detected in the main loop).
wallet_files = glob.glob('wallets/*.wallet')
wallet_counter = len(wallet_files)
calculated_wallet_counter = 0
address_counter = 0
calculated_address_counter = 0
start_time = time.time()
request_time = time.time()
stake_key = None
cache_all = False
# HTTP header
headers = {
    'project_id': api_key
}
# CSV data
csv_header = ['transactionType', 'date', 'inBuyAmount', 'inBuyAsset', 'outSellAmount', 'outSellAsset',
              'feeAmount (optional)', 'feeAsset (optional)', 'classification (optional)', 'operationId (optional)']
# Function to clear the terminal output
def clear():
    # Windows consoles use `cls`; everything else (POSIX) uses `clear`.
    _ = system('cls' if name == 'nt' else 'clear')
# Function to request the api with simple builtin retry
def request_api(url):
    """GET `url` with the module-level `headers`, retrying up to 20 times.

    Each retry sleeps retries*5 seconds (linear backoff). Every attempt is
    passed through check_cached() for rate limiting and the final response
    through check_content() which exits the process on non-JSON content.
    Returns the last requests.Response.
    NOTE(review): if all 20 attempts fail, the last (non-200) response is
    still returned — callers do not check the status code.
    """
    global request_time
    retries = 0
    response_code = None
    while response_code != 200 and retries < 20:
        if retries > 0:
            sleep(retries * 5)
            print('Response code was: ' + str(response_code) + ' -> Retrying ' + str(retries) + '...')
        response = requests.get(url, headers=headers)
        response_code = response.status_code
        check_cached(response)
        request_time = time.time()  # remember when we last hit the API
        retries += 1
    check_content(response)
    return response
# Function to check if the response was cached; needed for limiting api requests
def check_cached(response):
    """Update the cached/uncached call counters and throttle real API calls.

    Uncached responses increment api_counter and, after a 5-second grace
    period from script start, enforce a minimum gap of 0.1s between
    consecutive real requests by sleeping the remainder. Cached responses
    only increment cache_counter.
    """
    global cache_counter
    global api_counter
    global request_time
    elapsed_time = time.time() - start_time
    elapsed_since_request = time.time() - request_time
    # requests_cache marks cached responses with a `from_cache` attribute
    if not getattr(response, 'from_cache', False):
        api_counter += 1
        if elapsed_time > 5 and elapsed_since_request < 0.1:
            sleep(0.1 - elapsed_since_request)
    else:
        cache_counter += 1
# Check if the received response content type is json
def check_content(response):
    """Abort the script unless the response declares a JSON content type.

    Fixes two crashes in the original: a missing Content-Type header made
    `'json' not in None` raise TypeError, and the error path concatenated a
    str with the headers object (also TypeError) instead of printing it.
    """
    content_type = response.headers.get('Content-Type')
    if content_type is None or 'json' not in content_type:
        print('The content type of the received data is not json but ' + str(content_type))
        exit(1)
# Add row to array if it not exists
def add_row(data):
    # De-duplicate: only append rows not already present in the module-level
    # csv_data accumulator.
    if data in csv_data:
        return
    csv_data.append(data)
# Aggregate the utxos of a transaction into one
def aggregate_utxos(data):
    """Collapse per-UTXO rows into one row per (tx hash, type, time).

    `data` is a list of CSV rows (see csv_header): index 0 = type
    ('deposit'/'withdraw'), 1 = date string, 2 = deposit amount,
    4 = withdraw amount, 6 = fee, 9 = transaction hash ('' for rewards).
    Returns the aggregated rows plus untouched reward rows, sorted by date.
    """
    aggregated_utxos = []
    aggregated_data = []
    seen_deposit = []
    seen_withdraw = []
    for d in data:
        tx_type = d[0]
        tx_time = d[1]
        tx_id = d[9]
        # All rows belonging to the same tx hash and direction
        utxos = list(filter(lambda x: x[9] == tx_id and x[0] == tx_type, data))
        # NOTE(review): `aggregate = d` aliases the input row, so the `+=`
        # below mutates `data` in place — confirm callers never reuse the
        # un-aggregated rows afterwards.
        aggregate = d
        for utxo in utxos:
            if tx_type == 'deposit' and utxo != d and tx_time == utxo[1] and utxo[9] not in seen_deposit:
                aggregate[2] += utxo[2]
            elif tx_type == 'withdraw' and utxo != d and tx_time == utxo[1] and utxo[9] not in seen_withdraw:
                aggregate[4] += utxo[4]
        # Mark this tx hash as handled so sibling UTXOs are not re-summed
        if tx_type == 'deposit':
            seen_deposit.append(tx_id)
        if tx_type == 'withdraw':
            seen_withdraw.append(tx_id)
        if not list(filter(lambda x: x[9] == aggregate[9] and x[0] == aggregate[0] and x[1] == aggregate[1],
                           aggregated_utxos)):
            aggregated_utxos.append(aggregate)
    # Build transaction fees and deposits
    transactions = list(filter(lambda x: x[9] != '', aggregated_utxos))
    rewards = list(filter(lambda x: x[9] == '', aggregated_utxos))
    seen_transactions = []
    for transaction in transactions:
        # Pair the withdraw and deposit rows of the same transaction
        # (sorted so the 'withdraw' row comes first).
        tx_pair = list(filter(lambda x: x[9] == transaction[9], transactions))
        tx_pair = sorted(tx_pair, key=lambda x: x[0], reverse=True)
        result = None
        if transaction[9] not in seen_transactions and len(tx_pair) > 1:
            # Net outflow = withdrawn - redeposited change - fee
            result = tx_pair[0][4] - tx_pair[1][2] - transaction[6]
        if result is not None:
            date = transaction[1]
            seen_transactions.append(transaction[9])
            fee = transaction[6]
            fee_asset = transaction[7]
            classification = ''
            operation_id = transaction[9]
            # A near-zero net amount means the tx only moved funds within the
            # wallet: record the fee itself as the withdrawn amount.
            if result < 0.000001:
                result = fee
                fee = ''
                fee_asset = ''
                classification = 'fee'
            final_tx = ['withdraw', date, '', '', round(result, 5), 'ADA', fee, fee_asset, classification, operation_id]
            add_row(final_tx)
            aggregated_data.append(final_tx)
        elif transaction[9] not in seen_transactions:
            # One-sided transaction (pure deposit or pure withdraw)
            seen_transactions.append(transaction[9])
            aggregated_data.append(tx_pair[0])
    return sorted(aggregated_data + rewards, key=lambda x: datetime.strptime(x[1], '%m/%d/%Y %H:%M:%S'))
# Write csv data into file
def write_data():
    """Sort, aggregate and persist the rows collected in `csv_data`.

    Writes to the module-level `filename` (the wallet's .csv sibling).
    """
    print('-- Sort calculated data')
    sorted_data = sorted(csv_data, key=lambda x: datetime.strptime(x[1], '%m/%d/%Y %H:%M:%S'))
    print('-- Aggregate UTXOs')
    aggregated_data = aggregate_utxos(sorted_data)
    print('-- Write data on drive')
    # newline='' is required by the csv module; without it every row is
    # followed by a blank line on Windows.
    with open(filename, mode='w', newline='') as transactions_file:
        transactions_writer = csv.writer(transactions_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        transactions_writer.writerow(csv_header)
        for d in aggregated_data:
            transactions_writer.writerow(d)
def convert_csv_to_xlsx():
    """Convert every wallets/*.csv into a sibling .xlsx via pandas.

    Requires an Excel writer engine (e.g. openpyxl) to be installed.
    """
    csv_files = glob.glob('wallets/*.csv')
    for csv_file in csv_files:
        # foo.csv -> foo.xlsx (split on the first dot, like `filename` above)
        out = csv_file.split('.')[0] + '.xlsx'
        df = pd.read_csv(csv_file)
        df.to_excel(out, index=False)
# Start of the script itself
# Cache every HTTP response indefinitely so re-runs avoid repeated API calls.
requests_cache.install_cache(expire_after=None)
for wallet in wallet_files:
    calculated_wallet_counter += 1
    print('Calculating wallet ' + str(calculated_wallet_counter) + ' of ' + str(wallet_counter))
    print('-- Reading wallet ' + wallet)
    # Per-wallet accumulators; csv_data/filename are consumed by
    # add_row() and write_data().
    csv_data = []
    stake_keys_calculated = set()
    stake_key = None
    filename = wallet.split('.')[0] + '.csv'
    wallet_file = open(wallet, 'r')
    addresses = wallet_file.readlines()
    for i in range(0, len(addresses)):
        addresses[i] = addresses[i].strip()
    # A wallet file may hold a single stake key instead of addresses; in that
    # case resolve every address belonging to the stake key via the API.
    if len(addresses) == 1 and addresses[0].startswith('stake1u'):
        print('---- Stake key detected ' + addresses[0])
        print('------ Get addresses for ' + addresses[0])
        page = 1
        new_results = True
        stake_key = addresses[0]
        addresses = []
        while new_results:
            addresses_r = request_api(base_api + 'accounts/' + stake_key + '/addresses' + '?page=' + str(page))
            new_results = addresses_r.json()
            page += 1
            for address in addresses_r.json():
                addresses.append(address['address'])
                print('-------- Address found ' + address['address'])
    address_counter += len(addresses)
    # Wallet Transaction History
    for address in addresses:
        # NOTE(review): `global` at module scope is a no-op; elapsed_time is
        # already a module-level name.
        global elapsed_time
        calculated_address_counter += 1
        clear()
        elapsed_time = time.time() - start_time
        print('Calculating wallet ' + str(calculated_wallet_counter) + ' of ' + str(wallet_counter) + ' - Elapsed Time: ' + str(round(elapsed_time, 2)))
        print('Calculating address ' + str(calculated_address_counter) + ' of ' + str(address_counter))
        # Address request
        addr_r = request_api(base_api + 'addresses/' + address)
        if stake_key is None:
            stake_key = addr_r.json()['stake_address']
        # Reward History (computed once per stake key)
        print('-- Get reward history')
        reward_history_r = None
        reward_history = []
        if stake_key is not None:
            if stake_key not in stake_keys_calculated:
                print('---- for stake key ' + stake_key)
                # Rewards grow over time: bypass the cache while paging them.
                with requests_cache.disabled():
                    page = 1
                    new_results = True
                    while new_results:
                        reward_history_r = request_api(base_api + 'accounts/' + stake_key + '/rewards' + '?page=' + str(page))
                        new_results = reward_history_r.json()
                        reward_history.append(reward_history_r.json())
                        page += 1
                reward_history = [item for sublist in reward_history for item in sublist]
                for reward in reward_history:
                    # Epoch -> date: 5 days per epoch since Shelley start,
                    # plus a fixed 10-day offset (constants defined above).
                    datetime_delta = (reward['epoch'] - shelley_start_epoch) * 5
                    reward_time = shelley_start_datetime + timedelta(days=datetime_delta) + timedelta(days=10)
                    amount = int(reward['amount']) / 1000000  # lovelace -> ADA
                    deposit = ['deposit', reward_time.strftime('%m/%d/%Y %H:%M:%S'), amount, 'ADA', '', '', '', '',
                               'staked', '']
                    add_row(deposit)
            else:
                print('---- skipping rewards already calculated for ' + stake_key)
        else:
            print('---- no stake key found for address: ' + address)
        stake_keys_calculated.add(stake_key)
        # Get all transactions for a specific address
        print('-- Get all transactions for ' + address)
        addr_txs = []
        with requests_cache.disabled():
            page = 1
            new_results = True
            while new_results:
                addr_txs_r = request_api(base_api + 'addresses/' + address + '/txs' + '?page=' + str(page))
                new_results = addr_txs_r.json()
                addr_txs.append(addr_txs_r.json())
                page += 1
        addr_txs = [item for sublist in addr_txs for item in sublist]
        # Get detailed transaction information
        print('-- Get detailed transaction information')
        txs_details = []
        for tx in addr_txs:
            print('---- for transaction ' + tx)
            tx_details_r = request_api(base_api + 'txs/' + tx)
            txs_details.append([tx, tx_details_r.json()])
        # Get blocks for transactions
        print('-- Get blocks for transactions')
        blocks_for_txs = []
        for tx in txs_details:
            print('---- for transaction ' + tx[0])
            blocks_for_txs.append(tx[1]['block'])
        # Get block details
        print("-- Get details")
        block_details = []
        for block in blocks_for_txs:
            print('---- for block ' + block)
            block_details_r = request_api(base_api + 'blocks/' + block)
            block_details.append(block_details_r.json())
        # Get time for blocks
        print('-- Get time for blocks')
        block_times = []
        for block in block_details:
            print('---- for block ' + block['hash'])
            block_time = block['time']
            block_times.append(block_time)
        # Combine tx with time (block_times is index-aligned with txs_details)
        print('-- Combine tx with time')
        tx_with_time = []
        i = 0
        for tx in txs_details:
            print('---- for transaction ' + tx[0])
            tx_with_time.append([tx[0], tx[1], block_times[i]])
            i += 1
        # Get UTXOs for all transactions
        print('-- Get transaction UTXOs')
        txs_utxos = []
        for tx in tx_with_time:
            print('---- for transaction ' + tx[0])
            tx_utxo_r = request_api(base_api + 'txs/' + tx[0] + '/utxos')
            txs_utxos.append([tx_utxo_r.json(), tx])
        # Filter inputs and outputs belonging to this wallet's addresses
        print('-- Filter inputs and outputs')
        inputs = []
        outputs = []
        reward_withdrawals = []
        for tx in txs_utxos:
            ins = tx[0]['inputs']
            outs = tx[0]['outputs']
            # Transactions that also withdrew staking rewards are tracked
            # separately so the reward amount is reported on its own row.
            if int(tx[1][1]['withdrawal_count']) > 0:
                tx_withdrawal_r = request_api(base_api + 'txs/' + tx[1][0] + '/withdrawals')
                if stake_key == tx_withdrawal_r.json()[0]['address'] \
                        and [tx, tx_withdrawal_r.json()] not in reward_withdrawals:
                    reward_withdrawals.append([tx[0], tx[1], tx_withdrawal_r.json()])
            for i in ins:
                if i['address'] in addresses:
                    inputs.append([i, tx[1]])
            for o in outs:
                if o['address'] in addresses:
                    outputs.append([o, tx[1]])
        # Collect inputs
        print('-- Calculate withdrawals')
        for addr in addresses:
            for i in inputs:
                if addr in i[0]['address']:
                    tx_time = datetime.utcfromtimestamp(i[1][2]).strftime('%m/%d/%Y %H:%M:%S')
                    amount = i[0]['amount']
                    quantity = int(amount[0]['quantity']) / 1000000  # lovelace -> ADA
                    fee = int(i[1][1]['fees']) / 1000000
                    tx_hash = i[1][0]
                    withdraw = ['withdraw', tx_time, '', '', quantity, 'ADA', fee, 'ADA', '', tx_hash]
                    add_row(withdraw)
        # Collect outputs
        print('-- Calculate deposits')
        for addr in addresses:
            for o in outputs:
                if addr in o[0]['address']:
                    tx_time = datetime.utcfromtimestamp(o[1][2]).strftime('%m/%d/%Y %H:%M:%S')
                    amount = o[0]['amount']
                    quantity = int(amount[0]['quantity']) / 1000000
                    tx_hash = o[1][0]
                    deposit = ['deposit', tx_time, quantity, 'ADA', '', '', '', '', '', tx_hash]
                    add_row(deposit)
        # Collect reward withdrawals
        print('-- Calculate reward withdrawals')
        for reward_withdrawal in reward_withdrawals:
            tx_time = datetime.utcfromtimestamp(reward_withdrawal[1][2]).strftime('%m/%d/%Y %H:%M:%S')
            amount = reward_withdrawal[2][0]['amount']
            tx_hash = reward_withdrawal[1][0]
            withdraw = ['withdraw', tx_time, '', '', int(amount) / 1000000, 'ADA', '', '', '', tx_hash]
            add_row(withdraw)
    write_data()
convert_csv_to_xlsx()
end_time = time.time()
elapsed_time = end_time - start_time
print('\nTransaction history created successfully in ' + str(round(elapsed_time, 4)) + 's using ' + str(cache_counter) +
      ' cached calls and ' + str(api_counter) + ' API calls for ' + str(len(wallet_files)) + ' wallet/s with ' +
      str(address_counter) + ' address/es.')
|
<filename>node_modules/react-icons-kit/noto_emoji_regular/u1F4C9.js<gh_stars>1-10
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.u1F4C9 = void 0;
var u1F4C9 = {
"viewBox": "0 0 2600 2760.837",
"children": [{
"name": "path",
"attribs": {
"d": "M2367 2413q20 0 35 14.5t15 35.5q0 20-15 34.5t-35 14.5H232q-21 0-35.5-14.5T182 2463V340q0-20 14.5-34.5T232 291q20 0 34.5 14.5T281 340v62l290 289 191-146q17-14 41-14 23 0 39.5 12t24.5 32l509 1297 217-404q8-17 22.5-26.5t34.5-9.5q46 0 67 40l215 464 408 342q25 20 25 53 0 28-21 48.5t-49 20.5q-22 0-44-17l-418-351q-13-9-19-24l-164-354-224 414q-9 17-24 26.5t-33 9.5q-50 0-67-43L771 710 607 836q-18 15-43 15-28 0-47-20L281 595v1818h2086z"
},
"children": []
}]
};
exports.u1F4C9 = u1F4C9; |
import java.awt.*;
import javax.swing.*;
import java.awt.event.*;
import javax.swing.border.Border;
/**
 * Small Swing window that reads two integers and displays them in
 * ascending order when the "Sort" button is pressed.
 */
public class SortGUI extends JFrame {
    JPanel mainPanel;
    JTextField[] inputNumbers;
    JButton sortButton;
    JLabel resultLabel;

    /** Builds the window: two input fields, a sort button and a result label. */
    public SortGUI() {
        setTitle("Sort Numbers");
        setSize(450, 200);
        setDefaultCloseOperation(EXIT_ON_CLOSE);
        setLayout(new FlowLayout());
        mainPanel = new JPanel(new GridLayout(2, 1));
        add(mainPanel);
        JPanel inputsPanel = new JPanel(new FlowLayout());
        inputsPanel.setBorder(BorderFactory.createTitledBorder("Inputs"));
        inputNumbers = new JTextField[2];
        for (int i = 0; i < inputNumbers.length; i++) {
            inputNumbers[i] = new JTextField(5);
            inputsPanel.add(inputNumbers[i]);
        }
        mainPanel.add(inputsPanel);
        sortButton = new JButton("Sort");
        sortButton.addActionListener(new SortActionListener());
        mainPanel.add(sortButton);
        resultLabel = new JLabel("");
        add(resultLabel);
        setVisible(true);
    }

    /** Sorts the two entered numbers; reports invalid (non-integer) input. */
    class SortActionListener implements ActionListener {
        @Override
        public void actionPerformed(ActionEvent e) {
            try {
                // trim() tolerates accidental surrounding whitespace
                int num1 = Integer.parseInt(inputNumbers[0].getText().trim());
                int num2 = Integer.parseInt(inputNumbers[1].getText().trim());
                if (num1 <= num2)
                    resultLabel.setText("Sorted: " + num1 + ", " + num2);
                else
                    resultLabel.setText("Sorted: " + num2 + ", " + num1);
            } catch (NumberFormatException ex) {
                // Previously an uncaught exception crashed the EDT handler
                resultLabel.setText("Please enter two valid integers.");
            }
        }
    }

    public static void main(String[] args) {
        // Swing components must be created on the Event Dispatch Thread
        SwingUtilities.invokeLater(new Runnable() {
            @Override
            public void run() {
                new SortGUI();
            }
        });
    }
}
// Type bundle declaration — presumably polkadot.js chain definitions for an
// ORML-based chain (TODO confirm against the generating package).
declare const _default: {
    // No custom RPC methods are declared.
    rpc: {};
    types: {
        OrmlAccountData: {
            free: string;
            frozen: string;
            reserved: string;
        };
        OrmlBalanceLock: {
            amount: string;
            id: string;
        };
    };
    // Per-module overrides mapping generic names to the ORML-specific types.
    typesAlias: {
        tokens: {
            AccountData: string;
            BalanceLock: string;
        };
    };
};
export default _default;
package kubeagg
import "sync"
// New return Contexts object
//
// NOTE(review): the explicit WaitGroup assignment is redundant — the zero
// value of sync.WaitGroup is ready to use — but removing it would leave this
// file's "sync" import unused, so it is kept as-is.
func New() *Contexts {
	contexts := Contexts{}
	contexts.WaitGroup = sync.WaitGroup{}
	return &contexts
}
// Run package entrypoint
func Run() {
	// Get reference to Contexts object
	c := New()
	// Populate contexts
	c.GetContexts()
	// Handle namespaced and non namespaced object differently.
	// NOTE(review): this branch reads getConfigVar.ObjectType while the
	// Output call below reads globalConfigVar.Output — confirm both package
	// variables exist and the naming mismatch is intentional.
	if contains(GetNonNamespacedObjects(), getConfigVar.ObjectType) {
		c.PopulateNonNamespacedObjectsAsync()
	} else {
		c.GetNamespaces()
		c.PopulateNamespacedObjectsAsync()
	}
	// Output in provided format
	c.Output(globalConfigVar.Output)
}
|
<filename>src/main/java/utils/AlgorithmComplexity.java<gh_stars>0
package utils;
/**
 * Asymptotic complexity classes used to label algorithms.
 * Keep the constant order stable in case callers depend on
 * {@code ordinal()} or {@code values()} ordering.
 */
public enum AlgorithmComplexity {
    LINEAR, QUADRATIC, CUBIC, LOG_N, N_LOG_N
}
|
/*
4- Ler dois valores inteiros negativos ( consistir se é negativo), multiplicar e mostrar o resultado
*/
#include <iostream>
using namespace std;
// Implements the exercise stated in the header comment: read two NEGATIVE
// integers (re-prompting until each is negative), multiply them and show the
// result. The previous code contradicted the spec: it prompted for positive
// numbers, re-prompted while the value was negative, and printed a SUM.
int main() {
    int n1 = 0, n2 = 0;

    // Read and validate: keep prompting until each value is strictly negative.
    cout << "primeiro numero inteiro negativo ";
    cin >> n1;
    while (n1 >= 0) {
        cout << "primeiro numero inteiro negativo ";
        cin >> n1;
    }

    cout << "segundo numero inteiro negativo ";
    cin >> n2;
    while (n2 >= 0) {
        cout << "segundo numero inteiro negativo ";
        cin >> n2;
    }

    // Multiply and show the result (product of two negatives is positive).
    cout << "\n Produto " << n1 * n2;
    return 0;
}
|
#! /bin/bash -e
# Publish script: logs in to the Docker registry and pushes artifacts/images.
# The ${VAR?} expansions abort the script if either credential is unset.
docker login -u ${DOCKER_USER_ID?} -p ${DOCKER_PASSWORD?}
# Publish library artifacts first, then the Docker images (runs with -e, so
# any failing step stops the script).
./gradlew publishEventuateArtifacts
./gradlew publishEventuateDockerImages
<gh_stars>0
/**
* Copyright (C) 2010-2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.googlecode.flyway.ant;
import com.googlecode.flyway.core.Flyway;
import com.googlecode.flyway.core.util.StringUtils;
import com.googlecode.flyway.core.validation.ValidationErrorMode;
import java.util.ArrayList;
import java.util.List;
/**
* Base class for tasks that rely on loading migrations from the classpath.
*/
@SuppressWarnings({"UnusedDeclaration"})
public abstract class AbstractMigrationLoadingTask extends AbstractFlywayTask {
/**
* Locations on the classpath to scan recursively for migrations. Locations may contain both sql
* and java-based migrations. (default: db.migration)<br/>Also configurable with Ant Property: ${flyway.locations}
*/
private String[] locations;
/**
* The base package where the Java migrations are located. (default: db.migration)<br/>Also configurable with Ant Property: ${flyway.basePackage}
*/
private String basePackage;
/**
* The base directory on the classpath where the Sql migrations are located. (default: db/migration)<br/>Also configurable with Ant Property: ${flyway.baseDir}
*/
private String baseDir;
/**
* The encoding of Sql migrations. (default: UTF-8)<br/>Also configurable with Ant Property: ${flyway.encoding}
*/
private String encoding;
/**
* The file name prefix for Sql migrations (default: V)<br/>Also configurable with Ant Property: ${flyway.sqlMigrationPrefix}
*/
private String sqlMigrationPrefix;
/**
* The file name suffix for Sql migrations (default: .sql)<br/>Also configurable with Ant Property: ${flyway.sqlMigrationSuffix}
*/
private String sqlMigrationSuffix;
/**
* The action to take when validation fails.<br/> <br/> Possible values are:<br/> <br/> <b>FAIL</b> (default)<br/>
* Throw an exception and fail.<br/> <br/> <b>CLEAN (Warning ! Do not use in produktion !)</b><br/> Cleans the
* database.<br/> <br/> This is exclusively intended as a convenience for development. Even tough we strongly
* recommend not to change migration scripts once they have been checked into SCM and run, this provides a way of
* dealing with this case in a smooth manner. The database will be wiped clean automatically, ensuring that the next
* migration will bring you back to the state checked into SCM.<br/> <br/> This property has no effect when
* <i>validationMode</i> is set to <i>NONE</i>.<br/> <br/>Also configurable with Ant Property: ${flyway.validationErrorMode}
*/
private String validationErrorMode;
/**
* Do not use. For Ant itself.
*
* @param locations The locations on the classpath.
*/
public void addConfiguredLocations(Locations locations) {
this.locations = locations.locations.toArray(new String[locations.locations.size()]);
}
/**
* @param basePackage The base package where the Java migrations are located. (default: db.migration)<br/>Also configurable with Ant Property: ${flyway.basePackage}
* @deprecated Use locations instead. Will be removed in Flyway 2.0.
*/
@Deprecated
public void setBasePackage(String basePackage) {
this.basePackage = basePackage;
}
/**
* @param baseDir The base directory on the classpath where the Sql migrations are located. (default: db/migration)<br/>Also configurable with Ant Property: ${flyway.baseDir}
* @deprecated Use locations instead. Will be removed in Flyway 2.0.
*/
@Deprecated
public void setBaseDir(String baseDir) {
this.baseDir = baseDir;
}
/**
* @param encoding The encoding of Sql migrations. (default: UTF-8)<br/>Also configurable with Ant Property: ${flyway.encoding}
*/
public void setEncoding(String encoding) {
this.encoding = encoding;
}
/**
* @param sqlMigrationPrefix The file name prefix for Sql migrations (default: V)<br/>Also configurable with Ant Property: ${flyway.sqlMigrationPrefix}
*/
public void setSqlMigrationPrefix(String sqlMigrationPrefix) {
this.sqlMigrationPrefix = sqlMigrationPrefix;
}
/**
* @param sqlMigrationSuffix The file name suffix for Sql migrations (default: .sql)<br/>Also configurable with Ant Property: ${flyway.sqlMigrationSuffix}
*/
public void setSqlMigrationSuffix(String sqlMigrationSuffix) {
this.sqlMigrationSuffix = sqlMigrationSuffix;
}
/**
* @param validationErrorMode The action to take when validation fails.<br/> <br/> Possible values are:<br/> <br/> <b>FAIL</b> (default)<br/>
* Throw an exception and fail.<br/> <br/> <b>CLEAN (Warning ! Do not use in produktion !)</b><br/> Cleans the
* database.<br/> <br/> This is exclusively intended as a convenience for development. Even tough we strongly
* recommend not to change migration scripts once they have been checked into SCM and run, this provides a way of
* dealing with this case in a smooth manner. The database will be wiped clean automatically, ensuring that the next
* migration will bring you back to the state checked into SCM.<br/> <br/> This property has no effect when
* <i>validationMode</i> is set to <i>NONE</i>.<br/> <br/>Also configurable with Ant Property: ${flyway.validationErrorMode}
*/
public void setValidationErrorMode(String validationErrorMode) {
this.validationErrorMode = validationErrorMode;
}
@Override
protected void doExecute(Flyway flyway) throws Exception {
String locationsProperty = getProject().getProperty("flyway.locations");
if (locationsProperty != null) {
flyway.setLocations(StringUtils.tokenizeToStringArray(locationsProperty, ","));
} else if (locations != null) {
flyway.setLocations(locations);
}
String basePackageValue = useValueIfPropertyNotSet(basePackage, "basePackage");
if (basePackageValue != null) {
flyway.setBasePackage(basePackageValue);
}
String baseDirValue = useValueIfPropertyNotSet(baseDir, "baseDir");
if (baseDirValue != null) {
flyway.setBaseDir(baseDirValue);
}
String encodingValue = useValueIfPropertyNotSet(encoding, "encoding");
if (encodingValue != null) {
flyway.setEncoding(encodingValue);
}
String sqlMigrationPrefixValue = useValueIfPropertyNotSet(sqlMigrationPrefix, "sqlMigrationPrefix");
if (sqlMigrationPrefixValue != null) {
flyway.setSqlMigrationPrefix(sqlMigrationPrefixValue);
}
String sqlMigrationSuffixValue = useValueIfPropertyNotSet(sqlMigrationSuffix, "sqlMigrationSuffix");
if (sqlMigrationSuffixValue != null) {
flyway.setSqlMigrationSuffix(sqlMigrationSuffixValue);
}
String validationErrorModeValue = useValueIfPropertyNotSet(validationErrorMode, "validationErrorMode");
if (validationErrorModeValue != null) {
flyway.setValidationErrorMode(ValidationErrorMode.valueOf(validationErrorModeValue.toUpperCase()));
}
}
/**
 * The nested &lt;locations&gt; element of the task. Contains 1 or more
 * &lt;location&gt; sub-elements.
 */
public static class Locations {
    /**
     * The classpath locations accumulated from the nested &lt;location&gt; elements.
     */
    List<String> locations = new ArrayList<String>();

    /**
     * Do not use. Invoked reflectively by Ant for each nested &lt;location&gt; element.
     *
     * @param location A location on the classpath.
     */
    public void addConfiguredLocation(LocationElement location) {
        locations.add(location.path);
    }
}
/**
 * One &lt;location&gt; sub-element within the &lt;locations&gt; element.
 */
public static class LocationElement {
    /**
     * The path of the location.
     */
    private String path;

    /**
     * Do not use. Invoked reflectively by Ant to set the path attribute.
     *
     * @param path The path of the location.
     */
    public void setPath(String path) {
        this.path = path;
    }
}
}
|
#!/usr/bin/env bash
# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# hack script for running a cluster-api-provider-gcp e2e
# Fail fast: exit on errors, on unset variables, and on pipeline failures.
set -o errexit -o nounset -o pipefail

# All settings below are overridable from the environment.
GOOGLE_APPLICATION_CREDENTIALS=${GOOGLE_APPLICATION_CREDENTIALS:-""}
GCP_PROJECT=${GCP_PROJECT:-""}
GCP_REGION=${GCP_REGION:-"us-east4"}
CLUSTER_NAME=${CLUSTER_NAME:-"test1"}
NETWORK_NAME=${NETWORK_NAME:-"${CLUSTER_NAME}-mynetwork"}
KUBERNETES_VERSION=${KUBERNETES_VERSION:-"v1.16.2"}
# Start time of this run; scopes the Stackdriver activity-log query in dump-logs.
TIMESTAMP=$(date +"%Y-%m-%dT%H:%M:%SZ")
ARTIFACTS="${ARTIFACTS:-${PWD}/_artifacts}"
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd -P)"
# dump logs from kind and all the nodes
# Collect versions, CRD state, images, cluster dumps, per-node logs and GCP
# activity logs into ${ARTIFACTS}/logs. Every step is best-effort (|| true)
# so a single failure never aborts log collection.
dump-logs() {
    # log version information
    echo "=== versions ==="
    echo "kind : $(kind version)" || true
    echo "bootstrap cluster:"
    kubectl --context=kind-clusterapi version || true
    echo "deployed cluster:"
    # Quoted kubeconfig path so a ${PWD} containing spaces cannot break the flag.
    kubectl --kubeconfig="${PWD}/kubeconfig" version || true
    echo ""

    # dump all the info from the CAPI related CRDs
    kubectl --context=kind-clusterapi get \
        clusters,gcpclusters,machines,gcpmachines,kubeadmconfigs,machinedeployments,gcpmachinetemplates,kubeadmconfigtemplates,machinesets \
        --all-namespaces -o yaml >> "${ARTIFACTS}/logs/capg.info" || true

    # dump images info
    echo "images in docker" >> "${ARTIFACTS}/logs/images.info"
    docker images >> "${ARTIFACTS}/logs/images.info"
    echo "images from bootstrap using containerd CLI" >> "${ARTIFACTS}/logs/images.info"
    docker exec clusterapi-control-plane ctr -n k8s.io images list >> "${ARTIFACTS}/logs/images.info" || true
    echo "images in bootstrap cluster using kubectl CLI" >> "${ARTIFACTS}/logs/images.info"
    (kubectl --context=kind-clusterapi get pods --all-namespaces -o json \
        | jq --raw-output '.items[].spec.containers[].image' | sort) >> "${ARTIFACTS}/logs/images.info" || true
    echo "images in deployed cluster using kubectl CLI" >> "${ARTIFACTS}/logs/images.info"
    (kubectl --kubeconfig="${PWD}/kubeconfig" get pods --all-namespaces -o json \
        | jq --raw-output '.items[].spec.containers[].image' | sort) >> "${ARTIFACTS}/logs/images.info" || true

    # dump cluster info for kind
    kubectl --context=kind-clusterapi cluster-info dump > "${ARTIFACTS}/logs/kind-cluster.info" || true

    # dump cluster info for the workload (CAPG) cluster
    echo "=== gcloud compute instances list ===" >> "${ARTIFACTS}/logs/capg-cluster.info" || true
    gcloud compute instances list --project "${GCP_PROJECT}" >> "${ARTIFACTS}/logs/capg-cluster.info" || true
    echo "=== cluster-info dump ===" >> "${ARTIFACTS}/logs/capg-cluster.info" || true
    kubectl --kubeconfig="${PWD}/kubeconfig" cluster-info dump >> "${ARTIFACTS}/logs/capg-cluster.info" || true

    # export all logs from kind
    kind "export" logs --name="clusterapi" "${ARTIFACTS}/logs" || true

    # collect serial output, system logs and journals from every GCE node
    for node_name in $(gcloud compute instances list --project "${GCP_PROJECT}" --format='value(name)')
    do
        node_zone=$(gcloud compute instances list --project "${GCP_PROJECT}" --filter="name:(${node_name})" --format='value(zone)')
        echo "collecting logs from ${node_name} in zone ${node_zone}"
        dir="${ARTIFACTS}/logs/${node_name}"
        # Quoted so unusual node names cannot split the path.
        mkdir -p "${dir}"

        gcloud compute instances get-serial-port-output --project "${GCP_PROJECT}" \
            --zone "${node_zone}" --port 1 "${node_name}" > "${dir}/serial-1.log" || true

        ssh-to-node "${node_name}" "${node_zone}" "sudo chmod -R a+r /var/log" || true
        gcloud compute scp --recurse --project "${GCP_PROJECT}" --zone "${node_zone}" \
            "${node_name}:/var/log/cloud-init.log" "${node_name}:/var/log/cloud-init-output.log" \
            "${node_name}:/var/log/pods" "${node_name}:/var/log/containers" \
            "${dir}" || true

        ssh-to-node "${node_name}" "${node_zone}" "sudo journalctl --output=short-precise -k" > "${dir}/kern.log" || true
        ssh-to-node "${node_name}" "${node_zone}" "sudo journalctl --output=short-precise" > "${dir}/systemd.log" || true
        ssh-to-node "${node_name}" "${node_zone}" "sudo crictl version && sudo crictl info" > "${dir}/containerd.info" || true
        ssh-to-node "${node_name}" "${node_zone}" "sudo journalctl --no-pager -u kubelet.service" > "${dir}/kubelet.log" || true
        ssh-to-node "${node_name}" "${node_zone}" "sudo journalctl --no-pager -u containerd.service" > "${dir}/containerd.log" || true
    done

    # GCP activity log since the start of this run
    gcloud logging read --order=asc \
        --format='table(timestamp,jsonPayload.resource.name,jsonPayload.event_subtype)' \
        --project "${GCP_PROJECT}" \
        "timestamp >= \"${TIMESTAMP}\"" \
        > "${ARTIFACTS}/logs/activity.log" || true
}
# cleanup all resources we use
# Tear down everything this run created: the workload cluster, the kind
# bootstrap cluster, all CAPG-created GCP resources, networks, images, and
# the temp dir. Every step is best-effort so cleanup always runs to the end.
cleanup() {
    # KIND_IS_UP is true once we: kind create
    if [[ "${KIND_IS_UP:-}" = true ]]; then
        # Use the configurable cluster name instead of the former hard-coded
        # "test1" (identical behavior for the default CLUSTER_NAME).
        timeout 60 kubectl \
            --context=kind-clusterapi \
            delete cluster "${CLUSTER_NAME}" || true
        timeout 60 kubectl \
            --context=kind-clusterapi \
            wait --for=delete "cluster/${CLUSTER_NAME}" || true
        make kind-reset || true
    fi
    # clean up e2e.test symlink
    (cd "$(go env GOPATH)/src/k8s.io/kubernetes" && rm -f _output/bin/e2e.test) || true

    # Force a cleanup of cluster api created resources using gcloud commands
    gcloud compute forwarding-rules delete --project "${GCP_PROJECT}" --global "${CLUSTER_NAME}-apiserver" --quiet || true
    gcloud compute target-tcp-proxies delete --project "${GCP_PROJECT}" "${CLUSTER_NAME}-apiserver" --quiet || true
    gcloud compute backend-services delete --project "${GCP_PROJECT}" --global "${CLUSTER_NAME}-apiserver" --quiet || true
    gcloud compute health-checks delete --project "${GCP_PROJECT}" "${CLUSTER_NAME}-apiserver" --quiet || true

    # Generated delete commands: $1/$2 are the NAME/ZONE columns of the listing.
    (gcloud compute instances list --project $GCP_PROJECT | grep $CLUSTER_NAME \
        | awk '{print "gcloud compute instances delete --project '$GCP_PROJECT' --quiet " $1 " --zone " $2 "\n"}' \
        | bash) || true
    (gcloud compute instance-groups list --project $GCP_PROJECT | grep $CLUSTER_NAME \
        | awk '{print "gcloud compute instance-groups unmanaged delete --project '$GCP_PROJECT' --quiet " $1 " --zone " $2 "\n"}' \
        | bash) || true
    (gcloud compute firewall-rules list --project $GCP_PROJECT | grep $CLUSTER_NAME \
        | awk '{print "gcloud compute firewall-rules delete --project '$GCP_PROJECT' --quiet " $1 "\n"}' \
        | bash) || true

    # cleanup the networks
    gcloud compute routers nats delete "${CLUSTER_NAME}-mynat" --project="${GCP_PROJECT}" \
        --router-region="${GCP_REGION}" --router="${CLUSTER_NAME}-myrouter" --quiet || true
    gcloud compute routers delete "${CLUSTER_NAME}-myrouter" --project="${GCP_PROJECT}" \
        --region="${GCP_REGION}" --quiet || true
    if [[ ${NETWORK_NAME} != "default" ]]; then
        (gcloud compute firewall-rules list --project $GCP_PROJECT | grep $NETWORK_NAME \
            | awk '{print "gcloud compute firewall-rules delete --project '$GCP_PROJECT' --quiet " $1 "\n"}' \
            | bash) || true
        gcloud compute networks delete --project="${GCP_PROJECT}" \
            --quiet "${NETWORK_NAME}" || true
    fi

    if [[ "${REUSE_OLD_IMAGES:-false}" == "false" ]]; then
        (gcloud compute images list --project $GCP_PROJECT \
            --no-standard-images --filter="family:capi-ubuntu-1804-k8s-v1-16" --format="table[no-heading](name)" \
            | awk '{print "gcloud compute images delete --project '$GCP_PROJECT' --quiet " $1 "\n"}' \
            | bash) || true
    fi

    # remove our tempdir
    # NOTE: this needs to be last, or it will prevent kind delete
    if [[ -n "${TMP_DIR:-}" ]]; then
        rm -rf "${TMP_DIR}" || true
    fi
}
# our exit handler (trap)
exit-handler() {
    # Always collect diagnostics before tearing resources down.
    dump-logs
    cleanup
}
# SSH to a node by name ($1) in zone ($2) and run a command ($3).
function ssh-to-node() {
    local node="$1"
    local zone="$2"
    local cmd="$3"

    # ensure we have an IP to connect to
    gcloud compute --project "${GCP_PROJECT}" instances add-access-config --zone "${zone}" "${node}" || true

    # Loop until we can successfully ssh into the box (up to 5 attempts, 5s apart)
    for try in {1..5}; do
        if gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --ssh-flag="-o ConnectTimeout=30" \
            --project "${GCP_PROJECT}" --zone "${zone}" "${node}" --command "echo test > /dev/null"; then
            break
        fi
        sleep 5
    done
    # Then actually try the command.
    gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --ssh-flag="-o ConnectTimeout=30" \
        --project "${GCP_PROJECT}" --zone "${zone}" "${node}" --command "${cmd}"
}
# Build (or reuse) the GCE node VM image via sigs.k8s.io/image-builder,
# installing ansible and packer on the fly when they are missing.
init_image() {
    # Reuse an existing image of the expected family if requested and present.
    if [[ "${REUSE_OLD_IMAGES:-false}" == "true" ]]; then
        image=$(gcloud compute images list --project $GCP_PROJECT \
            --no-standard-images --filter="family:capi-ubuntu-1804-k8s-v1-16" --format="table[no-heading](name)")
        if [[ ! -z "$image" ]]; then
            return
        fi
    fi

    # ansible is required by the packer provisioner; only auto-install as root (CI).
    if ! command -v ansible &> /dev/null; then
        if [[ $EUID -ne 0 ]]; then
            echo "Please install ansible and try again."
            exit 1
        else
            # we need pip to install ansible
            curl -L https://bootstrap.pypa.io/get-pip.py -o get-pip.py
            python get-pip.py --user
            rm -f get-pip.py

            # install ansible needed by packer
            version="2.8.5"
            python -m pip install "ansible==${version}"
        fi
    fi

    # install a pinned packer release if missing
    if ! command -v packer &> /dev/null; then
        hostos=$(go env GOHOSTOS)
        hostarch=$(go env GOHOSTARCH)
        version="1.4.3"
        url="https://releases.hashicorp.com/packer/${version}/packer_${version}_${hostos}_${hostarch}.zip"

        echo "Downloading packer from $url"
        wget --quiet -O packer.zip $url && \
            unzip packer.zip && \
            rm packer.zip && \
            ln -s $PWD/packer /usr/local/bin/packer
    fi

    # pin the kubernetes version baked into the image (1.15.x -> 1.16.2)
    (cd "$(go env GOPATH)/src/sigs.k8s.io/image-builder/images/capi" && \
        sed -i 's/1\.15\.4/1.16.2/' packer/config/kubernetes.json && \
        sed -i 's/1\.15/1.16/' packer/config/kubernetes.json)

    if [[ $EUID -ne 0 ]]; then
        (cd "$(go env GOPATH)/src/sigs.k8s.io/image-builder/images/capi" && \
            GCP_PROJECT_ID=$GCP_PROJECT GOOGLE_APPLICATION_CREDENTIALS=$GOOGLE_APPLICATION_CREDENTIALS \
            make build-gce-default)
    else
        # assume we are running in the CI environment as root
        # Add a user for ansible to work properly
        groupadd -r packer && useradd -m -s /bin/bash -r -g packer packer
        # use the packer user to run the build
        su - packer -c "bash -c 'cd /home/prow/go/src/sigs.k8s.io/image-builder/images/capi && GCP_PROJECT_ID=$GCP_PROJECT GOOGLE_APPLICATION_CREDENTIALS=$GOOGLE_APPLICATION_CREDENTIALS make build-gce-default'"
    fi
}
# build kubernetes / node image, e2e binaries
# Build kubernetes e2e binaries with bazel and put kubectl/e2e.test on PATH.
build() {
    # possibly enable bazel build caching before building kubernetes
    if [[ "${BAZEL_REMOTE_CACHE_ENABLED:-false}" == "true" ]]; then
        create_bazel_cache_rcs.sh || true
    fi

    pushd "$(go env GOPATH)/src/k8s.io/kubernetes"

    # make sure we have e2e requirements
    bazel build //cmd/kubectl //test/e2e:e2e.test //vendor/github.com/onsi/ginkgo/ginkgo

    # ensure the e2e script will find our binaries ...
    mkdir -p "${PWD}/_output/bin/"
    cp "${PWD}/bazel-bin/test/e2e/e2e.test" "${PWD}/_output/bin/e2e.test"
    PATH="$(dirname "$(find "${PWD}/bazel-bin/" -name kubectl -type f)"):${PATH}"
    export PATH

    # attempt to release some memory after building
    sync || true
    echo 1 > /proc/sys/vm/drop_caches || true

    popd
}
# generate manifests needed for creating the GCP cluster to run the tests
# Build the provider image and render the example manifests used to create
# the GCP cluster under test.
generate_manifests() {
    # install kustomize on demand (used by the make targets below)
    if ! command -v kustomize >/dev/null 2>&1; then
        (cd ./hack/tools/ && GO111MODULE=on go install sigs.k8s.io/kustomize/kustomize/v3)
    fi

    PULL_POLICY=Never GCP_PROJECT=$GCP_PROJECT \
        make modules docker-build

    # render examples/_out/* from the environment configuration
    GOOGLE_APPLICATION_CREDENTIALS=$GOOGLE_APPLICATION_CREDENTIALS \
        GCP_REGION=$GCP_REGION \
        GCP_PROJECT=$GCP_PROJECT \
        CLUSTER_NAME=$CLUSTER_NAME \
        NETWORK_NAME=$NETWORK_NAME \
        KUBERNETES_VERSION=$KUBERNETES_VERSION \
        make generate-examples
}
# fix manifests to use k/k from CI
# Rewrite the rendered manifests so the cluster runs the latest CI build of
# kubernetes instead of the released version.
fix_manifests() {
    # TODO: revert to https://dl.k8s.io/ci/latest-green.txt once https://github.com/kubernetes/release/issues/897 is fixed.
    CI_VERSION=${CI_VERSION:-$(curl -sSL https://dl.k8s.io/ci/k8s-master.txt)}
    echo "Overriding Kubernetes version to : ${CI_VERSION}"
    sed -i 's|kubernetesVersion: .*|kubernetesVersion: "ci/'${CI_VERSION}'"|' examples/_out/controlplane.yaml
    sed -i 's|CI_VERSION=.*|CI_VERSION='$CI_VERSION'|' examples/_out/controlplane.yaml
    sed -i 's|CI_VERSION=.*|CI_VERSION='$CI_VERSION'|' examples/_out/machinedeployment.yaml
}
# up a cluster with kind
# Create the kind bootstrap cluster, apply the manifests, then poll until all
# 5 machines are Running (bails out on any Failed machine or after 30 min).
create_cluster() {
    # actually create the cluster
    KIND_IS_UP=true

    # Load the newly built image into kind and start the cluster
    LOAD_IMAGE="gcr.io/${GCP_PROJECT}/cluster-api-gcp-controller-amd64:dev" make create-cluster

    # Wait till all machines are running (bail out at 30 mins)
    attempt=0
    while true; do
        kubectl get machines --context=kind-clusterapi
        # count Running machines vs total (NR = total lines = total machines)
        read running total <<< $(kubectl get machines --context=kind-clusterapi \
            -o json | jq -r '.items[].status.phase' | awk 'BEGIN{count=0} /(r|R)unning/{count++} END{print count " " NR}') ;
        if [[ $total == "5" && $running == "5" ]]; then
            return 0
        fi
        # any Failed machine means the cluster will never converge
        read failed total <<< $(kubectl get machines --context=kind-clusterapi \
            -o json | jq -r '.items[].status.phase' | awk 'BEGIN{count=0} /(f|F)ailed/{count++} END{print count " " NR}') ;
        if [[ ! $failed -eq 0 ]]; then
            echo "$failed machines (out of $total) in cluster failed ... bailing out"
            exit 1
        fi
        timestamp=$(date +"[%H:%M:%S]")
        # 180 attempts x 10s sleep = 30 minutes
        if [ $attempt -gt 180 ]; then
            echo "cluster did not start in 30 mins ... bailing out!"
            exit 1
        fi
        echo "$timestamp Total machines : $total / Running : $running .. waiting for 10 seconds"
        sleep 10
        attempt=$((attempt+1))
    done
}
# run e2es with kubetest
# Run the kubernetes conformance e2e suite against the deployed cluster.
run_tests() {
    # export the KUBECONFIG
    KUBECONFIG="${PWD}/kubeconfig"
    export KUBECONFIG

    # ginkgo regexes
    SKIP="${SKIP:-}"
    FOCUS="${FOCUS:-"\\[Conformance\\]"}"
    # if we set PARALLEL=true, skip serial tests set --ginkgo-parallel
    if [[ "${PARALLEL:-false}" == "true" ]]; then
        export GINKGO_PARALLEL=y
        if [[ -z "${SKIP}" ]]; then
            SKIP="\\[Serial\\]"
        else
            SKIP="\\[Serial\\]|${SKIP}"
        fi
    fi

    # get the number of worker nodes
    # TODO(bentheelder): this is kinda gross
    NUM_NODES="$(kubectl get nodes --kubeconfig=$KUBECONFIG \
        -o=jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.taints}{"\n"}{end}' \
        | grep -cv "node-role.kubernetes.io/master" )"

    # wait for all the nodes to be ready
    kubectl wait --for=condition=Ready node --kubeconfig=$KUBECONFIG --all || true

    # setting this env prevents ginkgo e2e from trying to run provider setup
    export KUBERNETES_CONFORMANCE_TEST="y"
    # run the tests
    (cd "$(go env GOPATH)/src/k8s.io/kubernetes" && ./hack/ginkgo-e2e.sh \
        '--provider=skeleton' "--num-nodes=${NUM_NODES}" \
        "--ginkgo.focus=${FOCUS}" "--ginkgo.skip=${SKIP}" \
        "--report-dir=${ARTIFACTS}" '--disable-log-dump=true')

    unset KUBECONFIG
    unset KUBERNETES_CONFORMANCE_TEST
}
# initialize a router and cloud NAT
# Create the VPC network (when not using "default"), its firewall rules, and
# a router + Cloud NAT so instances without external IPs can reach out.
init_networks() {
    if [[ ${NETWORK_NAME} != "default" ]]; then
        gcloud compute networks create --project $GCP_PROJECT ${NETWORK_NAME} --subnet-mode auto --quiet
        gcloud compute firewall-rules create ${NETWORK_NAME}-allow-http --project $GCP_PROJECT \
            --allow tcp:80 --network ${NETWORK_NAME} --quiet
        gcloud compute firewall-rules create ${NETWORK_NAME}-allow-https --project $GCP_PROJECT \
            --allow tcp:443 --network ${NETWORK_NAME} --quiet
        gcloud compute firewall-rules create ${NETWORK_NAME}-allow-icmp --project $GCP_PROJECT \
            --allow icmp --network ${NETWORK_NAME} --priority 65534 --quiet
        gcloud compute firewall-rules create ${NETWORK_NAME}-allow-internal --project $GCP_PROJECT \
            --allow "tcp:0-65535,udp:0-65535,icmp" --network ${NETWORK_NAME} --priority 65534 --quiet
        gcloud compute firewall-rules create ${NETWORK_NAME}-allow-rdp --project $GCP_PROJECT \
            --allow "tcp:3389" --network ${NETWORK_NAME} --priority 65534 --quiet
        gcloud compute firewall-rules create ${NETWORK_NAME}-allow-ssh --project $GCP_PROJECT \
            --allow "tcp:22" --network ${NETWORK_NAME} --priority 65534 --quiet
    fi

    # log the resulting network state for debugging
    gcloud compute firewall-rules list --project $GCP_PROJECT
    gcloud compute networks list --project="${GCP_PROJECT}"
    gcloud compute networks describe ${NETWORK_NAME} --project="${GCP_PROJECT}"

    gcloud compute routers create "${CLUSTER_NAME}-myrouter" --project="${GCP_PROJECT}" \
        --region="${GCP_REGION}" --network=${NETWORK_NAME}
    gcloud compute routers nats create "${CLUSTER_NAME}-mynat" --project="${GCP_PROJECT}" \
        --router-region="${GCP_REGION}" --router="${CLUSTER_NAME}-myrouter" \
        --nat-all-subnet-ip-ranges --auto-allocate-nat-external-ips
}
# setup kind, build kubernetes, create a cluster, run the e2es
# Parse flags, validate credentials/config, then build kubernetes, create the
# cluster and run the e2e suite. Registers exit-handler unless SKIP_CLEANUP.
main() {
    # parse CLI flags
    for arg in "$@"
    do
        if [[ "$arg" == "--verbose" ]]; then
            set -o xtrace
        fi
        if [[ "$arg" == "--clean" ]]; then
            cleanup
            return 0
        fi
        if [[ "$arg" == "--use-ci-artifacts" ]]; then
            USE_CI_ARTIFACTS="1"
        fi
        if [[ "$arg" == "--skip-init-image" ]]; then
            SKIP_INIT_IMAGE="1"
        fi
    done

    if [[ -z "$GOOGLE_APPLICATION_CREDENTIALS" ]]; then
        # Fixed: the variable name is written literally; the old unquoted heredoc
        # expanded the (empty) variable and printed " is not set." with no name.
        cat <<EOF
GOOGLE_APPLICATION_CREDENTIALS is not set.
Please set this to the path of the service account used to run this script.
EOF
        return 2
    else
        gcloud auth activate-service-account --key-file="${GOOGLE_APPLICATION_CREDENTIALS}"
    fi

    if [[ -z "$GCP_PROJECT" ]]; then
        # fall back to the project the service account belongs to
        GCP_PROJECT=$(cat ${GOOGLE_APPLICATION_CREDENTIALS} | jq -r .project_id)
        cat <<EOF
GCP_PROJECT is not set. Using project_id $GCP_PROJECT
EOF
    fi

    if [[ -z "$GCP_REGION" ]]; then
        cat <<EOF
GCP_REGION is not set.
Please specify which the GCP region to use.
EOF
        return 2
    fi

    # create temp dir and setup cleanup
    TMP_DIR=$(mktemp -d)
    SKIP_CLEANUP=${SKIP_CLEANUP:-""}
    if [[ -z "${SKIP_CLEANUP}" ]]; then
        trap exit-handler EXIT
    fi
    # ensure artifacts exists when not in CI
    export ARTIFACTS
    mkdir -p "${ARTIFACTS}/logs"

    source "${REPO_ROOT}/hack/ensure-go.sh"
    source "${REPO_ROOT}/hack/ensure-kind.sh"

    # now build and run the cluster and tests
    init_networks
    build
    generate_manifests
    if [[ ${USE_CI_ARTIFACTS:-""} == "yes" || ${USE_CI_ARTIFACTS:-""} == "1" ]]; then
        echo "Fixing manifests to use latest CI artifacts..."
        fix_manifests
    fi
    SKIP_INIT_IMAGE=${SKIP_INIT_IMAGE:-""}
    if [[ "${SKIP_INIT_IMAGE}" == "yes" || "${SKIP_INIT_IMAGE}" == "1" ]]; then
        echo "Skipping image initialization..."
    else
        init_image
    fi
    create_cluster

    SKIP_RUN_TESTS=${SKIP_RUN_TESTS:-""}
    if [[ -z "${SKIP_RUN_TESTS}" ]]; then
        run_tests
    fi
}
main "$@"
|
/*
* Merlin
*
* API Guide for accessing Merlin's model management, deployment, and serving functionalities
*
* API version: 0.14.0
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package client
// PredictionJobConfigModel holds the model section of a prediction job
// configuration as serialized by the Merlin API (swagger-generated).
type PredictionJobConfigModel struct {
	// Trailing underscore avoids shadowing the JSON key "type" with a Go keyword-like name.
	Type_ string `json:"type,omitempty"`
	// Uri locates the model artifact — presumably a storage URI; confirm against the Merlin API docs.
	Uri string `json:"uri,omitempty"`
	Result *PredictionJobConfigModelResult `json:"result,omitempty"`
	// Options carries free-form, model-specific key/value settings.
	Options map[string]string `json:"options,omitempty"`
}
|
<filename>blockchain/btc/blockcypherclient_test.go
// +build !test
package btc
import (
"testing"
)
// TestGetChainTimeStampAndNonceBlockCypher checks that the current chain
// timestamp can be fetched. NOTE(review): hits the public BlockCypher API.
func TestGetChainTimeStampAndNonceBlockCypher(t *testing.T) {
	client := BlockCypherClient{}
	if _, err := client.GetCurrentChainTimeStamp(); err != nil {
		t.Error("Fail to get chain timestamp and nonce")
	}
}
// TestGetTimestampAndNonceByBlockHeightBlockCypher checks that BTC block #2
// reports its known timestamp and nonce.
// NOTE(review): hits the public BlockCypher API over the network.
func TestGetTimestampAndNonceByBlockHeightBlockCypher(t *testing.T) {
	var btcClient = BlockCypherClient{}
	timestamp, nonce, err := btcClient.GetTimeStampAndNonceByBlockHeight(2)
	t.Log(timestamp, nonce)
	if err != nil {
		t.Error("Fail to get timestamp and nonce")
	}
	// expected values for mainnet block height 2
	if timestamp != 1231469744 {
		t.Error("Wrong Timestamp")
	}
	if nonce != 1639830024 {
		t.Error("Wrong Nonce")
	}
}
// TestGetNonceByTimeStampBlockCypher resolves a timestamp to the expected
// block height, block timestamp and nonce.
// NOTE(review): hits the public BlockCypher API over the network.
func TestGetNonceByTimeStampBlockCypher(t *testing.T) {
	var btcClient = BlockCypherClient{}
	blockHeight, timestamp, nonce, err := btcClient.GetNonceByTimestamp(1373297940)
	t.Log(blockHeight, timestamp, nonce)
	if err != nil {
		t.Error("Fail to get chain timestamp and nonce", err)
		t.Fatal()
	}
	// expected values for the block following the queried timestamp
	if blockHeight != 245502 {
		t.Error("Wrong Block")
	}
	if timestamp != int64(1373298838) {
		t.Error("Wrong Timestamp")
	}
	if nonce != int64(3029573794) {
		t.Error("Wrong Nonce")
	}
}
// TestVerifyNonceByTimeStampBlockCypher verifies a known (timestamp, nonce)
// pair against the chain. NOTE(review): hits the public BlockCypher API.
func TestVerifyNonceByTimeStampBlockCypher(t *testing.T) {
	client := BlockCypherClient{}
	isOk, err := client.VerifyNonceWithTimestamp(1373297940, 3029573794)
	if err != nil {
		t.Error("Fail to get chain timestamp and nonce")
		t.FailNow()
	}
	if !isOk {
		t.Error("Fail to verify nonce by timestamp")
	}
}
|
import py
import time, gc, thread, os
from pypy.conftest import gettestobjspace, option
from pypy.interpreter.gateway import ObjSpace, W_Root, interp2app_temp
from pypy.module.thread import gil
NORMAL_TIMEOUT = 300.0 # 5 minutes
def waitfor(space, w_condition, delay=1):
    # Poll the wrapped callable w_condition until it returns true, releasing
    # the GIL around each sleep so other app-level threads can make progress.
    # Gives up (printing a note) after delay * NORMAL_TIMEOUT seconds.
    adaptivedelay = 0.04
    limit = time.time() + delay * NORMAL_TIMEOUT
    while time.time() <= limit:
        gil.before_external_call()
        time.sleep(adaptivedelay)
        gil.after_external_call()
        gc.collect()
        if space.is_true(space.call_function(w_condition)):
            return
        # back off gradually to keep polling overhead low on long waits
        adaptivedelay *= 1.05
    print '*** timed out ***'
def timeout_killer(pid, delay):
    # Watchdog: from a background thread, probe the target process with
    # signal 0 every 0.1s; if it is still alive after `delay` seconds,
    # SIGKILL it. The probe raises (ending the watchdog) once the process
    # has already exited on its own.
    def kill():
        for x in range(delay * 10):
            time.sleep(0.1)
            os.kill(pid, 0)
        os.kill(pid, 9)
        print "process %s killed!" % (pid,)
    thread.start_new_thread(kill, ())
class GenericTestThread:
    # Base class for PyPy app-level thread tests: builds an object space with
    # the thread/time/signal modules and exposes wait/busy-wait/kill helpers
    # to app-level test code (as w_* attributes).

    def setup_class(cls):
        space = gettestobjspace(usemodules=('thread', 'time', 'signal'))
        cls.space = space

        if option.runappdirect:
            # Running directly on the host interpreter: plain time-based polling,
            # no space/GIL juggling needed.
            def plain_waitfor(self, condition, delay=1):
                adaptivedelay = 0.04
                limit = time.time() + NORMAL_TIMEOUT * delay
                while time.time() <= limit:
                    time.sleep(adaptivedelay)
                    gc.collect()
                    if condition():
                        return
                    adaptivedelay *= 1.05
                print '*** timed out ***'
            cls.w_waitfor = plain_waitfor
        else:
            # Wrap the module-level waitfor() so app-level code can call it.
            cls.w_waitfor = space.wrap(lambda self, condition, delay=1: waitfor(space, condition, delay))
        cls.w_busywait = space.appexec([], """():
            import time
            return time.sleep
        """)
        cls.w_timeout_killer = space.wrap(lambda self, *args, **kwargs: timeout_killer(*args, **kwargs))
|
import { IRoomBlock } from '../rooms/context'
import { dsmod } from './get-template'
import { findRoom } from './find-room'
import { cloneRoom } from './clone-room'
/**
 * Copy the contents of a template room (looked up by uid inside `dsmod`)
 * into `room1`. Throws when no template room matches the given uid.
 */
export const cloneDsRoomInto = (room1: IRoomBlock, templateRoomUid: string) => {
  const template = findRoom(dsmod, templateRoomUid)
  if (!template) {
    throw new Error(`given template room was not found '${templateRoomUid}'`)
  }
  cloneRoom(room1, template)
}
|
<reponame>youaxa/ara-poc-open<filename>server/src/main/java/com/decathlon/ara/service/dto/setting/SettingOptionDTO.java
package com.decathlon.ara.service.dto.setting;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.experimental.Wither;
/**
 * A single selectable option for a setting: the technical {@code value}
 * persisted in the database and the human-readable {@code label} shown in the GUI.
 */
@Data
@NoArgsConstructor
@AllArgsConstructor
// NOTE(review): Lombok's @Wither was renamed to @With in 1.18.10+; migrate when upgrading Lombok.
@Wither
public class SettingOptionDTO {

    /**
     * Technical value to save in database for the option.
     */
    private String value;

    /**
     * User-visible text value to show in the GUI for the option.
     */
    private String label;
}
|
def round_to_two_decimals(number):
    """Return ``number`` rounded to two decimal places via built-in round()."""
    rounded = round(number, 2)
    return rounded
import jwt from "jsonwebtoken";
/**
 * Sign `data` into a JWT using JWT_SECRET.
 * The issuer is always stamped; an expiry is only set when a truthy
 * `duration` is supplied (same contract as before).
 */
export function createJwt(data, duration) {
    const options = duration
        ? { issuer: 'ban-appeals-backend', expiresIn: duration }
        : { issuer: 'ban-appeals-backend' };
    return jwt.sign(data, process.env.JWT_SECRET, options);
}
// Verify `token` against JWT_SECRET and return its decoded payload.
// jwt.verify throws when verification fails (bad signature, expired, ...).
export function decodeJwt(token) {
    return jwt.verify(token, process.env.JWT_SECRET);
}
|
#!/bin/bash
# Temp version
# Temp version; overwritten after semantic-release computes the real one.
VERSION="XXX"

# Styling variables
red=$"\n\e[1;31m(✖) "
green=$"\n\e[1;32m(✔) "
blue=$"\n\e[1;34m(ℹ) "
end=$"\e[0m\n"

# While running yarn, the registry changes to registry.yarnpkg.com which is a mirror to the public NPM registry
YARN_PUBLIC_REGISTRY_REGEX="^https://registry\.yarnpkg\.com/$"
NPM_PUBLIC_REGISTRY_REGEX="^https://registry\.npmjs\.org/$"
NPM_PUBLIC_REGISTRY="https://registry.npmjs.org"
# Abort when the scripts/ directory differs from release/master: releases must
# always run with the build scripts from master.
check_release_scripts_changed() {
    if [[ $(git diff --shortstat HEAD..release/master scripts 2> /dev/null | tail -n1) != "" ]] ; then
        printf "${red}Build scripts have changed, aborting! Run release command from master.${end}"
        return 1
    fi
}
setup_remote() {
    # Adds the release remote branch by nuking existing if any.
    # We add this because we don't want to assume what people call their
    # origin or what people call their upstream.
    if git remote get-url release; then
        printf "${blue}Removing existing release remote branch...${end}"
        git remote remove release || return 1
        printf "${green}Removed existing release remote branch!${end}"
    fi
    # Authenticated via GITHUB_TOKEN so the release can push tags/branches.
    printf "${blue}Adding release remote branch...${end}"
    git remote add release "https://$GITHUB_TOKEN@github.com/box/box-ui-elements.git" || return 1
    printf "${green}Release remote branch added!${end}"
}
fetch_and_prune_tags() {
    # Fetch from release remote and prune tags so local tags exactly mirror the remote.
    printf "${blue}Fetching release remote and pruning tags...${end}"
    git fetch release || return 1
    git fetch release --prune 'refs/tags/*:refs/tags/*' || return 1
    printf "${green}Fetched and pruned tags!${end}"
}
# Validate DIST/BRANCH and check out the branch (or tag, for hotfixes) that
# the release will be cut from.
checkout_branch() {
    printf "${blue}Determining dist-tag and branch...${end}"
    if [[ "$DIST" == "" ]]; then
        printf "${red}Could not determine a dist-tag, it should be either beta, latest, next or another string${end}"
        return 1
    elif [[ "$BRANCH" == "" ]]; then
        printf "${red}Could not determine the branch, it should be a valid branch like master, next or a tag${end}"
        if [[ "$HOTFIX" == true ]]; then
            printf "${red}For hotfix you must pass in the git tag branch, eg: BRANCH=vX.X.X yarn release:hotfix${end}"
        fi
        return 1
    else
        GIT_BRANCH=$BRANCH
        if [[ "$HOTFIX" == true ]]; then
            # hotfix: release from an existing tag, no reset
            printf "${blue}This is a hotfix release from ${BRANCH}...${end}"
            git checkout $BRANCH || return 1
        elif [[ "$BRANCH" == 'master' ]]; then
            # normal release: local master must exactly match release/master
            printf "${blue}This is a ${DIST} release, resetting hard from master...${end}"
            git checkout master || return 1
            git reset --hard release/master || return 1
        else
            printf "${blue}This is a ${DIST} release, resetting hard from ${BRANCH}...${end}"
            git checkout -t release/$BRANCH || return 1
        fi
        printf "${green}${BRANCH} checkout complete and dist-tag=${DIST} determined!${end}"
    fi
}
# Prepare the working tree for a release: remote, tags, script check,
# branch checkout, clean, install.
setup() {
    # Setup remote git url
    setup_remote || return 1

    # Fetch and prune
    fetch_and_prune_tags || return 1

    # Only proceed if release scripts haven't changed
    # Master branch should have latest build scripts
    printf "${blue}Checking out master...${end}"
    git checkout master || return 1
    check_release_scripts_changed || return 1

    # Checkout the branch from which we want to release
    checkout_branch || return 1

    # Clean untracked files
    printf "${blue}Cleaning untracked files...${end}"
    git clean -fd || return 1
    printf "${green}Cleaned untracked files!${end}"

    # Run install and build locales
    printf "${blue}Running setup...${end}"
    yarn setup || return 1
    printf "${green}Setup done!${end}"
}
# Quality gate: lint, type-check (flow), and run the test suite.
lint_and_test() {
    # ESLint and Stylelint
    printf "${blue}Running linter...${end}"
    yarn lint || return 1
    printf "${green}Linting done!${end}"

    # Flow
    printf "${blue}Running flow...${end}"
    yarn flow check || return 1
    printf "${green}Flow check done!${end}"

    # Tests
    printf "${blue}Running tests...${end}"
    yarn test || return 1
    printf "${green}Tests done!${end}"
}
# Build the npm distribution bundle and the translated locales.
build_assets() {
    printf "${blue}Building assets...${end}"
    yarn build:npm || return 1
    printf "${green}Built assets!${end}"

    printf "${blue}Building locales...${end}"
    yarn build:i18n || return 1
    printf "${green}Built locales!${end}"
}
# Publish the package publicly under the computed dist-tag (beta/latest/next/...).
push_to_npm() {
    printf "${blue}Publishing assets to npmjs...${end}"
    npm publish --access public --tag "$DIST" || return 1
    # Fixed: the success message previously interpolated the undefined
    # variable DISTTAG and printed an empty dist-tag.
    printf "${green}Published npm using dist-tag=${DIST}!${end}"
}
# Build the production styleguide (published to gh-pages afterwards).
build_examples() {
    printf "${blue}Building styleguide...${end}"
    yarn build:prod:examples || return 1
    printf "${green}Built styleguide!${end}"
}
# Recreate a local gh-pages branch, overlay the freshly built styleguide,
# commit it, and force-push it to the release remote.
push_to_gh_pages() {
    printf "${blue}Pushing styleguide to gh-pages...${end}"
    # start from a clean gh-pages branch every time
    if [[ $(git branch | grep -w "gh-pages") != "" ]] ; then
        git branch -D gh-pages || return 1
        printf "${green}Deleted existing gh-pages branch!${end}"
    fi
    git checkout -b gh-pages || return 1
    rm -rf build
    cp -R styleguide/. ./ || return 1
    cp examples/gitignore .gitignore || return 1
    # re-stage everything so the branch contains only the styleguide output
    git rm -rf --cached . || return 1
    git add -A || return 1
    git commit --no-verify -am "build(examples): v$VERSION" || return 1
    git push release gh-pages --force --no-verify || return 1
    # Fixed: success message now follows the green/"!" convention used everywhere else.
    printf "${green}Pushed styleguide to gh-pages!${end}"
}
# Fail when `git status` reports any untracked (??) files.
check_untracked_files() {
    if [[ $(git status --porcelain 2>/dev/null| grep "^??") != "" ]] ; then
        printf "${red}Your branch has untracked files!${end}"
        return 1
    fi
}
# Fail when `git status` reports modified files (staged or unstaged).
check_uncommitted_files() {
    if [[ $(git status --porcelain 2>/dev/null| egrep "^(M| M)") != "" ]] ; then
        printf "${red}Your branch has uncommitted files!${end}"
        return 1
    fi
}
# After semantic-release bumps the version, package.json may legitimately be
# the only modified file; anything else is an error. A fully clean tree is
# also fine (the old check incorrectly failed when nothing was modified).
check_uncommitted_files_ignoring_package_json() {
    local changed
    changed=$(git status --porcelain | sed s/^...//)
    if [[ "$changed" != "" && "$changed" != "package.json" ]] ; then
        printf "${red}Your branch has uncommitted files!${end}"
        return 1
    fi
}
# Fail when the working tree has unstaged changes (non-empty `git diff`).
check_branch_dirty() {
    if [[ $(git diff --shortstat 2> /dev/null | tail -n1) != "" ]] ; then
        printf "${red}Your branch is dirty!${end}"
        return 1
    fi
}
# Fail unless npm is configured against the public npm (or yarn mirror) registry.
check_npm_registry() {
    if [[ ! $(npm config get registry) =~ (${YARN_PUBLIC_REGISTRY_REGEX}|${NPM_PUBLIC_REGISTRY_REGEX}) ]] ; then
        printf "${red}Not pointing at the right npm registry! Make sure ~/.npmrc points to ${NPM_PUBLIC_REGISTRY}${end}"
        return 1
    fi
}
# Fail unless the user is authenticated against the public npm registry.
check_npm_login() {
    if [[ ! $(npm whoami --registry ${NPM_PUBLIC_REGISTRY} 2>/dev/null) ]] ; then
        printf "${red}Not logged into npm! Try running npm login${end}"
        return 1
    fi
}
# End-to-end release pipeline: validate the working tree and npm auth, set up
# the branch, lint/test, build, run semantic-release, publish to npm, and
# publish the styleguide to gh-pages. The tree-cleanliness checks are repeated
# after every mutating step to catch stray build output.
push_new_release() {
    # Check branch being dirty
    check_branch_dirty || return 1

    # Check uncommitted files
    check_uncommitted_files || return 1

    # Check untracked files
    check_untracked_files || return 1

    # Check npm registry is correct
    check_npm_registry || return 1

    # Check npm login
    check_npm_login || return 1

    # Setup
    if ! setup; then
        printf "${red}Failed setup!${end}"
        return 1
    fi

    # Linting and testing
    if ! lint_and_test; then
        printf "${red}Failed linting and testing!${end}"
        return 1
    fi

    # Build npm assets
    if ! build_assets; then
        printf "${red}Failed building npm assets!${end}"
        return 1
    fi

    # Check uncommitted files
    check_uncommitted_files || return 1

    # Check untracked files
    check_untracked_files || return 1

    # Run the release (computes the version and updates package.json in place)
    if ! HUSKY_SKIP_HOOKS=1 BRANCH=$BRANCH DIST=$DIST yarn semantic-release --no-ci; then
        printf "${red}Failed semantic release!${end}"
        return 1
    fi

    # Get the latest version from uncommitted package.json
    VERSION=$(./node_modules/@box/frontend/shell/version.sh)

    # Make sure the version doesn't match the placeholder
    if [[ $VERSION == "0.0.0-semantically-released" ]] ; then
        printf "${red}No need to run a release!${end}"
        return 0
    fi

    # package.json should be the only updated and uncommitted file
    check_uncommitted_files_ignoring_package_json || return 1

    # Check untracked files
    check_untracked_files || return 1

    # Publish to npm
    if ! push_to_npm; then
        printf "${red}Failed pushing to npm!${end}"
        return 1
    fi

    # package.json should be the only updated and uncommitted file
    check_uncommitted_files_ignoring_package_json || return 1

    # Check untracked files
    check_untracked_files || return 1

    # Build examples
    if ! build_examples; then
        printf "${red}Failed building styleguide!${end}"
        return 1
    fi

    # Publish gh-pages
    if ! push_to_gh_pages; then
        printf "${red}Failed pushing styleguide to gh-pages!${end}"
        return 1
    fi

    # Check uncommitted files
    check_uncommitted_files || return 1

    # Check untracked files
    check_untracked_files || return 1
}
|
# Habitat plan metadata for the socat package.
pkg_name=socat
pkg_origin=core
pkg_version=1.7.4.1
# Upstream tarball and its SHA-256 checksum (verified by the build system).
pkg_source=http://www.dest-unreach.org/${pkg_name}/download/${pkg_name}-${pkg_version}.tar.gz
pkg_shasum=0c7e635070af1b9037fd96869fc45eacf9845cb54547681de9d885044538736d
pkg_maintainer="The Habitat Maintainers <humans@habitat.sh>"
pkg_description="Multipurpose relay for bidirectional data transfer between two independent data channels"
pkg_upstream_url=http://www.dest-unreach.org/socat/
pkg_license=('GPL-2.0')
pkg_bin_dirs=(bin)
# Build-time-only dependencies vs. runtime dependencies.
pkg_build_deps=(core/make core/gcc)
pkg_deps=(core/glibc core/readline core/openssl)
#
# TODO(ssd) 2017-06-11: The following is a summary of my attempt to
# get the test suit passing. Most tests pass; however it requires
# substantial setup and there are number of failing tests so I've left
# them disabled.
#
# The tests depend on the underlying kernel supporting the features it
# has been built with. Notably, your kernel must support SCTP for the
# SCTP tests to pass.
#
# The test suite has at least the following dependencies
#
# core/diffutils
# core/iproute2
# core/net-tools (for ifconfig)
# core/which
# core/grep (to ensure we don't get grep from busybox)
# core/coreutils (to ensure we don't get stat from busybox)
# core/busybox (for ping)
#
# The tests currently hang at the following test:
#
# test 320 UDP4MAXCHILDREN: max-children option...
#
# If you comment that test out, there are still 4-5 failing tests that
# would need to be addressed.
#
# do_prepare() {
# if [[ ! -r /sbin/ifconfig ]]; then
# ln -sv "$(pkg_path_for net-tools)/sbin/ifconfig" /sbin/ifconfig
# _clean_ifconfig=true
# fi
# }
#
# do_end() {
# if [[ -n "$_clean_ifconfig" ]]; then
# rm -fv /sbin/ifconfig
# fi
# }
#
# do_check() {
# make test
# }
|
#!/usr/bin/env bash
set -e

# Number of parallel pylint workers = logical CPU count.
# Fixed: modern $() command substitution instead of deprecated backticks,
# and the value is quoted when passed to pylint.
proc_number=$(python -c 'import multiprocessing; print(multiprocessing.cpu_count())')

# Run pylint/flake8 on IoT extension
pylint azext_iot/ --ignore=models,service_sdk,device_sdk,custom_sdk,dps_sdk --rcfile=./.pylintrc -j "$proc_number"
flake8 --statistics --exclude=*_sdk --append-config=./.flake8 azext_iot/
|
// Page dependencies: the global app instance plus shared utility,
// user-session, and API-endpoint-configuration modules.
var app = getApp();
const util = require('../../utils/util.js');
const user = require('../../utils/user.js');
const api = require('../../config/api.js');

// "My account" page: shows the user's profile counters (follows, footprints,
// favorites) and recommended brands, and routes to login, customer service,
// and other sub-pages.
Page({
  data: {
    // Combined status-bar + capsule height; filled in during onShow.
    heightTop: '',
    // Cached login token; empty string when logged out.
    token: wx.getStorageSync('accesstoken') || '',
    info: {
      focusNum: '',     // number of followed items
      footprintNum: '', // number of browsing-history entries
      collNum: '',      // number of favorites
    },
    // Header variant: '1' = logged in, '2' = logged out, '-1' = undecided.
    showHeaderType: '-1',
    brandList: [] // "guess you like" brand recommendations
  },
  onLoad: function (options) {
    user.login();
    // First visit without a token: record the entry source for analytics
    // and immediately open the login page.
    if (!wx.getStorageSync("accesstoken")) {
      setTimeout(() => {
        wx.setStorageSync('accessPath', '39我的-首次进入弹出')
        app.sensors.track('registerLoginButtonClick', {
          login_source: '39我的-首次进入弹出'
        })
        this.goLogin()
      }, 0);
    }
  },
  onShow: function () {
    // Recompute the header offset from the globally measured nav metrics.
    const {
      top,
      height
    } = app.globalData
    this.setData({
      heightTop: (top + height),
      token: wx.getStorageSync('accesstoken') || '',
    })
    // Deep-copy the counters so the setData below receives a fresh object.
    let newInfo = JSON.parse(JSON.stringify(this.data.info))
    if (wx.getStorageSync("accesstoken")) {
      this.getMyInfo()
    } else {
      // Logged out: zero the counters and show the logged-out header.
      newInfo.focusNum = 0
      newInfo.footprintNum = 0
      newInfo.collNum = 0
      this.setData({
        showHeaderType: '2',
        info: newInfo
      })
    }
    this.getBrandList()
  },
  // Fetch the logged-in user's profile and counter data from the user service.
  getMyInfo() {
    let params = {
      deviceId: "wechat_Mini_Program",
      deviceType: 0,
      accessToken: wx.getStorageSync("accesstoken"),
      userId: wx.getStorageSync("userId"),
    }
    util.request(api.javaUserHost + "v1.0/app/sys/userGet", params, 'POST')
      .then((res) => {
        console.log('rs', res)
        if (res.code == 0) {
          this.setData({
            showHeaderType: '1',
            info: res.data,
          })
        }
      })
      .catch((error) => {
        console.log(error);
      });
  },
  // Phone-number authorization callback: on success refresh the cached
  // token and reload the profile.
  getPhoneNum(e) {
    app.getPhoneNumber(e).then(res => {
      if (res) {
        this.setData({
          token: wx.getStorageSync('accesstoken') || ''
        });
        this.getMyInfo();
      }
    })
  },
  // Navigate to the login page.
  goLogin() {
    var url = "/pages/login/login";
    wx.navigateTo({
      url
    });
  },
  // Switch to the classification tab.
  goClassificationList() {
    var url = "/pages/classificationList/classificationList";
    wx.switchTab({
      url
    });
  },
  // Navigate to the brand-entry (merchant onboarding) page.
  goBrandEntry() {
    var url = "/pages/brandEntry/brandEntry";
    wx.navigateTo({
      url
    });
  },
  // Navigate to the personal-information page.
  goMyInformation() {
    var url = "/pages/myInformation/myInformation";
    wx.navigateTo({
      url
    });
  },
  // Dial the customer-service hotline; errors (e.g. user cancel) are logged only.
  goCallPhone(e) {
    wx.makePhoneCall({
      phoneNumber: '4000330560'
    }).catch((e)=>{
      console.log(e)
    })
  },
  // Generic navigation using the tapped element's data-url; silently
  // ignored when the user is not logged in.
  goPage(e) {
    if (!wx.getStorageSync("accesstoken")) {
      // this.goLogin()
      return
    }
    var url = e.currentTarget.dataset.url
    wx.navigateTo({
      url
    });
  },
  // Open a customer-service IM chat session (logged-in users only):
  // track the consultation, fetch a sales IM id, then open the chat page.
  openWeChat() {
    if (wx.getStorageSync("accesstoken")) {
      app.sensors.track('consultationInitiate', {
        click_source: '我的-加盟手册',
        brand_name: '',
        brand_id: ''
      })
      let data = {
        phone: wx.getStorageSync('userPhone'),
        accountId: wx.getStorageSync('accountId'),
        imId: wx.getStorageSync('nimId'),
        channel: 'xcx'
        // brandName
      }
      util.request(api.javaCustserviceUrl + "custservice/v1.0/huiju/getSaleIm", data, 'POST')
        .then((res) => {
          console.log(res)
          if (res.code == 0) {
            var url = '/pages/customerChat/customerChat?sessionId=p2p-' + res.data.imId
            wx.navigateTo({
              url: url
            });
          }
        })
        .catch((error) => { });
    }
    // else {
    //   let url = "/pages/login/login"
    //   wx.navigateTo({ url })
    // }
  },
  // Load "guess you like" brand recommendations for the current user.
  getBrandList() {
    let params = {
      id: wx.getStorageSync("userId"),
    }
    util.request(api.javaBrandHost + "brand/v1.0/phone/guessYouLike", params, 'POST')
      .then((res) => {
        if (res.code == 0) {
          this.setData({
            brandList: res.data
          })
        }
      })
      .catch((error) => {
        console.log(error);
      });
  },
  // Open the H5 ranking list inside a web view and record the visit.
  goH5ranking(){
    // app.sensors.track('consultationInitiate', {
    //   click_source: '我的-榜单',
    //   brand_name: '',
    //   brand_id: ''
    // })
    let url = "/pages/webViewList/webView/webView?url=" + api.webViewUrl+"rankingList?type=xcx";
    wx.navigateTo({
      url
    });
    this.getActivityBrowse('个人中心-banner','榜单')
  },
  /* Analytics helper: report an H5 activity-browse event. */
  getActivityBrowse(sourceMsg, nameMsg) {
    app.sensors.track('activityBrowse', {
      activity_source: sourceMsg,
      activity_name: nameMsg
    })
  },
})
#!/bin/bash
### /etc/ssh/sshd_config
###
### Harden the sshd configuration: disable password auth, limit auth tries,
### disable reverse-DNS lookups, append the local template once, install keys.
printf "\n\n***********************************************\n\nConfigure /etc/ssh/sshd_config [y/n]: "
if [ "$u_ssh" = "" ]; then
    # Only prompt interactively when the answer was not pre-seeded by a caller.
    # -r prevents backslash mangling of the typed answer.
    read -r u_ssh
fi
if [ "$u_ssh" = "y" ]; then
    file_ssh001=/etc/ssh/sshd_config
    file_ssh002=/etc/ssh/sshd_config.bak
    # Key-only authentication (covers both commented and active directives),
    # fewer allowed auth attempts, and no reverse-DNS lookups on connect.
    sed -i 's/^#PasswordAuthentication yes/PasswordAuthentication no/' "$file_ssh001"
    sed -i 's/^PasswordAuthentication yes/PasswordAuthentication no/' "$file_ssh001"
    sed -i 's/^#MaxAuthTries 6/MaxAuthTries 2/' "$file_ssh001"
    sed -i 's/^#UseDNS no/UseDNS no/' "$file_ssh001"
    #grep BitWorker
    # The marker line "### by BitWorker" flags a previous run, so the template
    # is appended at most once.
    u_bitworker=$(grep -m 1 "### by BitWorker" /etc/ssh/sshd_config)
    if [ -f "$file_ssh001" ] && [ "$u_bitworker" != "### by BitWorker" ]; then
        # make backup
        cp "$file_ssh001" "$file_ssh002"
        cat "$u_path/files/ssh/sshd_config" >> "$file_ssh001"
        # add backup remote client ssh connection
        sed -i 's/^Match Host XXX/Match Host '"$u_client_ip"'/' "$file_ssh001"
    fi
    # Install the authorized keys; make sure the target directory exists first.
    mkdir -p /root/.ssh
    cp "$u_path/files/ssh/authorized_keys" /root/.ssh/
    systemctl restart sshd
fi
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.orc.writer;
import com.facebook.presto.common.block.Block;
import com.facebook.presto.orc.ColumnWriterOptions;
import com.facebook.presto.orc.DictionaryCompressionOptimizer.DictionaryColumn;
import com.facebook.presto.orc.DwrfDataEncryptor;
import com.facebook.presto.orc.OrcEncoding;
import com.facebook.presto.orc.checkpoint.StreamCheckpoint;
import com.facebook.presto.orc.metadata.ColumnEncoding;
import com.facebook.presto.orc.metadata.CompressedMetadataWriter;
import com.facebook.presto.orc.metadata.MetadataWriter;
import com.facebook.presto.orc.metadata.RowGroupIndex;
import com.facebook.presto.orc.metadata.Stream;
import com.facebook.presto.orc.metadata.Stream.StreamKind;
import com.facebook.presto.orc.metadata.statistics.ColumnStatistics;
import com.facebook.presto.orc.stream.LongOutputStream;
import com.facebook.presto.orc.stream.PresentOutputStream;
import com.facebook.presto.orc.stream.StreamDataOutput;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import io.airlift.slice.Slice;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.OptionalInt;
import static com.facebook.presto.common.array.Arrays.ExpansionFactor.MEDIUM;
import static com.facebook.presto.common.array.Arrays.ExpansionOption.PRESERVE;
import static com.facebook.presto.common.array.Arrays.ensureCapacity;
import static com.facebook.presto.orc.DictionaryCompressionOptimizer.estimateIndexBytesPerValue;
import static com.facebook.presto.orc.metadata.CompressionKind.NONE;
import static com.facebook.presto.orc.stream.LongOutputStream.createDataOutputStream;
import static com.facebook.presto.orc.writer.ColumnWriterUtils.buildRowGroupIndexes;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkState;
import static io.airlift.slice.SizeOf.sizeOf;
import static java.lang.Math.toIntExact;
import static java.util.Objects.requireNonNull;
import static java.util.stream.Collectors.toList;
/**
 * Base implementation for ORC/DWRF column writers that buffer values through a
 * dictionary. Row values are accumulated as dictionary indexes per row group;
 * when dictionary encoding stops being profitable the buffered data can be
 * replayed into a direct-encoding writer via {@link #tryConvertToDirect(int)}.
 * Subclasses supply the concrete dictionary, the direct writer, and the typed
 * handling of index segments and output streams.
 *
 * <p>NOTE(review): not thread-safe as far as the visible code shows; callers
 * appear expected to use one writer per column per writing thread — confirm
 * against the enclosing writer's usage.
 */
public abstract class DictionaryColumnWriter
        implements ColumnWriter, DictionaryColumn
{
    // In theory, nulls are stored using bit fields with 1 bit per entry
    // In code, though they use Byte RLE, using 8 is a good heuristic and close to worst case.
    public static final int NUMBER_OF_NULLS_PER_BYTE = 8;
    // Capacity of the scratch index buffer; once rowGroupOffset reaches this,
    // the buffered indexes are flushed into the row-group builder (see writeBlock).
    private static final int EXPECTED_ROW_GROUP_SEGMENT_SIZE = 10_000;

    protected final int column;
    protected final int sequence;
    protected final ColumnWriterOptions columnWriterOptions;
    protected final Optional<DwrfDataEncryptor> dwrfEncryptor;
    protected final OrcEncoding orcEncoding;
    protected final MetadataWriter metadataWriter;

    private final CompressedMetadataWriter compressedMetadataWriter;
    // Finished row groups awaiting stripe flush.
    private final List<DictionaryRowGroup> rowGroups = new ArrayList<>();
    private final DictionaryRowGroupBuilder rowGroupBuilder = new DictionaryRowGroupBuilder();
    // How many stripes to keep direct encoding after a conversion (see reset()).
    private final int preserveDirectEncodingStripeCount;

    private PresentOutputStream presentStream;
    private LongOutputStream dataStream;
    // Scratch buffer of dictionary indexes for the in-progress row group.
    private int[] rowGroupIndexes;
    // Number of valid entries currently held in rowGroupIndexes.
    private int rowGroupOffset;
    private long rawBytesEstimate;
    private long totalValueCount;
    private long totalNonNullValueCount;
    private boolean closed;
    private boolean inRowGroup;
    // True once the writer has been converted to direct encoding.
    private boolean directEncoded;
    private long rowGroupRetainedSizeInBytes;
    private int preserveDirectEncodingStripeIndex;

    public DictionaryColumnWriter(
            int column,
            int sequence,
            ColumnWriterOptions columnWriterOptions,
            Optional<DwrfDataEncryptor> dwrfEncryptor,
            OrcEncoding orcEncoding,
            MetadataWriter metadataWriter)
    {
        checkArgument(column >= 0, "column is negative");
        checkArgument(sequence >= 0, "sequence is negative");
        this.column = column;
        this.sequence = sequence;
        this.columnWriterOptions = requireNonNull(columnWriterOptions, "columnWriterOptions is null");
        this.dwrfEncryptor = requireNonNull(dwrfEncryptor, "dwrfEncryptor is null");
        this.orcEncoding = requireNonNull(orcEncoding, "orcEncoding is null");
        this.compressedMetadataWriter = new CompressedMetadataWriter(metadataWriter, columnWriterOptions, dwrfEncryptor);
        this.preserveDirectEncodingStripeCount = columnWriterOptions.getPreserveDirectEncodingStripeCount();
        this.dataStream = createDataOutputStream(columnWriterOptions, dwrfEncryptor, orcEncoding);
        this.presentStream = new PresentOutputStream(columnWriterOptions, dwrfEncryptor);
        this.metadataWriter = requireNonNull(metadataWriter, "metadataWriter is null");
        this.rowGroupIndexes = new int[EXPECTED_ROW_GROUP_SEGMENT_SIZE];
    }

    // Creates (or returns existing) fallback direct-encoding writer.
    protected abstract ColumnWriter createDirectColumnWriter();

    protected abstract ColumnWriter getDirectColumnWriter();

    // Replay one segment of dictionary indexes into the direct writer; a false
    // return signals the direct form would exceed maxDirectBytes.
    protected abstract boolean tryConvertRowGroupToDirect(int dictionaryIndexCount, int[] dictionaryIndexes, int maxDirectBytes);

    protected abstract boolean tryConvertRowGroupToDirect(int dictionaryIndexCount, short[] dictionaryIndexes, int maxDirectBytes);

    protected abstract boolean tryConvertRowGroupToDirect(int dictionaryIndexCount, byte[] dictionaryIndexes, int maxDirectBytes);

    protected abstract ColumnEncoding getDictionaryColumnEncoding();

    // Append a block's values to the dictionary, writing their dictionary
    // indexes into rowGroupIndexes starting at rowGroupOffset.
    protected abstract BlockStatistics addBlockToDictionary(Block block, int rowGroupOffset, int[] rowGroupIndexes);

    protected abstract long getRetainedDictionaryBytes();

    /**
     * writeDictionary to the Streams and optionally return new mappings to be used.
     * The mapping is used for sorting the indexes. ORC dictionary needs to be sorted,
     * but DWRF sorting is optional.
     *
     * @return new mappings to be used for indexes, if no new mappings, Optional.empty.
     */
    protected abstract Optional<int[]> writeDictionary();

    protected abstract void beginDataRowGroup();

    protected abstract void movePresentStreamToDirectWriter(PresentOutputStream presentStream);

    protected abstract void writeDataStreams(
            int rowGroupValueCount,
            byte[] rowGroupIndexes,
            Optional<int[]> originalDictionaryToSortedIndex,
            LongOutputStream dataStream);

    protected abstract void writeDataStreams(
            int rowGroupValueCount,
            short[] rowGroupIndexes,
            Optional<int[]> originalDictionaryToSortedIndex,
            LongOutputStream dataStream);

    protected abstract void writeDataStreams(
            int rowGroupValueCount,
            int[] rowGroupIndexes,
            Optional<int[]> originalDictionaryToSortedIndex,
            LongOutputStream dataStream);

    protected abstract void resetDictionary();

    protected abstract void closeDictionary();

    protected abstract List<StreamDataOutput> getDictionaryStreams(int column, int sequence);

    protected abstract ColumnStatistics createColumnStatistics();

    @Override
    public long getRawBytesEstimate()
    {
        // Only meaningful while in dictionary mode.
        checkState(!directEncoded);
        return rawBytesEstimate;
    }

    @Override
    public boolean isDirectEncoded()
    {
        return directEncoded;
    }

    @Override
    public int getIndexBytes()
    {
        checkState(!directEncoded);
        return toIntExact(estimateIndexBytesPerValue(getDictionaryEntries()) * getNonNullValueCount());
    }

    @Override
    public long getValueCount()
    {
        checkState(!directEncoded);
        return totalValueCount;
    }

    @Override
    public long getNonNullValueCount()
    {
        checkState(!directEncoded);
        return totalNonNullValueCount;
    }

    @Override
    public long getNullValueCount()
    {
        checkState(!directEncoded);
        return totalValueCount - totalNonNullValueCount;
    }

    // Replays every buffered index segment of one row group into the direct writer.
    private boolean tryConvertRowGroupToDirect(byte[][] byteSegments, short[][] shortSegments, int[][] intSegments, int maxDirectBytes)
    {
        // The row group indexes may be split between byte, short and int segments. They need to be processed in
        // byte, short and int order. If they are processed in different order, it will result in data corruption.
        if (byteSegments != null) {
            for (byte[] byteIndexes : byteSegments) {
                if (!tryConvertRowGroupToDirect(byteIndexes.length, byteIndexes, maxDirectBytes)) {
                    return false;
                }
            }
        }
        if (shortSegments != null) {
            for (short[] shortIndexes : shortSegments) {
                if (!tryConvertRowGroupToDirect(shortIndexes.length, shortIndexes, maxDirectBytes)) {
                    return false;
                }
            }
        }
        if (intSegments != null) {
            for (int[] intIndexes : intSegments) {
                if (!tryConvertRowGroupToDirect(intIndexes.length, intIndexes, maxDirectBytes)) {
                    return false;
                }
            }
        }
        return true;
    }

    /**
     * Attempts to replay all buffered dictionary data (finished row groups plus
     * the in-progress one) into the direct writer.
     *
     * @param maxDirectBytes budget for the direct writer's buffered size
     * @return the direct writer's buffered size on success; empty when the
     *         conversion would exceed the budget (the direct writer is reset)
     */
    @Override
    public OptionalInt tryConvertToDirect(int maxDirectBytes)
    {
        checkState(!closed);
        checkState(!directEncoded);
        ColumnWriter directWriter = createDirectColumnWriter();
        checkState(directWriter.getBufferedBytes() == 0, "direct writer should have no data");
        for (DictionaryRowGroup rowGroup : rowGroups) {
            beginDataRowGroup();
            // todo we should be able to pass the stats down to avoid recalculating min and max
            boolean success = tryConvertRowGroupToDirect(rowGroup.getByteSegments(), rowGroup.getShortSegments(), rowGroup.getIntSegments(), maxDirectBytes);
            if (!success) {
                return resetDirectWriter(directWriter);
            }
            directWriter.finishRowGroup();
        }
        if (inRowGroup) {
            beginDataRowGroup();
            boolean success = tryConvertRowGroupToDirect(
                    rowGroupBuilder.getByteSegments(),
                    rowGroupBuilder.getShortSegments(),
                    rowGroupBuilder.getIntegerSegments(),
                    maxDirectBytes);
            if (!success) {
                return resetDirectWriter(directWriter);
            }
            // Also replay the indexes still sitting in the scratch buffer.
            if (!tryConvertRowGroupToDirect(rowGroupOffset, rowGroupIndexes, maxDirectBytes)) {
                return resetDirectWriter(directWriter);
            }
        }
        else {
            checkState(rowGroupOffset == 0);
        }
        // Conversion to DirectStream succeeded, Transfer the present stream to direct writer and assign
        // this a new PresentStream, so one writer is responsible for one present stream.
        movePresentStreamToDirectWriter(presentStream);
        presentStream = new PresentOutputStream(columnWriterOptions, dwrfEncryptor);
        // free the dictionary
        rawBytesEstimate = 0;
        totalValueCount = 0;
        totalNonNullValueCount = 0;
        resetRowGroups();
        closeDictionary();
        resetDictionary();
        directEncoded = true;
        return OptionalInt.of(toIntExact(directWriter.getBufferedBytes()));
    }

    // Discards a partially-filled direct writer after a failed conversion.
    private OptionalInt resetDirectWriter(ColumnWriter directWriter)
    {
        directWriter.close();
        directWriter.reset();
        return OptionalInt.empty();
    }

    @Override
    public Map<Integer, ColumnEncoding> getColumnEncodings()
    {
        checkState(closed);
        if (directEncoded) {
            return getDirectColumnWriter().getColumnEncodings();
        }
        return ImmutableMap.of(column, getDictionaryColumnEncoding());
    }

    @Override
    public void beginRowGroup()
    {
        checkState(!inRowGroup);
        inRowGroup = true;
        if (directEncoded) {
            getDirectColumnWriter().beginRowGroup();
        }
        else {
            // Checkpoint lets getIndexStreams() address this row group later.
            presentStream.recordCheckpoint();
        }
    }

    /**
     * Buffers one block of values, updating the present stream, the
     * dictionary, and the per-row-group index buffer.
     *
     * @return estimated raw bytes represented by the block (including nulls)
     */
    @Override
    public long writeBlock(Block block)
    {
        checkState(!closed);
        checkArgument(block.getPositionCount() > 0, "Block is empty");
        if (directEncoded) {
            return getDirectColumnWriter().writeBlock(block);
        }
        rowGroupIndexes = ensureCapacity(rowGroupIndexes, rowGroupOffset + block.getPositionCount(), MEDIUM, PRESERVE);
        for (int position = 0; position < block.getPositionCount(); position++) {
            presentStream.writeBoolean(!block.isNull(position));
        }
        BlockStatistics blockStatistics = addBlockToDictionary(block, rowGroupOffset, rowGroupIndexes);
        totalNonNullValueCount += blockStatistics.getNonNullValueCount();
        rawBytesEstimate += blockStatistics.getRawBytesEstimate();
        rowGroupOffset += blockStatistics.getNonNullValueCount();
        totalValueCount += block.getPositionCount();
        // Flush the scratch buffer into the row-group builder when it fills up.
        if (rowGroupOffset >= EXPECTED_ROW_GROUP_SEGMENT_SIZE) {
            rowGroupBuilder.addIndexes(getDictionaryEntries() - 1, rowGroupIndexes, rowGroupOffset);
            rowGroupOffset = 0;
        }
        return blockStatistics.getRawBytesIncludingNulls();
    }

    @Override
    public Map<Integer, ColumnStatistics> finishRowGroup()
    {
        checkState(!closed);
        checkState(inRowGroup);
        inRowGroup = false;
        if (directEncoded) {
            return getDirectColumnWriter().finishRowGroup();
        }
        ColumnStatistics statistics = createColumnStatistics();
        // Flush any remaining scratch indexes, then seal the row group.
        rowGroupBuilder.addIndexes(getDictionaryEntries() - 1, rowGroupIndexes, rowGroupOffset);
        DictionaryRowGroup rowGroup = rowGroupBuilder.build(statistics);
        rowGroups.add(rowGroup);
        if (columnWriterOptions.isIgnoreDictionaryRowGroupSizes()) {
            rowGroupRetainedSizeInBytes += rowGroup.getColumnStatistics().getRetainedSizeInBytes();
        }
        else {
            rowGroupRetainedSizeInBytes += rowGroup.getShallowRetainedSizeInBytes();
            rowGroupRetainedSizeInBytes += rowGroupBuilder.getIndexRetainedBytes();
        }
        rowGroupOffset = 0;
        rowGroupBuilder.reset();
        return ImmutableMap.of(column, statistics);
    }

    @Override
    public void close()
    {
        checkState(!closed);
        checkState(!inRowGroup);
        closed = true;
        if (directEncoded) {
            getDirectColumnWriter().close();
        }
        else {
            bufferOutputData();
        }
    }

    @Override
    public Map<Integer, ColumnStatistics> getColumnStripeStatistics()
    {
        checkState(closed);
        if (directEncoded) {
            return getDirectColumnWriter().getColumnStripeStatistics();
        }
        return ImmutableMap.of(column, ColumnStatistics.mergeColumnStatistics(rowGroups.stream()
                .map(DictionaryRowGroup::getColumnStatistics)
                .collect(toList())));
    }

    // Writes the dictionary and all buffered row-group indexes to the output
    // streams; called once from close() when still dictionary-encoded.
    private void bufferOutputData()
    {
        checkState(closed);
        checkState(!directEncoded);
        Optional<int[]> originalDictionaryToSortedIndex = writeDictionary();
        if (!rowGroups.isEmpty()) {
            dataStream.recordCheckpoint();
        }
        for (DictionaryRowGroup rowGroup : rowGroups) {
            // The row group indexes may be split between byte, short and int segments. They need to be processed in
            // byte, short and int order. If they are processed in different order, it will result in data corruption.
            byte[][] byteSegments = rowGroup.getByteSegments();
            if (byteSegments != null) {
                for (byte[] byteIndexes : byteSegments) {
                    writeDataStreams(
                            byteIndexes.length,
                            byteIndexes,
                            originalDictionaryToSortedIndex,
                            dataStream);
                }
            }
            short[][] shortSegments = rowGroup.getShortSegments();
            if (shortSegments != null) {
                for (short[] shortIndexes : shortSegments) {
                    writeDataStreams(
                            shortIndexes.length,
                            shortIndexes,
                            originalDictionaryToSortedIndex,
                            dataStream);
                }
            }
            int[][] intSegments = rowGroup.getIntSegments();
            if (intSegments != null) {
                for (int[] integerIndexes : intSegments) {
                    writeDataStreams(
                            integerIndexes.length,
                            integerIndexes,
                            originalDictionaryToSortedIndex,
                            dataStream);
                }
            }
            dataStream.recordCheckpoint();
        }
        closeDictionary();
        dataStream.close();
        presentStream.close();
    }

    @Override
    public List<StreamDataOutput> getIndexStreams(Optional<List<? extends StreamCheckpoint>> prependCheckpoints)
            throws IOException
    {
        checkState(closed);
        if (directEncoded) {
            return getDirectColumnWriter().getIndexStreams(prependCheckpoints);
        }
        boolean compressed = columnWriterOptions.getCompressionKind() != NONE;
        List<ColumnStatistics> rowGroupColumnStatistics = rowGroups.stream().map(DictionaryRowGroup::getColumnStatistics).collect(toList());
        List<RowGroupIndex> rowGroupIndexes = buildRowGroupIndexes(compressed, rowGroupColumnStatistics, prependCheckpoints, presentStream, dataStream);
        Slice slice = compressedMetadataWriter.writeRowIndexes(rowGroupIndexes);
        Stream stream = new Stream(column, sequence, StreamKind.ROW_INDEX, slice.length(), false);
        return ImmutableList.of(new StreamDataOutput(slice, stream));
    }

    @Override
    public List<StreamDataOutput> getDataStreams()
    {
        checkState(closed);
        if (directEncoded) {
            return getDirectColumnWriter().getDataStreams();
        }
        // actually write data
        ImmutableList.Builder<StreamDataOutput> outputDataStreams = ImmutableList.builder();
        presentStream.getStreamDataOutput(column, sequence).ifPresent(outputDataStreams::add);
        outputDataStreams.add(dataStream.getStreamDataOutput(column, sequence));
        outputDataStreams.addAll(getDictionaryStreams(column, sequence));
        return outputDataStreams.build();
    }

    @Override
    public long getBufferedBytes()
    {
        checkState(!closed);
        if (directEncoded) {
            return getDirectColumnWriter().getBufferedBytes();
        }
        // for dictionary columns we report the data we expect to write to the output stream
        long numberOfNullBytes = getNullValueCount() / NUMBER_OF_NULLS_PER_BYTE;
        return getIndexBytes() + getDictionaryBytes() + numberOfNullBytes;
    }

    @VisibleForTesting
    public long getRowGroupRetainedSizeInBytes()
    {
        return rowGroupRetainedSizeInBytes;
    }

    @Override
    public long getRetainedBytes()
    {
        return sizeOf(rowGroupIndexes) +
                rowGroupBuilder.getRetainedSizeInBytes() +
                dataStream.getRetainedBytes() +
                presentStream.getRetainedBytes() +
                getRetainedDictionaryBytes() +
                rowGroupRetainedSizeInBytes;
    }

    private void resetRowGroups()
    {
        rowGroups.clear();
        rowGroupBuilder.reset();
        rowGroupRetainedSizeInBytes = 0;
        rowGroupOffset = 0;
    }

    /** Prepares the writer for the next stripe after a flush. */
    @Override
    public void reset()
    {
        checkState(closed);
        closed = false;
        presentStream.reset();
        // Dictionary data is held in memory, until the Stripe is flushed. OrcOutputStream maintains the
        // allocated buffer and reuses the buffer for writing data. For Direct writer, OrcOutputStream
        // behavior avoids the reallocation of the buffers by maintaining the pool. For Dictionary writer,
        // OrcOutputStream doubles the memory requirement in most cases (one for dictionary and one for
        // OrcOutputBuffer). To avoid this, the streams are reallocated for every stripe.
        dataStream = createDataOutputStream(columnWriterOptions, dwrfEncryptor, orcEncoding);
        resetDictionary();
        resetRowGroups();
        rawBytesEstimate = 0;
        totalValueCount = 0;
        totalNonNullValueCount = 0;
        if (directEncoded) {
            getDirectColumnWriter().reset();
            // Stay direct-encoded for preserveDirectEncodingStripeCount stripes
            // before retrying dictionary encoding.
            if (preserveDirectEncodingStripeIndex >= preserveDirectEncodingStripeCount) {
                directEncoded = false;
                preserveDirectEncodingStripeIndex = 0;
            }
            else {
                preserveDirectEncodingStripeIndex++;
            }
        }
    }

    /** Per-block accounting returned by {@link #addBlockToDictionary}. */
    static class BlockStatistics
    {
        private final int nonNullValueCount;
        private final long rawBytesEstimate;
        private final long rawBytesIncludingNulls;

        public BlockStatistics(int nonNullValueCount, long rawBytesEstimate, long rawBytesIncludingNulls)
        {
            this.nonNullValueCount = nonNullValueCount;
            this.rawBytesEstimate = rawBytesEstimate;
            this.rawBytesIncludingNulls = rawBytesIncludingNulls;
        }

        public int getNonNullValueCount()
        {
            return nonNullValueCount;
        }

        public long getRawBytesEstimate()
        {
            return rawBytesEstimate;
        }

        public long getRawBytesIncludingNulls()
        {
            return rawBytesIncludingNulls;
        }
    }
}
|
from typing import List


def twoSum(nums: List[int], target: int) -> List[int]:
    """Return indices ``[i, j]`` (``i < j``) of two entries of ``nums`` that
    sum to ``target``, or ``None`` when no such pair exists.

    Single pass with a value -> index map: for each element, check whether the
    complement needed to reach ``target`` has already been seen.
    """
    seen = {}  # value -> index of its earliest occurrence
    for idx, value in enumerate(nums):
        needed = target - value
        if needed in seen:
            return [seen[needed], idx]
        seen[value] = idx
    return None
package com.packtpub.yummy.ratings;
import com.packtpub.yummy.ratings.config.AmqpConfig;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.stereotype.Service;
/**
 * Listens for bookmark-deletion events arriving on the AMQP stream and removes
 * the ratings associated with the deleted entity. Dependencies are injected
 * through the Lombok-generated all-args constructor.
 */
@Service
@AllArgsConstructor
@Slf4j
public class EventService {
    // Injected via the @AllArgsConstructor-generated constructor.
    private RatingRepository ratingRepository;

    /**
     * Handles one {@link BookmarkDeleteEvent} from the
     * {@code BOOKMARK_DELETIONS} sink: logs it and removes the matching
     * ratings by entity type and id.
     */
    @StreamListener(AmqpConfig.MessageSink.BOOKMARK_DELETIONS)
    public void processDelete(BookmarkDeleteEvent event) {
        log.info("Received Delete event for {}", event);
        ratingRepository.remove(event.getType(), event.getEntityId());
    }

    /** Message payload: the type and id of the entity whose bookmark was deleted. */
    @Data
    @AllArgsConstructor
    @NoArgsConstructor
    public static class BookmarkDeleteEvent {
        String type, entityId;
    }
}
|
/*
Classe que implementa a interface(GUI) do painel de plugs
*/
package simuladorenigma;
public class PainelPlugs extends javax.swing.JFrame {
    // State of the eight plugboard cables; each cable N has two ends
    // (caboN1 / caboN2). All ends default to 1, matching the first combo-box
    // entry ("A") in the generated UI below.
    int cabo11 = 1, cabo12 = 1, cabo21 = 1, cabo22 = 1, cabo31 = 1, cabo32 = 1, cabo41 = 1, cabo42 = 1, cabo51 = 1, cabo52 = 1, cabo61 = 1, cabo62 = 1, cabo71 = 1, cabo72 = 1, cabo81 = 1, cabo82 = 1;

    // Accessors exposing each cable end's currently selected value.
    public int getCabo11() {
        return cabo11;
    }
    public int getCabo12() {
        return cabo12;
    }
    public int getCabo21() {
        return cabo21;
    }
    public int getCabo22() {
        return cabo22;
    }
    public int getCabo31() {
        return cabo31;
    }
    public int getCabo32() {
        return cabo32;
    }
    public int getCabo41() {
        return cabo41;
    }
    public int getCabo42() {
        return cabo42;
    }
    public int getCabo51() {
        return cabo51;
    }
    public int getCabo52() {
        return cabo52;
    }
    public int getCabo61() {
        return cabo61;
    }
    public int getCabo62() {
        return cabo62;
    }
    public int getCabo71() {
        return cabo71;
    }
    public int getCabo72() {
        return cabo72;
    }
    public int getCabo81() {
        return cabo81;
    }
    public int getCabo82() {
        return cabo82;
    }

    /** Builds the frame and initializes the GUI-builder-generated components. */
    public PainelPlugs() {
        initComponents();
    }
@SuppressWarnings("unchecked")
// <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
private void initComponents() {
parafuso3 = new javax.swing.JLabel();
jPanel1 = new javax.swing.JPanel();
lCabo2 = new javax.swing.JLabel();
lCabo3 = new javax.swing.JLabel();
lCabo4 = new javax.swing.JLabel();
lCabo5 = new javax.swing.JLabel();
lCabo6 = new javax.swing.JLabel();
lCabo7 = new javax.swing.JLabel();
box42 = new javax.swing.JComboBox();
box82 = new javax.swing.JComboBox();
box81 = new javax.swing.JComboBox();
box21 = new javax.swing.JComboBox();
box22 = new javax.swing.JComboBox();
box61 = new javax.swing.JComboBox();
box62 = new javax.swing.JComboBox();
box72 = new javax.swing.JComboBox();
box71 = new javax.swing.JComboBox();
jButton1 = new javax.swing.JButton();
box11 = new javax.swing.JComboBox();
box12 = new javax.swing.JComboBox();
box52 = new javax.swing.JComboBox();
box51 = new javax.swing.JComboBox();
box31 = new javax.swing.JComboBox();
box32 = new javax.swing.JComboBox();
box41 = new javax.swing.JComboBox();
lCabo = new javax.swing.JLabel();
lCabo1 = new javax.swing.JLabel();
parafuso = new javax.swing.JLabel();
parafuso1 = new javax.swing.JLabel();
parafuso2 = new javax.swing.JLabel();
parafuso4 = new javax.swing.JLabel();
parafuso5 = new javax.swing.JLabel();
parafuso6 = new javax.swing.JLabel();
parafuso3.setIcon(new javax.swing.ImageIcon(getClass().getResource("/simuladorenigma/botoesEnigma/parafuso.png"))); // NOI18N
setTitle("Painel de Plugs");
setBackground(new java.awt.Color(108, 106, 105));
setForeground(new java.awt.Color(102, 102, 102));
setResizable(false);
addWindowListener(new java.awt.event.WindowAdapter() {
public void windowClosed(java.awt.event.WindowEvent evt) {
formWindowClosed(evt);
}
public void windowClosing(java.awt.event.WindowEvent evt) {
formWindowClosing(evt);
}
});
jPanel1.setBackground(new java.awt.Color(108, 106, 105));
jPanel1.setToolTipText("Painel de PLugs");
lCabo2.setIcon(new javax.swing.ImageIcon(getClass().getResource("/simuladorenigma/botoesEnigma/cabo.png"))); // NOI18N
lCabo3.setIcon(new javax.swing.ImageIcon(getClass().getResource("/simuladorenigma/botoesEnigma/cabo.png"))); // NOI18N
lCabo4.setIcon(new javax.swing.ImageIcon(getClass().getResource("/simuladorenigma/botoesEnigma/cabo.png"))); // NOI18N
lCabo5.setIcon(new javax.swing.ImageIcon(getClass().getResource("/simuladorenigma/botoesEnigma/cabo.png"))); // NOI18N
lCabo6.setIcon(new javax.swing.ImageIcon(getClass().getResource("/simuladorenigma/botoesEnigma/cabo.png"))); // NOI18N
lCabo7.setIcon(new javax.swing.ImageIcon(getClass().getResource("/simuladorenigma/botoesEnigma/cabo.png"))); // NOI18N
box42.setBackground(new java.awt.Color(204, 204, 204));
box42.setFont(new java.awt.Font("Tahoma", 1, 12)); // NOI18N
box42.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" }));
box42.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
box42ActionPerformed(evt);
}
});
box82.setBackground(new java.awt.Color(204, 204, 204));
box82.setFont(new java.awt.Font("Tahoma", 1, 12)); // NOI18N
box82.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" }));
box82.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
box82ActionPerformed(evt);
}
});
box81.setBackground(new java.awt.Color(204, 204, 204));
box81.setFont(new java.awt.Font("Tahoma", 1, 12)); // NOI18N
box81.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" }));
box81.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
box81ActionPerformed(evt);
}
});
box21.setBackground(new java.awt.Color(204, 204, 204));
box21.setFont(new java.awt.Font("Tahoma", 1, 12)); // NOI18N
box21.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" }));
box21.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
box21ActionPerformed(evt);
}
});
box22.setBackground(new java.awt.Color(204, 204, 204));
box22.setFont(new java.awt.Font("Tahoma", 1, 12)); // NOI18N
box22.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" }));
box22.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
box22ActionPerformed(evt);
}
});
box61.setBackground(new java.awt.Color(204, 204, 204));
box61.setFont(new java.awt.Font("Tahoma", 1, 12)); // NOI18N
box61.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" }));
box61.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
box61ActionPerformed(evt);
}
});
box62.setBackground(new java.awt.Color(204, 204, 204));
box62.setFont(new java.awt.Font("Tahoma", 1, 12)); // NOI18N
box62.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" }));
box62.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
box62ActionPerformed(evt);
}
});
box72.setBackground(new java.awt.Color(204, 204, 204));
box72.setFont(new java.awt.Font("Tahoma", 1, 12)); // NOI18N
box72.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" }));
box72.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
box72ActionPerformed(evt);
}
});
box71.setBackground(new java.awt.Color(204, 204, 204));
box71.setFont(new java.awt.Font("Tahoma", 1, 12)); // NOI18N
box71.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" }));
box71.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
box71ActionPerformed(evt);
}
});
jButton1.setBackground(new java.awt.Color(204, 204, 204));
jButton1.setFont(new java.awt.Font("Tahoma", 1, 12)); // NOI18N
jButton1.setText("Concluido");
jButton1.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
jButton1ActionPerformed(evt);
}
});
box11.setBackground(new java.awt.Color(204, 204, 204));
box11.setFont(new java.awt.Font("Tahoma", 1, 12)); // NOI18N
box11.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" }));
box11.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
box11ActionPerformed(evt);
}
});
box12.setBackground(new java.awt.Color(204, 204, 204));
box12.setFont(new java.awt.Font("Tahoma", 1, 12)); // NOI18N
box12.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" }));
box12.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
box12ActionPerformed(evt);
}
});
box52.setBackground(new java.awt.Color(204, 204, 204));
box52.setFont(new java.awt.Font("Tahoma", 1, 12)); // NOI18N
box52.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" }));
box52.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
box52ActionPerformed(evt);
}
});
box51.setBackground(new java.awt.Color(204, 204, 204));
box51.setFont(new java.awt.Font("Tahoma", 1, 12)); // NOI18N
box51.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" }));
box51.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
box51ActionPerformed(evt);
}
});
box31.setBackground(new java.awt.Color(204, 204, 204));
box31.setFont(new java.awt.Font("Tahoma", 1, 12)); // NOI18N
box31.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" }));
box31.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
box31ActionPerformed(evt);
}
});
box32.setBackground(new java.awt.Color(204, 204, 204));
box32.setFont(new java.awt.Font("Tahoma", 1, 12)); // NOI18N
box32.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" }));
box32.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
box32ActionPerformed(evt);
}
});
box41.setBackground(new java.awt.Color(204, 204, 204));
box41.setFont(new java.awt.Font("Tahoma", 1, 12)); // NOI18N
box41.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" }));
box41.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
box41ActionPerformed(evt);
}
});
lCabo.setIcon(new javax.swing.ImageIcon(getClass().getResource("/simuladorenigma/botoesEnigma/cabo.png"))); // NOI18N
lCabo1.setIcon(new javax.swing.ImageIcon(getClass().getResource("/simuladorenigma/botoesEnigma/cabo.png"))); // NOI18N
parafuso.setIcon(new javax.swing.ImageIcon(getClass().getResource("/simuladorenigma/botoesEnigma/parafuso.png"))); // NOI18N
parafuso1.setIcon(new javax.swing.ImageIcon(getClass().getResource("/simuladorenigma/botoesEnigma/parafuso.png"))); // NOI18N
parafuso2.setIcon(new javax.swing.ImageIcon(getClass().getResource("/simuladorenigma/botoesEnigma/parafuso.png"))); // NOI18N
parafuso4.setIcon(new javax.swing.ImageIcon(getClass().getResource("/simuladorenigma/botoesEnigma/parafuso.png"))); // NOI18N
parafuso5.setIcon(new javax.swing.ImageIcon(getClass().getResource("/simuladorenigma/botoesEnigma/parafuso.png"))); // NOI18N
parafuso6.setIcon(new javax.swing.ImageIcon(getClass().getResource("/simuladorenigma/botoesEnigma/parafuso.png"))); // NOI18N
javax.swing.GroupLayout jPanel1Layout = new javax.swing.GroupLayout(jPanel1);
jPanel1.setLayout(jPanel1Layout);
jPanel1Layout.setHorizontalGroup(
jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(javax.swing.GroupLayout.Alignment.TRAILING, jPanel1Layout.createSequentialGroup()
.addContainerGap()
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(parafuso2)
.addComponent(parafuso1)
.addGroup(jPanel1Layout.createSequentialGroup()
.addGap(27, 27, 27)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
.addGroup(jPanel1Layout.createSequentialGroup()
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
.addComponent(box41, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(box31, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(box21, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(lCabo, javax.swing.GroupLayout.Alignment.TRAILING)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
.addComponent(lCabo2)
.addComponent(lCabo3)))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(box32, javax.swing.GroupLayout.Alignment.TRAILING, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(box42, javax.swing.GroupLayout.Alignment.TRAILING, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addComponent(box22, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)))
.addGroup(jPanel1Layout.createSequentialGroup()
.addComponent(box11, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(lCabo1)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(box12, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)))
.addGap(35, 35, 35)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
.addComponent(box51, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(box61, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(box71, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(box81, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jButton1, javax.swing.GroupLayout.PREFERRED_SIZE, 103, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGroup(jPanel1Layout.createSequentialGroup()
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(lCabo5, javax.swing.GroupLayout.Alignment.TRAILING)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
.addComponent(lCabo7)
.addComponent(lCabo6)))
.addComponent(lCabo4))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(box52, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(box62, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(box72, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(box82, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))))))
.addGap(12, 12, 12)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(parafuso, javax.swing.GroupLayout.Alignment.TRAILING, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addComponent(parafuso4, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE))
.addContainerGap())
.addGroup(jPanel1Layout.createSequentialGroup()
.addGap(188, 188, 188)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
.addComponent(parafuso6)
.addComponent(parafuso5))
.addContainerGap(javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE))
);
jPanel1Layout.setVerticalGroup(
jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jPanel1Layout.createSequentialGroup()
.addContainerGap()
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jPanel1Layout.createSequentialGroup()
.addComponent(parafuso1)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jPanel1Layout.createSequentialGroup()
.addGap(0, 0, Short.MAX_VALUE)
.addComponent(parafuso2))
.addGroup(jPanel1Layout.createSequentialGroup()
.addComponent(parafuso5)
.addGap(17, 17, 17)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(box51, javax.swing.GroupLayout.Alignment.TRAILING, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(box52, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGroup(jPanel1Layout.createSequentialGroup()
.addGap(3, 3, 3)
.addComponent(lCabo4)))
.addComponent(box11, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(lCabo1)
.addComponent(box12, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(javax.swing.GroupLayout.Alignment.TRAILING, jPanel1Layout.createSequentialGroup()
.addGap(18, 18, 18)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
.addGroup(jPanel1Layout.createSequentialGroup()
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
.addComponent(box22, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(box21, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(18, 18, 18)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(box32, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(box31, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(18, 18, 18)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(box42, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(box41, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)))
.addGroup(jPanel1Layout.createSequentialGroup()
.addComponent(lCabo, javax.swing.GroupLayout.PREFERRED_SIZE, 21, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGap(18, 18, 18)
.addComponent(lCabo3)
.addGap(18, 18, 18)
.addComponent(lCabo2, javax.swing.GroupLayout.PREFERRED_SIZE, 21, javax.swing.GroupLayout.PREFERRED_SIZE))))
.addGroup(jPanel1Layout.createSequentialGroup()
.addGap(18, 18, 18)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(javax.swing.GroupLayout.Alignment.TRAILING, jPanel1Layout.createSequentialGroup()
.addComponent(box61, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGap(18, 18, 18)
.addComponent(box71, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGap(18, 18, 18)
.addComponent(box81, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGroup(jPanel1Layout.createSequentialGroup()
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
.addGroup(jPanel1Layout.createSequentialGroup()
.addComponent(lCabo5, javax.swing.GroupLayout.PREFERRED_SIZE, 21, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGap(18, 18, 18)
.addComponent(lCabo6))
.addGroup(jPanel1Layout.createSequentialGroup()
.addComponent(box62, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGap(18, 18, 18)
.addComponent(box72, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)))
.addGap(18, 18, 18)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(lCabo7, javax.swing.GroupLayout.Alignment.TRAILING, javax.swing.GroupLayout.PREFERRED_SIZE, 21, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(box82, javax.swing.GroupLayout.Alignment.TRAILING, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))))))
.addGap(18, 18, 18)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(parafuso6)
.addComponent(jButton1))
.addGap(0, 0, Short.MAX_VALUE))))
.addGroup(jPanel1Layout.createSequentialGroup()
.addComponent(parafuso)
.addGap(217, 217, 217)
.addComponent(parafuso4, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)))
.addContainerGap())
);
javax.swing.GroupLayout layout = new javax.swing.GroupLayout(getContentPane());
getContentPane().setLayout(layout);
layout.setHorizontalGroup(
layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jPanel1, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
);
layout.setVerticalGroup(
layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jPanel1, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
);
pack();
}// </editor-fold>//GEN-END:initComponents
private void box11ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_box11ActionPerformed
    // Intentionally empty: the selection is only read in bulk by jButton1ActionPerformed / formWindowClosing.
}//GEN-LAST:event_box11ActionPerformed
private void box12ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_box12ActionPerformed
    // Intentionally empty: the selection is only read in bulk by jButton1ActionPerformed / formWindowClosing.
}//GEN-LAST:event_box12ActionPerformed
private void box52ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_box52ActionPerformed
    // Intentionally empty: the selection is only read in bulk by jButton1ActionPerformed / formWindowClosing.
}//GEN-LAST:event_box52ActionPerformed
private void box51ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_box51ActionPerformed
    // Intentionally empty: the selection is only read in bulk by jButton1ActionPerformed / formWindowClosing.
}//GEN-LAST:event_box51ActionPerformed
private void box31ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_box31ActionPerformed
    // Intentionally empty: the selection is only read in bulk by jButton1ActionPerformed / formWindowClosing.
}//GEN-LAST:event_box31ActionPerformed
private void box32ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_box32ActionPerformed
    // Intentionally empty: the selection is only read in bulk by jButton1ActionPerformed / formWindowClosing.
}//GEN-LAST:event_box32ActionPerformed
private void box41ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_box41ActionPerformed
    // Intentionally empty: the selection is only read in bulk by jButton1ActionPerformed / formWindowClosing.
}//GEN-LAST:event_box41ActionPerformed
private void box42ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_box42ActionPerformed
    // Intentionally empty: the selection is only read in bulk by jButton1ActionPerformed / formWindowClosing.
}//GEN-LAST:event_box42ActionPerformed
private void box82ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_box82ActionPerformed
    // Intentionally empty: the selection is only read in bulk by jButton1ActionPerformed / formWindowClosing.
}//GEN-LAST:event_box82ActionPerformed
private void box81ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_box81ActionPerformed
    // Intentionally empty: the selection is only read in bulk by jButton1ActionPerformed / formWindowClosing.
}//GEN-LAST:event_box81ActionPerformed
private void box21ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_box21ActionPerformed
    // Intentionally empty: the selection is only read in bulk by jButton1ActionPerformed / formWindowClosing.
}//GEN-LAST:event_box21ActionPerformed
private void box22ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_box22ActionPerformed
    // Intentionally empty: the selection is only read in bulk by jButton1ActionPerformed / formWindowClosing.
}//GEN-LAST:event_box22ActionPerformed
private void box61ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_box61ActionPerformed
    // Intentionally empty: the selection is only read in bulk by jButton1ActionPerformed / formWindowClosing.
}//GEN-LAST:event_box61ActionPerformed
private void box62ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_box62ActionPerformed
    // Intentionally empty: the selection is only read in bulk by jButton1ActionPerformed / formWindowClosing.
}//GEN-LAST:event_box62ActionPerformed
private void box72ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_box72ActionPerformed
    // Intentionally empty: the selection is only read in bulk by jButton1ActionPerformed / formWindowClosing.
}//GEN-LAST:event_box72ActionPerformed
private void box71ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_box71ActionPerformed
    // Intentionally empty: the selection is only read in bulk by jButton1ActionPerformed / formWindowClosing.
}//GEN-LAST:event_box71ActionPerformed
private void jButton1ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton1ActionPerformed
    // "Concluido" button pressed: snapshot every plugboard pair into the caboXY fields.
    // getSelectedIndex() is 0-based, so + 1 maps the first entry ("A") to 1.
    cabo11 = box11.getSelectedIndex() + 1;
    cabo12 = box12.getSelectedIndex() + 1;

    cabo21 = box21.getSelectedIndex() + 1;
    cabo22 = box22.getSelectedIndex() + 1;

    cabo31 = box31.getSelectedIndex() + 1;
    cabo32 = box32.getSelectedIndex() + 1;

    cabo41 = box41.getSelectedIndex() + 1;
    cabo42 = box42.getSelectedIndex() + 1;

    cabo51 = box51.getSelectedIndex() + 1;
    cabo52 = box52.getSelectedIndex() + 1;

    cabo61 = box61.getSelectedIndex() + 1;
    cabo62 = box62.getSelectedIndex() + 1;

    cabo71 = box71.getSelectedIndex() + 1;
    cabo72 = box72.getSelectedIndex() + 1;

    cabo81 = box81.getSelectedIndex() + 1;
    cabo82 = box82.getSelectedIndex() + 1;
}//GEN-LAST:event_jButton1ActionPerformed
private void formWindowClosed(java.awt.event.WindowEvent evt) {//GEN-FIRST:event_formWindowClosed
    // Intentionally empty: the persist-on-close logic lives in formWindowClosing instead.
}//GEN-LAST:event_formWindowClosed
// The "+ 1" offset is needed because getSelectedIndex() returns 0 for the first combo-box position.
private void formWindowClosing(java.awt.event.WindowEvent evt) {//GEN-FIRST:event_formWindowClosing
    // Window is being closed: persist the current plugboard pairs into the caboXY
    // fields so the selections survive the dialog being dismissed without pressing
    // "Concluido". The + 1 compensates for getSelectedIndex() being 0-based.
    cabo11 = box11.getSelectedIndex() + 1;
    cabo12 = box12.getSelectedIndex() + 1;

    cabo21 = box21.getSelectedIndex() + 1;
    cabo22 = box22.getSelectedIndex() + 1;

    cabo31 = box31.getSelectedIndex() + 1;
    cabo32 = box32.getSelectedIndex() + 1;

    cabo41 = box41.getSelectedIndex() + 1;
    cabo42 = box42.getSelectedIndex() + 1;

    cabo51 = box51.getSelectedIndex() + 1;
    cabo52 = box52.getSelectedIndex() + 1;

    cabo61 = box61.getSelectedIndex() + 1;
    cabo62 = box62.getSelectedIndex() + 1;

    cabo71 = box71.getSelectedIndex() + 1;
    cabo72 = box72.getSelectedIndex() + 1;

    cabo81 = box81.getSelectedIndex() + 1;
    cabo82 = box82.getSelectedIndex() + 1;
}//GEN-LAST:event_formWindowClosing
/**
 * Application entry point: installs the Nimbus look and feel when it is
 * available, then creates and shows the plugboard form on the AWT Event
 * Dispatch Thread.
 *
 * @param args the command line arguments (unused)
 */
public static void main(String args[]) {
    /* If Nimbus (introduced in Java SE 6) is not available, stay with the
     * default look and feel. For details see
     * http://download.oracle.com/javase/tutorial/uiswing/lookandfeel/plaf.html
     */
    try {
        for (javax.swing.UIManager.LookAndFeelInfo info : javax.swing.UIManager.getInstalledLookAndFeels()) {
            if ("Nimbus".equals(info.getName())) {
                javax.swing.UIManager.setLookAndFeel(info.getClassName());
                break;
            }
        }
    } catch (ClassNotFoundException | InstantiationException | IllegalAccessException
            | javax.swing.UnsupportedLookAndFeelException ex) {
        // Non-fatal: log the failure and continue with the default look and feel.
        // Multi-catch replaces four identical generated catch blocks.
        java.util.logging.Logger.getLogger(PainelPlugs.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
    }
    /* Create and display the form on the EDT, as required by Swing's threading rules. */
    java.awt.EventQueue.invokeLater(new Runnable() {
        public void run() {
            new PainelPlugs().setVisible(true);
        }
    });
}
// Variables declaration - do not modify//GEN-BEGIN:variables
// NOTE(review): fields generated by the NetBeans GUI builder; the raw JComboBox
// declarations predate generics and would ideally be JComboBox<String> — cannot
// be changed here without the form editor regenerating them.
// boxNM = plugboard letter selector for cable N, end M (read by jButton1ActionPerformed
// and formWindowClosing).
private javax.swing.JComboBox box11;
private javax.swing.JComboBox box12;
private javax.swing.JComboBox box21;
private javax.swing.JComboBox box22;
private javax.swing.JComboBox box31;
private javax.swing.JComboBox box32;
private javax.swing.JComboBox box41;
private javax.swing.JComboBox box42;
private javax.swing.JComboBox box51;
private javax.swing.JComboBox box52;
private javax.swing.JComboBox box61;
private javax.swing.JComboBox box62;
private javax.swing.JComboBox box71;
private javax.swing.JComboBox box72;
private javax.swing.JComboBox box81;
private javax.swing.JComboBox box82;
// "Concluido" confirmation button.
private javax.swing.JButton jButton1;
private javax.swing.JPanel jPanel1;
// lCabo* = decorative cable image labels; parafuso* = decorative screw image labels.
// NOTE(review): parafuso3 is not referenced anywhere in the visible code — possibly
// used by form code outside this chunk; verify before removing.
private javax.swing.JLabel lCabo;
private javax.swing.JLabel lCabo1;
private javax.swing.JLabel lCabo2;
private javax.swing.JLabel lCabo3;
private javax.swing.JLabel lCabo4;
private javax.swing.JLabel lCabo5;
private javax.swing.JLabel lCabo6;
private javax.swing.JLabel lCabo7;
private javax.swing.JLabel parafuso;
private javax.swing.JLabel parafuso1;
private javax.swing.JLabel parafuso2;
private javax.swing.JLabel parafuso3;
private javax.swing.JLabel parafuso4;
private javax.swing.JLabel parafuso5;
private javax.swing.JLabel parafuso6;
// End of variables declaration//GEN-END:variables
}