text
stringlengths 1
1.05M
|
|---|
#!/bin/sh
#
# Smart confirm-before kill-pane command. Only asks for confirmation when
# killing a child process.
set -o errexit -o nounset
# tmux_run CMD...: run the given tmux command, but if the active pane has any
# child processes (i.e. something is running in it), wrap the command in
# `tmux confirm-before` so the user must confirm first.
tmux_run() (
# PID of the active pane's shell: list panes as "active_flag:pid", keep the
# line starting with "1:" (the active pane) and strip the "1:" prefix.
pane_pid="$(tmux list-panes -F "#{pane_active}:#{pane_pid}" |
grep '^1:' |
cut -c 3-)"
# Escape ";" so tmux does not treat it as a command separator inside `run`.
escaped_cmd="$(echo "$*" | sed 's/;/\\\;/g')"
if pgrep -P "$pane_pid" >/dev/null; then
# The pane has child processes: prompt before running the command.
tmux confirm-before -p "kill-pane #P? (y/n)" "run 'tmux $escaped_cmd'"
else
tmux "$@"
fi
)
# Decide how to kill the active pane so that window focus afterwards behaves
# like Terminal.app tabs (see comment below).
main() (
# One line per window in the session, formatted "active_flag:pane_count".
window_list="$(tmux list-windows -F '#{window_active}:#{window_panes}')"
window_count="$(tmux display-message -p '#{session_windows}')"
# Number of panes in the currently active window.
pane_count="$(echo "$window_list" |
grep '^1:' |
cut -c 3-)"
# Mimic Terminal.app's behavior of always moving one tab to the right
# unless at the end.
if [ ! "$pane_count" = 1 ] || [ "$window_count" = 1 ]; then
# Multiple panes in this window (or nowhere else to go): just kill the pane.
tmux_run kill-pane
elif [ "$(echo "$window_list" | tail -n1 | cut -c1)" = 1 ]; then
# Last window is the active one: visit the previous window first —
# presumably so tmux's "last window" points left of the closing window
# and focus falls back there after the kill. TODO confirm against tmux docs.
tmux_run previous-window\; next-window\; kill-pane
else
tmux_run next-window\; previous-window\; kill-pane
fi
)
main
|
/*
* Copyright (C) 2015-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package akka.http.impl.engine.ws
import scala.concurrent.{ Await, Promise }
import scala.concurrent.duration.DurationInt
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.Uri.apply
import akka.http.scaladsl.model.ws._
import akka.stream._
import akka.stream.scaladsl._
import akka.stream.testkit._
import akka.stream.scaladsl.GraphDSL.Implicits._
import org.scalatest.concurrent.Eventually
import java.net.InetSocketAddress
import akka.Done
import akka.http.scaladsl.settings.ClientConnectionSettings
import akka.stream.stage.{ GraphStageLogic, GraphStageWithMaterializedValue, InHandler, OutHandler }
import akka.util.ByteString
import akka.stream.testkit.scaladsl.TestSink
import akka.testkit._
import scala.util.{ Failure, Success }
/**
 * End-to-end WebSocket tests that exercise real TCP connections between the
 * Akka HTTP client and server layers.
 *
 * Stream fuzzing is disabled because these tests depend on timing
 * (`expectNoMessage` windows).
 */
class WebSocketIntegrationSpec extends AkkaSpec("akka.stream.materializer.debug.fuzzing-mode=off")
  with Eventually {

  implicit val materializer = ActorMaterializer()

  "A WebSocket server" must {

    "not reset the connection when no data are flowing" in Utils.assertAllStagesStopped {
      // Messages pushed to this probe are emitted by the server to the client.
      val source = TestPublisher.probe[Message]()
      val bindingFuture = Http().bindAndHandleSync({
        case HttpRequest(_, _, headers, _, _) =>
          val upgrade = headers.collectFirst { case u: UpgradeToWebSocket => u }.get
          upgrade.handleMessages(Flow.fromSinkAndSource(Sink.ignore, Source.fromPublisher(source)), None)
      }, interface = "localhost", port = 0)
      // port = 0 above lets the OS pick a free port; read it back for the client.
      val binding = Await.result(bindingFuture, 3.seconds.dilated)
      val myPort = binding.localAddress.getPort

      val (response, sink) = Http().singleWebSocketRequest(
        WebSocketRequest("ws://127.0.0.1:" + myPort),
        Flow.fromSinkAndSourceMat(TestSink.probe[Message], Source.empty)(Keep.left))

      response.futureValue.response.status.isSuccess should ===(true)
      // An idle period must not tear the connection down...
      sink
        .request(10)
        .expectNoMessage(500.millis)
      // ...and traffic afterwards must still arrive intact.
      source
        .sendNext(TextMessage("hello"))
        .sendComplete()
      sink
        .expectNext(TextMessage("hello"))
        .expectComplete()

      binding.unbind()
    }

    "not reset the connection when no data are flowing and the connection is closed from the client" in Utils.assertAllStagesStopped {
      val source = TestPublisher.probe[Message]()
      val bindingFuture = Http().bindAndHandleSync({
        case HttpRequest(_, _, headers, _, _) =>
          val upgrade = headers.collectFirst { case u: UpgradeToWebSocket => u }.get
          upgrade.handleMessages(Flow.fromSinkAndSource(Sink.ignore, Source.fromPublisher(source)), None)
      }, interface = "localhost", port = 0)
      val binding = Await.result(bindingFuture, 3.seconds.dilated)
      val myPort = binding.localAddress.getPort

      // ByteString pass-through stage whose materialized Promise, once
      // completed, completes its outlet — used below to close the client's
      // write side on demand while `halfClose = true` keeps the read side open.
      val completeOnlySwitch: Flow[ByteString, ByteString, Promise[Done]] = Flow.fromGraph(
        new GraphStageWithMaterializedValue[FlowShape[ByteString, ByteString], Promise[Done]] {
          override val shape: FlowShape[ByteString, ByteString] =
            FlowShape(Inlet("completeOnlySwitch.in"), Outlet("completeOnlySwitch.out"))

          override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Promise[Done]) = {
            val promise = Promise[Done]

            val logic = new GraphStageLogic(shape) with InHandler with OutHandler {
              override def onPush(): Unit = push(shape.out, grab(shape.in))
              override def onPull(): Unit = pull(shape.in)
              override def preStart(): Unit = {
                // Hop back onto the stage's own execution context via the
                // async callback before completing the outlet.
                promise.future.foreach(_ => getAsyncCallback[Done](_ => complete(shape.out)).invoke(Done))(akka.dispatch.ExecutionContexts.sameThreadExecutionContext)
              }

              setHandlers(shape.in, shape.out, this)
            }

            (logic, promise)
          }
        })

      // Assemble the client layers manually over a raw TCP connection so the
      // outgoing byte stream can be severed via `breaker`.
      val ((response, breaker), sink) =
        Source.empty
          .viaMat {
            Http().webSocketClientLayer(WebSocketRequest("ws://localhost:" + myPort))
              .atop(TLSPlacebo())
              .joinMat(completeOnlySwitch.via(
                Tcp().outgoingConnection(new InetSocketAddress("localhost", myPort), halfClose = true)))(Keep.both)
          }(Keep.right)
          .toMat(TestSink.probe[Message])(Keep.both)
          .run()

      response.futureValue.response.status.isSuccess should ===(true)
      sink
        .request(10)
        .expectNoMessage(1500.millis)

      // Sever the client's write side, then verify the server-pushed message
      // still arrives before the stream completes.
      breaker.trySuccess(Done)

      source
        .sendNext(TextMessage("hello"))
        .sendComplete()
      sink
        .expectNext(TextMessage("hello"))
        .expectComplete()

      binding.unbind()
    }

    "echo 100 elements and then shut down without error" in Utils.assertAllStagesStopped {
      val bindingFuture = Http().bindAndHandleSync({
        case HttpRequest(_, _, headers, _, _) =>
          val upgrade = headers.collectFirst { case u: UpgradeToWebSocket => u }.get
          // Flow.apply is the identity flow: every message is echoed back.
          upgrade.handleMessages(Flow.apply, None)
      }, interface = "localhost", port = 0)
      val binding = Await.result(bindingFuture, 3.seconds.dilated)
      val myPort = binding.localAddress.getPort

      val N = 100

      // The exchange must not log any "header not allowed in responses" warnings.
      EventFilter.warning(pattern = "HTTP header .* is not allowed in responses", occurrences = 0) intercept {
        val (response, count) = Http().singleWebSocketRequest(
          WebSocketRequest("ws://127.0.0.1:" + myPort),
          Flow.fromSinkAndSourceMat(
            Sink.fold(0)((n, _: Message) => n + 1),
            Source.repeat(TextMessage("hello")).take(N))(Keep.left))

        count.futureValue should ===(N)
      }

      binding.unbind()
    }

    "send back 100 elements and then terminate without error even when not ordinarily closed" in Utils.assertAllStagesStopped {
      val N = 100

      // Server handler: ignores incoming messages (the merge arm fed by
      // mapMsgToInt never emits) while pushing N generated text messages.
      val handler = Flow.fromGraph(GraphDSL.create() { implicit b =>
        val merge = b.add(Merge[Int](2))

        // convert to int so we can connect to merge
        val mapMsgToInt = b.add(Flow[Message].map(_ => -1))
        val mapIntToMsg = b.add(Flow[Int].map(x => TextMessage.Strict(s"Sending: $x")))

        // source we want to use to send message to the connected websocket sink
        val rangeSource = b.add(Source(1 to N))

        mapMsgToInt ~> merge // this part of the merge will never provide msgs
        rangeSource ~> merge ~> mapIntToMsg

        FlowShape(mapMsgToInt.in, mapIntToMsg.out)
      })

      val bindingFuture = Http().bindAndHandleSync({
        case HttpRequest(_, _, headers, _, _) =>
          val upgrade = headers.collectFirst { case u: UpgradeToWebSocket => u }.get
          upgrade.handleMessages(handler, None)
      }, interface = "localhost", port = 0)
      val binding = Await.result(bindingFuture, 3.seconds.dilated)
      val myPort = binding.localAddress.getPort

      @volatile var messages = 0

      val (switch, completion) =
        Source.maybe
          .viaMat {
            Http().webSocketClientLayer(WebSocketRequest("ws://localhost:" + myPort))
              .atop(TLSPlacebo())
              // the resource leak of #19398 existed only for severed websocket connections
              .atopMat(KillSwitches.singleBidi[ByteString, ByteString])(Keep.right)
              .join(Tcp().outgoingConnection(new InetSocketAddress("localhost", myPort), halfClose = true))
          }(Keep.right)
          .toMat(Sink.foreach(_ => messages += 1))(Keep.both)
          .run()

      eventually(messages should ===(N))

      // breaker should have been fulfilled long ago
      switch.shutdown()
      completion.futureValue

      binding.unbind()
    }

  }

  "A websocket client" should {
    "fail the materialized future if the request fails" in {
      // The URI path advertises it: no server answers here, so the connect
      // attempt must fail within the (dilated) connecting timeout.
      val flow = Http().webSocketClientFlow(
        WebSocketRequest("ws://127.0.0.1:65535/no/server/here"),
        settings = ClientConnectionSettings(system).withConnectingTimeout(250.millis.dilated))

      val future = Source.maybe[Message].viaMat(flow)(Keep.right).to(Sink.ignore).run()
      import system.dispatcher
      whenReady(future.map(r => Success(r)).recover { case ex => Failure(ex) }) { resTry =>
        resTry.isFailure should ===(true)
        resTry.failed.get.getMessage should ===("Connection failed.")
      }
    }
  }

}
|
import LanguageContext from '../../../contexts/languageContext';
import SessionContext from '../../../contexts/sessionContext';
import {Button, Card, Table} from 'react-bootstrap';
import {refreshPage} from '../../sharedResources';
import {useContext, useState} from 'react';
const Beatmap = props => {
const {beatmap, page, setPage} = props;
const session = useContext(SessionContext);
const language = useContext(LanguageContext);
const [cover, setCover] = useState(`https://assets.ppy.sh/beatmaps/${beatmap.beatmapset_id}/covers/cover.jpg`);
const [name, setName] = useState(<Card.Title className="text-center fs-6 text-truncate">{beatmap.title}</Card.Title>);
const redirectOsuWebsite = () => window.open(`https://osu.ppy.sh/beatmaps/${beatmap.id}`);
const changeStatus = async () => {
const response = await fetch(`${process.env.REACT_APP_SERVER_API}web/beatmaps/${beatmap.id}/status`, {
method: 'PUT', body: JSON.stringify({active: !beatmap.active}), headers: {
Authorization: session.token, Accept: 'application/json', 'Content-Type': 'application/json'
}
});
if (response.ok) refreshPage(page, setPage);
};
return (<Card className="shadow mb-3">
<Card.Img
variant="top"
referrerPolicy="no-referrer"
alt={beatmap.title}
src={cover}
onClick={redirectOsuWebsite}
onError={() => setCover(`${process.env.PUBLIC_URL}/cover.jpg`)}
className="select-cursor"
/>
<Card.Body>
<div
onMouseOver={() => setName(<Card.Title className="text-center fs-6"> {beatmap.title}</Card.Title>)}
onMouseLeave={() => setName(<Card.Title className="text-center fs-6 text-truncate">{beatmap.title}</Card.Title>)}
onClick={redirectOsuWebsite}
className="select-cursor">
{name}
</div>
<Table size="sm">
<tbody>
<tr>
<td>{language.bpm}:</td>
<td>{beatmap.bpm}</td>
</tr>
<tr>
<td>{language.difficulty}:</td>
<td>
{beatmap.difficulty_rating}
<i className="bi bi-star"/>
</td>
</tr>
<tr>
<td>{language.type}:</td>
<td>
{beatmap.stream_length < 9 && language.bursts}
{beatmap.stream_length > 8 && beatmap.stream_length < 25 && language.streams}
{beatmap.stream_length > 24 && language.deathstreams}
</td>
</tr>
<tr>
<td>{language.category}:</td>
<td>{language[beatmap.ranked]}</td>
</tr>
<tr>
<td>{language.favorites}:</td>
<td>{beatmap.favourite_count}</td>
</tr>
</tbody>
</Table>
{session && beatmap.active && (<Button className="container-fluid" variant="danger" onClick={changeStatus}>
{language.disable}
</Button>)}
{session && !beatmap.active && (<Button className="container-fluid" variant="success" onClick={changeStatus}>
{language.enable}
</Button>)}
</Card.Body>
</Card>);
};
export default Beatmap;
|
import { SuggestionDocument } from './models';
export type Href = string;
/**
 * Groups suggestion documents by the page location (href) they belong to.
 *
 * Suggestions without a truthy `location` are skipped. Both the map's key
 * order and each bucket's entry order follow the input order.
 *
 * @param suggestions partial suggestion documents to group
 * @returns map from location href to the suggestions recorded for it
 */
export function reduceByLocation(
  suggestions: Array<Partial<SuggestionDocument>>
): Map<Href, Array<Partial<SuggestionDocument>>> {
  const grouped = new Map<Href, Array<Partial<SuggestionDocument>>>();
  for (const suggestion of suggestions) {
    const { location } = suggestion;
    if (location) {
      const bucket = grouped.get(location);
      if (bucket === undefined) {
        grouped.set(location, [suggestion]);
      } else {
        bucket.push(suggestion);
      }
    }
  }
  return grouped;
}
// const funStuff: SuggestionDocument[] = [
// {
// context:
// ', click the browser action button to see a list of the suggestions. Make any changes\n you choose to your documents and publish them the normal way. Simple.\n ',
// createdAt: 1514402425728,
// elementPath: 'BODY 1 DIV 0 DIV 0 MAIN 2 DIV 0 DIV 1 P 8',
// href: 'https://garyb432.github.io/busy-bishop/',
// id: '7442ea64-5528-409e-80ce-d2fff8d9d6d6',
// selectedText: 'browser',
// selectionStart: 12,
// suggestedText: 'extension',
// textNodeIndex: 2,
// },
// {
// context:
// "\n Select a small amount of text containing the mistake you'd like to correct. Its simple. Just select\n ",
// createdAt: 1514402490477,
// elementPath: 'BODY 1 DIV 0 DIV 0 MAIN 2 DIV 0 DIV 1 P 4',
// href: 'https://garyb432.github.io/busy-bishop/',
// id: 'ff15bfd5-af48-4f45-9221-90c583ba0b07',
// selectedText: 'Its',
// selectionStart: 89,
// suggestedText: "It's",
// textNodeIndex: 0,
// },
// {
// context:
// "\n Select a small amount of text containing the mistake you'd like to correct. Its simple. Just select\n ",
// createdAt: 1514402842233,
// elementPath: 'BODY 1 DIV 0 DIV 0 MAIN 2 DIV 0 DIV 1 P 4',
// href: 'https://garyb432.github.io/busy-bishop/',
// id: '219ffdb3-3ea0-47c6-9b28-725a66f5a343',
// selectedText: 'small',
// selectionStart: 22,
// suggestedText: 'minute',
// textNodeIndex: 0,
// },
// ];
|
// Doxygen-generated search index fragment: maps the lowercased search key
// "world.h" to the display name "World.h" and its documentation page.
// Do not edit by hand — regenerated by Doxygen.
var searchData=
[
  ['world_2eh',['World.h',['../World_8h.html',1,'']]]
];
|
package com.klk.mobilefingerprint.dialogs;
import android.app.AlertDialog;
import android.content.Context;
import android.content.DialogInterface;
import com.klk.mobilefingerprint.R;
/**
 * Confirmation dialog shown when the user finishes enrolling fingerprints.
 *
 * The message is "&lt;finger count&gt; &lt;finish-enroll label&gt;"; the positive button
 * dismisses the dialog (database update still TODO) and the negative button
 * cancels it.
 */
public class ConfirmFinishEnrollDialog extends AlertDialog.Builder {
    // Enrollment id this dialog refers to (currently only stored).
    private int mId;
    // Number of enrolled fingers; prefixed to the dialog message.
    private int mFingerTotal;
    private Context mContext;

    public ConfirmFinishEnrollDialog(int id, int fingerTotal, Context context) {
        super(context);
        this.mContext = context;
        this.mId = id;
        this.mFingerTotal = fingerTotal;
    }

    @Override
    public AlertDialog create() {
        // Builder setters are independent of each other, so title/message are
        // configured first and the buttons afterwards.
        setTitle(R.string.label_dialog_title);
        String message = mContext.getResources().getString(R.string.label_message_finish_enroll);
        setMessage(mFingerTotal + " " + message);
        setPositiveButton(R.string.label_confirm_yes, new DialogInterface.OnClickListener() {
            @Override
            public void onClick(DialogInterface dialog, int which) {
                // TODO : Update DB
                dialog.dismiss();
            }
        });
        setNegativeButton(R.string.label_confirm_no, new DialogInterface.OnClickListener() {
            @Override
            public void onClick(DialogInterface dialog, int which) {
                dialog.cancel();
            }
        });
        return super.create();
    }
}
|
<reponame>rovedit/Fort-Candle
#pragma once
#include <type_traits>
namespace tg
{
// Fixed-width unsigned integer aliases, defined locally instead of pulling in
// <cstdint> (this header deliberately avoids most std includes).
using u8 = unsigned char;
using u16 = unsigned short;
using u32 = unsigned int;
using u64 = unsigned long long;
// Library-local size_t, derived from sizeof() instead of including <cstddef>.
using size_t = decltype(sizeof(0));
namespace detail
{
// Forward-declared tag type; intentionally never defined.
struct unused;

// Minimal local stand-ins for std::true_type / std::false_type.
struct true_type
{
    static constexpr bool value = true;
};
struct false_type
{
    static constexpr bool value = false;
};

constexpr int min(int a, int b) { return a < b ? a : b; }
constexpr int max(int a, int b) { return a < b ? b : a; }

// Overload-ranking helper: priority_tag<N> implicitly converts to
// priority_tag<M> for M < N, so overloads taking a higher tag win.
template <int D>
struct priority_tag : priority_tag<D - 1>
{
};
template <>
struct priority_tag<0>
{
};

// Detection idiom: can_apply<Z, void, Ts...> derives true_type iff Z<Ts...>
// is a well-formed type.
// see https://stackoverflow.com/questions/44395169/why-is-sfinae-on-if-constexpr-not-allowed
template <template <class...> class, class, class...>
struct can_apply : std::false_type
{
};
template <template <class...> class Z, class... Ts>
struct can_apply<Z, std::void_t<Z<Ts...>>, Ts...> : std::true_type
{
};
} // namespace detail
// True iff the template Z can be instantiated with Ts... (detection idiom).
template <template <class...> class Z, class... Ts>
constexpr bool can_apply = detail::can_apply<Z, void, Ts...>::value;

// Dependent false, for static_asserts that must only fire on instantiation.
template <class...>
constexpr bool always_false = false;

template <class...>
using void_t = void;

// Wrapper that blocks template argument deduction on a parameter.
template <class T>
struct dont_deduce_t
{
    using type = T;
};
template <class T>
using dont_deduce = typename dont_deduce_t<T>::type;

// Minimal aggregate pair with heterogeneous equality comparison.
template <class A, class B>
struct pair
{
    A first;
    B second;

    template <class C, class D>
    constexpr bool operator==(pair<C, D> const& rhs) const noexcept
    {
        return first == rhs.first && second == rhs.second;
    }
    template <class C, class D>
    constexpr bool operator!=(pair<C, D> const& rhs) const noexcept
    {
        return first != rhs.first || second != rhs.second;
    }
};
template <class A, class B>
pair(A const&, B const&)->pair<A, B>;

// Generic member visitor: invokes i(member, name) for each member of a pair.
template <class I, class A, class B>
constexpr void introspect(I&& i, pair<A, B>& p)
{
    i(p.first, "first");
    i(p.second, "second");
}

// Minimal aggregate triple (no comparison operators, no introspect overload).
template <class A, class B, class C>
struct triple
{
    A first;
    B second;
    C third;
};
// Local reimplementation of std::enable_if / std::enable_if_t.
template <bool, class T = void>
struct enable_if_t
{
};
template <class T>
struct enable_if_t<true, T>
{
    using type = T;
};
template <bool cond, class T = void>
using enable_if = typename enable_if_t<cond, T>::type;

// Local reimplementation of std::is_same.
template <class A, class B>
struct is_same_t
{
    static constexpr bool value = false;
};
template <class A>
struct is_same_t<A, A>
{
    static constexpr bool value = true;
};
template <class A, class B>
static constexpr bool is_same = is_same_t<A, B>::value;

// Local reimplementation of std::remove_reference.
template <class T>
struct remove_reference_t
{
    using type = T;
};
template <class T>
struct remove_reference_t<T&>
{
    using type = T;
};
template <class T>
struct remove_reference_t<T&&>
{
    using type = T;
};
template <class T>
using remove_reference = typename remove_reference_t<T>::type;

// Local reimplementation of std::remove_const (top-level const only).
template <class T>
struct remove_const_t
{
    using type = T;
};
template <class T>
struct remove_const_t<const T>
{
    using type = T;
};
template <class T>
using remove_const = typename remove_const_t<T>::type;

template <class T>
using remove_const_ref = remove_const<remove_reference<T>>;

// Local reimplementations of std::forward / std::move.
template <class T>
constexpr T&& forward(remove_reference<T>& t) noexcept
{
    return static_cast<T&&>(t);
}
template <class T>
constexpr T&& forward(remove_reference<T>&& t) noexcept
{
    return static_cast<T&&>(t);
}
template <typename T>
constexpr remove_reference<T>&& move(T&& t) noexcept
{
    return static_cast<remove_reference<T>&&>(t);
}

// Reinterprets the bytes of one same-size type as another.
// NOTE(review): reading from an inactive union member is technically
// undefined behavior in standard C++ (though widely supported by compilers);
// std::bit_cast or memcpy would be the conforming alternatives if the
// no-std-header constraint below is ever relaxed.
template <class To, class From>
To bit_cast(From f)
{
    static_assert(sizeof(From) == sizeof(To), "can only bitcast between same-size types");
    // NOTE: std::memcpy includes an std header which we want to avoid
    union {
        From from;
        To to;
    } u;
    u.from = f;
    return u.to;
}
// same_or<A, B>: substitutes B when A is void, otherwise keeps A.
template <class A, class B>
struct same_or_t
{
    using type = A;
};
template <class B>
struct same_or_t<void, B>
{
    using type = B;
};
// returns A == void ? B : A   (comment fixed: the specializations above pick
// B exactly when A is void)
template <class A, class B>
using same_or = typename same_or_t<A, B>::type;

// Local reimplementation of std::conditional.
template <bool B, class T, class F>
struct conditional_type_t
{
    using type = T;
};
template <class T, class F>
struct conditional_type_t<false, T, F>
{
    using type = F;
};
template <bool B, class T, class F>
using conditional_type = typename conditional_type_t<B, T, F>::type;
namespace detail
{
// SFINAE probe: the int-returning overload is viable iff Container has a
// data() whose pointer converts to ElementT* and a size() convertible to
// size_t; otherwise the char-returning fallback is chosen. The result-type
// size difference is inspected by is_container below.
template <class Container, class ElementT>
auto container_test(Container* c) -> decltype(static_cast<ElementT*>(c->data()), static_cast<decltype(sizeof(0))>(c->size()), 0);
template <class Container, class ElementT>
char container_test(...);

// is_range_t<C, E>: true for C-arrays of E (by value or reference, E possibly
// const-qualified) and for containers whose begin() dereferences to something
// convertible to ElementT& and which also provide end().
template <class Container, class ElementT, class = void>
struct is_range_t : false_type
{
};
template <class ElementT, size_t N>
struct is_range_t<ElementT[N], ElementT> : true_type
{
};
template <class ElementT, size_t N>
struct is_range_t<ElementT[N], ElementT const> : true_type
{
};
template <class ElementT, size_t N>
struct is_range_t<ElementT (&)[N], ElementT> : true_type
{
};
template <class ElementT, size_t N>
struct is_range_t<ElementT (&)[N], ElementT const> : true_type
{
};
template <class Container, class ElementT>
struct is_range_t<Container,
                  ElementT,
                  std::void_t<                                                            //
                      decltype(static_cast<ElementT&>(*std::declval<Container>().begin())), //
                      decltype(std::declval<Container>().end())                             //
                      >> : std::true_type
{
};

// is_any_range_t<C>: like is_range_t but without constraining the element
// type — any begin()/end() pair (or C-array) qualifies.
template <class Container, class = void>
struct is_any_range_t : false_type
{
};
template <class ElementT, size_t N>
struct is_any_range_t<ElementT[N]> : true_type
{
};
template <class ElementT, size_t N>
struct is_any_range_t<ElementT (&)[N]> : true_type
{
};
template <class Container>
struct is_any_range_t<Container,
                      std::void_t<                                    //
                          decltype(std::declval<Container>().begin()), //
                          decltype(std::declval<Container>().end())    //
                          >> : std::true_type
{
};
}
// True iff Container exposes contiguous data()/size() compatible with ElementT
// (decided by which container_test overload is viable).
template <class Container, class ElementT>
static constexpr bool is_container = sizeof(detail::container_test<Container, ElementT>(nullptr)) == sizeof(int);

// True iff Container can be iterated yielding ElementT (C-arrays included).
template <class Container, class ElementT>
static constexpr bool is_range = detail::is_range_t<Container, ElementT>::value;

// True iff Container has begin()/end() or is a C-array, element type ignored.
template <class Container>
static constexpr bool is_any_range = detail::is_any_range_t<Container>::value;

// std::begin/std::end-style free functions for member-function containers
// and raw arrays.
template <class C>
constexpr auto begin(C& c) -> decltype(c.begin())
{
    return c.begin();
}
template <class C>
constexpr auto end(C& c) -> decltype(c.end())
{
    return c.end();
}
template <class C>
constexpr auto begin(C const& c) -> decltype(c.begin())
{
    return c.begin();
}
template <class C>
constexpr auto end(C const& c) -> decltype(c.end())
{
    return c.end();
}
template <class T, size_t N>
constexpr T* begin(T (&array)[N])
{
    return array;
}
template <class T, size_t N>
constexpr T* end(T (&array)[N])
{
    return array + N;
}

// Number of elements: size() for containers, N for raw arrays.
template <class ContainerT>
constexpr auto container_size(ContainerT&& c) -> decltype(c.size())
{
    return c.size();
}
template <class T, size_t N>
constexpr size_t container_size(T (&)[N])
{
    return N;
}

// Element type of a range, deduced by dereferencing begin() (specialized for
// C-arrays, which cannot be returned from make_range()).
template <class Range>
struct element_type_t
{
    static Range make_range();
    using type = remove_reference<decltype(*tg::begin(make_range()))>;
};
template <class T, size_t N>
struct element_type_t<T[N]>
{
    using type = T;
};
template <class Range>
using element_type = typename element_type_t<Range>::type;

// Function object that perfectly forwards its argument unchanged.
struct identity_fun
{
    template <class T>
    constexpr T&& operator()(T&& v) const noexcept
    {
        return forward<T>(v);
    }
};
namespace detail
{
// in detail ns to avoid ambiguity clash with std::swap
template <class T>
void swap(T& a, T& b)
{
    T tmp = static_cast<T&&>(a);
    a = static_cast<T&&>(b);
    b = static_cast<T&&>(tmp);
}

// Picks the smallest unsigned type that both represents values below N and is
// at least `Alignment` bytes wide (per the Alignment <= sizeof(type) checks).
template <u64 N, u64 Alignment>
auto helper_size_t()
{
    if constexpr (N < (1 << 8) && Alignment <= 1)
        return u8{};
    else if constexpr (N < (1 << 16) && Alignment <= 2)
        return u16{};
    else if constexpr (N < (1uLL << 32) && Alignment <= 4)
        return u32{};
    else
        return u64{};
}
template <u64 N, u64 Alignment>
using size_t_for = decltype(helper_size_t<N, Alignment>());

/// Indirection workaround for a current MSVC compiler bug (19.23)
/// without indirection: https://godbolt.org/z/iQ19yj
/// with indirection: https://godbolt.org/z/6MoWE4
/// Bug report: https://developercommunity.visualstudio.com/content/problem/800899/false-positive-for-c2975-on-alias-template-fixed-w.html
template <class T, size_t N>
using compact_size_t_typed = size_t_for<N, alignof(T)>;

// Compile-time unsigned-to-string: number_to_string_literal_t peels digits
// off `rem` recursively; digits_to_string_literal materializes them as a
// null-terminated static char array.
// adapted from https://stackoverflow.com/questions/23999573/convert-a-number-to-a-string-literal-with-constexpr
template <unsigned... digits>
struct digits_to_string_literal
{
    static const char value[];
};
template <unsigned... digits>
constexpr char digits_to_string_literal<digits...>::value[] = {('0' + digits)..., 0};
template <unsigned rem, unsigned... digits>
struct number_to_string_literal_t : number_to_string_literal_t<rem / 10, rem % 10, digits...>
{
};
template <unsigned... digits>
struct number_to_string_literal_t<0, digits...> : digits_to_string_literal<digits...>
{
};
template <unsigned num>
inline constexpr char const* number_to_string_literal = number_to_string_literal_t<num>::value;
}
} // namespace tg
|
<gh_stars>1-10
"use strict";

// Auto-generated icon module: SVG descriptor for the U+2B06 (upwards black
// arrow, "⬆") glyph — viewBox plus a single path child. Regenerate rather
// than editing by hand.
Object.defineProperty(exports, "__esModule", {
  value: true
});
exports.u2B06 = void 0;
var u2B06 = {
  "viewBox": "0 0 2600 2760.837",
  "children": [{
    "name": "path",
    "attribs": {
      "d": "M1943 1461h-406v818h-479v-818H656l643-877z"
    },
    "children": []
  }]
};
exports.u2B06 = u2B06;
|
def is_valid_mul_table(matrix):
    """Check whether ``matrix`` is a valid multiplication table.

    The table is valid when the distinct values appearing in its rows are
    exactly the numbers ``1 .. side**2``, where ``side`` is the integer
    square root of the number of rows.

    NOTE(review): the expected count is derived from ``len(matrix) ** 0.5``,
    i.e. from the number of *rows*, not of cells — confirm callers pass the
    table in that shape.

    :param matrix: iterable of rows, each an iterable of numbers
    :return: True when the value set is exactly ``{1, ..., side**2}``
    """
    seen = {value for row in matrix for value in row}
    side = int(len(matrix) ** 0.5)
    expected = side ** 2
    if len(seen) != expected:
        return False
    # Every number 1..side**2 must be present; combined with the size check
    # above this also rules out any extraneous values.
    return all(number in seen for number in range(1, expected + 1))
|
<reponame>XxSEGxX/pro-core
package me.atog.procore.api;
import org.bukkit.plugin.java.JavaPlugin;
/**
 * Marker type for plugins in the pro-core API.
 *
 * NOTE(review): org.bukkit.plugin.java.JavaPlugin is a class, and a Java
 * interface cannot extend a class — this declaration does not compile as
 * written. Confirm the intent: either make this an abstract class extending
 * JavaPlugin, or drop the extends clause and keep it a plain interface.
 */
public interface ProPlugin extends JavaPlugin {
}
|
// Avatar upload widget: binds a WebUploader instance to the #head-upload
// button, restricts uploads to single image files of at most 10 MB, and
// points the #headImg preview at the uploaded file's URL on acceptance.
(function ($) {
    var headUploader;
    headUploader = WebUploader.create({
        swf: 'static/webuploader/Uploader.swf',
        server: '/upload',
        pick: '#head-upload',
        fileNumLimit: 1,
        fileSizeLimit: 10 * 1024 * 1024, // 10 M
        fileSingleSizeLimit:10* 1024 *1024,
        auto: true,          // start uploading as soon as a file is picked
        duplicate:true,
        accept: {
            title: 'Images',
            extensions: 'jpg,png,jpeg,gif,bmp',
            mimeTypes: 'image/*'
        }
    });

    // Reset the uploader on every click — presumably so the fileNumLimit of 1
    // does not block picking a replacement file. TODO confirm.
    $('#head-upload').click(function () {
        headUploader.reset();
    });

    // Reject non-image files when the pick button is the image uploader.
    headUploader.on('beforeFileQueued', function (file) {
        // Upload button kind (update image / update file): "imageFile" = image, "otherFile" = other.
        var uploadType = $(file.source._refer).attr("uploadType");
        // indexOf result: -1 = not an image MIME type, anything else = image.
        var fileTypeImg = file.type.indexOf("image/");
        if(uploadType == "imageFile" && fileTypeImg == -1){
            alert("请上传图片文件!");
            return false; // veto queuing this file
        }
    });

    headUploader.on('fileQueued', function (file) {
    });
    headUploader.on('uploadProgress', function (file, percentage) {
    });
    // On upload success: would mark the item with a success class (handler
    // currently empty).
    headUploader.on('uploadSuccess', function (file) {
    });
    // On upload failure: tell the user what went wrong.
    headUploader.on('error', function (type) {
        if (type == "Q_TYPE_DENIED") {
            alert("请上传JPG、PNG、GIF、BMP格式文件");
        } else if (type == "Q_EXCEED_SIZE_LIMIT") {
            alert("图片大小不能超过10M哦!");
        }else {
            alert("上传出错!请检查后重新上传!");
        }
    });
    // Server accepted the upload; ret.data carries the stored image URL.
    headUploader.on('uploadAccept', function (object, ret) {
        $("#headImg").attr("src",ret.data);
    });
    // When an upload completes (success or failure): would remove the
    // progress bar first (handler currently empty).
    headUploader.on('uploadComplete', function (file) {
    });
})(jQuery);
|
/**
* @copyright Copyright 2021 <NAME> <<EMAIL>>
* @license MIT
*/
import assert from 'assert';
import deepFreeze from 'deep-freeze';
import RemovePathsWithServersTransformer from '../remove-paths-with-servers.js';
// Unit tests for RemovePathsWithServersTransformer: any path item that
// defines its own `servers` array — even an empty one — must be removed from
// `paths`, while sibling path items and top-level properties are untouched.
describe('RemovePathsWithServersTransformer', () => {
  it('removes path items with servers', () => {
    assert.deepStrictEqual(
      // Input is deep-frozen to prove the transformer does not mutate it.
      new RemovePathsWithServersTransformer().transformOpenApi(deepFreeze({
        openapi: '3.0.3',
        info: {
          title: 'Title',
          version: '1.0',
        },
        paths: {
          // '/a' declares path-level servers, so it must be dropped.
          '/a': {
            servers: [
              { url: 'https://example.com' },
            ],
            get: {
              responses: {
                204: {
                  description: 'Example response',
                },
              },
            },
          },
          '/b': {
            get: {
              responses: {
                204: {
                  description: 'Example response',
                },
              },
            },
          },
        },
      })),
      // Expected output: only '/b' survives.
      {
        openapi: '3.0.3',
        info: {
          title: 'Title',
          version: '1.0',
        },
        paths: {
          '/b': {
            get: {
              responses: {
                204: {
                  description: 'Example response',
                },
              },
            },
          },
        },
      },
    );
  });

  it('removes path items with empty servers', () => {
    assert.deepStrictEqual(
      new RemovePathsWithServersTransformer().transformOpenApi(deepFreeze({
        openapi: '3.0.3',
        info: {
          title: 'Title',
          version: '1.0',
        },
        // Top-level servers must be preserved; only path-level ones matter.
        servers: [
          { url: 'https://example.com' },
        ],
        paths: {
          // An empty servers array still counts as "has servers".
          '/a': {
            servers: [],
            get: {
              responses: {
                204: {
                  description: 'Example response',
                },
              },
            },
          },
          '/b': {
            get: {
              responses: {
                204: {
                  description: 'Example response',
                },
              },
            },
          },
        },
      })),
      {
        openapi: '3.0.3',
        info: {
          title: 'Title',
          version: '1.0',
        },
        servers: [
          { url: 'https://example.com' },
        ],
        paths: {
          '/b': {
            get: {
              responses: {
                204: {
                  description: 'Example response',
                },
              },
            },
          },
        },
      },
    );
  });
});
|
<gh_stars>1-10
package imports.k8s;
/**
 * PodList is a list of Pods.
 *
 * NOTE: jsii-generated binding for the Kubernetes "io.k8s.api.core.v1.PodList"
 * API object (see the Generated annotation below) — regenerate instead of
 * editing by hand.
 */
@javax.annotation.Generated(value = "jsii-pacmak/1.14.1 (build 828de8a)", date = "2020-11-30T16:28:28.041Z")
@software.amazon.jsii.Jsii(module = imports.k8s.$Module.class, fqn = "k8s.PodList")
public class PodList extends org.cdk8s.ApiObject {

    // Wraps an existing jsii kernel object reference.
    protected PodList(final software.amazon.jsii.JsiiObjectRef objRef) {
        super(objRef);
    }

    protected PodList(final software.amazon.jsii.JsiiObject.InitializationMode initializationMode) {
        super(initializationMode);
    }

    /**
     * Defines a "io.k8s.api.core.v1.PodList" API object.
     * <p>
     * @param scope the scope in which to define this object. This parameter is required.
     * @param name a scope-local name for the object. This parameter is required.
     * @param options configuration options. This parameter is required.
     */
    public PodList(final @org.jetbrains.annotations.NotNull software.constructs.Construct scope, final @org.jetbrains.annotations.NotNull java.lang.String name, final @org.jetbrains.annotations.NotNull imports.k8s.PodListOptions options) {
        super(software.amazon.jsii.JsiiObject.InitializationMode.JSII);
        software.amazon.jsii.JsiiEngine.getInstance().createNewObject(this, new Object[] { java.util.Objects.requireNonNull(scope, "scope is required"), java.util.Objects.requireNonNull(name, "name is required"), java.util.Objects.requireNonNull(options, "options is required") });
    }

    /**
     * A fluent builder for {@link imports.k8s.PodList}.
     */
    public static final class Builder implements software.amazon.jsii.Builder<imports.k8s.PodList> {
        /**
         * @return a new instance of {@link Builder}.
         * @param scope the scope in which to define this object. This parameter is required.
         * @param name a scope-local name for the object. This parameter is required.
         */
        public static Builder create(final software.constructs.Construct scope, final java.lang.String name) {
            return new Builder(scope, name);
        }

        private final software.constructs.Construct scope;
        private final java.lang.String name;
        private final imports.k8s.PodListOptions.Builder options;

        private Builder(final software.constructs.Construct scope, final java.lang.String name) {
            this.scope = scope;
            this.name = name;
            this.options = new imports.k8s.PodListOptions.Builder();
        }

        /**
         * List of pods.
         * <p>
         * More info: https://git.k8s.io/community/contributors/devel/api-conventions.md
         * <p>
         * @return {@code this}
         * @param items List of pods. This parameter is required.
         */
        public Builder items(final java.util.List<? extends imports.k8s.Pod> items) {
            this.options.items(items);
            return this;
        }

        /**
         * Standard list metadata.
         * <p>
         * More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
         * <p>
         * @return {@code this}
         * @param metadata Standard list metadata. This parameter is required.
         */
        public Builder metadata(final imports.k8s.ListMeta metadata) {
            this.options.metadata(metadata);
            return this;
        }

        /**
         * @returns a newly built instance of {@link imports.k8s.PodList}.
         */
        @Override
        public imports.k8s.PodList build() {
            return new imports.k8s.PodList(
                this.scope,
                this.name,
                this.options.build()
            );
        }
    }
}
|
#!/bin/sh -e
#
# Copyright (c) 2009-2017 Robert Nelson <robertcnelson@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Checkout root this script was launched from; all paths are relative to it.
DIR=$PWD
git_bin=$(which git)
# Build artifacts are collected here by make_deb.
mkdir -p "${DIR}/deploy/"
# Apply the patchset (patch.sh) to the kernel tree in ${DIR}/KERNEL and, for
# non-yakbuild / non-bisect runs, commit the result as one tagged commit.
patch_kernel () {
	cd "${DIR}/KERNEL" || exit
	export DIR
	# On patch failure, stage everything so the broken state is inspectable,
	# then abort the build.
	/bin/bash -e "${DIR}/patch.sh" || { ${git_bin} add . ; exit 1 ; }
	if [ ! -f "${DIR}/.yakbuild" ] ; then
		if [ ! "${RUN_BISECT}" ] ; then
			${git_bin} add --all
			${git_bin} commit --allow-empty -a -m "${KERNEL_TAG}${BUILD} patchset"
		fi
	fi
	cd "${DIR}/" || exit
}
# Clean the kernel tree and install the build .config: the tracked defconfig
# for normal checkouts, or rcn-ee_defconfig for yakbuild checkouts.
copy_defconfig () {
	cd "${DIR}/KERNEL" || exit
	make ARCH=${KERNEL_ARCH} CROSS_COMPILE="${CC}" distclean
	if [ ! -f "${DIR}/.yakbuild" ] ; then
		# Keep a reference copy of the upstream ${config} for comparison,
		# then use the tracked defconfig as the actual .config.
		make ARCH=${KERNEL_ARCH} CROSS_COMPILE="${CC}" "${config}"
		cp -v .config "${DIR}/patches/ref_${config}"
		cp -v "${DIR}/patches/defconfig" .config
	else
		make ARCH=${KERNEL_ARCH} CROSS_COMPILE="${CC}" rcn-ee_defconfig
	fi
	cd "${DIR}/" || exit
}
# Run interactive kernel configuration; for normal (non-yakbuild) checkouts,
# persist the result back to the tracked defconfig.
make_menuconfig () {
	cd "${DIR}/KERNEL" || exit
	make ARCH=${KERNEL_ARCH} CROSS_COMPILE="${CC}" menuconfig
	if [ ! -f "${DIR}/.yakbuild" ] ; then
		cp -v .config "${DIR}/patches/defconfig"
	fi
	cd "${DIR}/" || exit
}
# Build Debian kernel packages from the patched tree in ${DIR}/KERNEL and
# collect every produced artifact under ${DIR}/deploy/.
# Side effect: sets KERNEL_UTS to the built kernel's release string.
make_deb () {
	cd "${DIR}/KERNEL" || exit
	# Changelog distribution: host codename, with "/" sanitized; lsb_release
	# reporting "n/a" falls back to "unstable".
	deb_distro=$(lsb_release -cs | sed 's/\//_/g')
	if [ "x${deb_distro}" = "xn_a" ] ; then
		deb_distro="unstable"
	fi
	build_opts="-j${CORES}"
	build_opts="${build_opts} ARCH=${KERNEL_ARCH}"
	build_opts="${build_opts} KBUILD_DEBARCH=${DEBARCH}"
	build_opts="${build_opts} LOCALVERSION=${BUILD}"
	build_opts="${build_opts} KDEB_CHANGELOG_DIST=${deb_distro}"
	build_opts="${build_opts} KDEB_PKGVERSION=1${DISTRO}"
	#Just use "linux-upstream"...
	#https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/scripts/package/builddeb?id=3716001bcb7f5822382ac1f2f54226b87312cc6b
	build_opts="${build_opts} KDEB_SOURCENAME=linux-upstream"
	echo "-----------------------------"
	# Newer kernels provide the binary-only bindeb-pkg target; older ones
	# only have deb-pkg (which also builds source packages).
	if grep -q bindeb-pkg "${DIR}/KERNEL/scripts/package/Makefile"; then
		echo "make ${build_opts} CROSS_COMPILE="${CC}" bindeb-pkg"
		echo "-----------------------------"
		fakeroot make ${build_opts} CROSS_COMPILE="${CC}" bindeb-pkg
	else
		echo "make ${build_opts} CROSS_COMPILE="${CC}" deb-pkg"
		echo "-----------------------------"
		fakeroot make ${build_opts} CROSS_COMPILE="${CC}" deb-pkg
	fi
	# Collect whatever artifacts this kernel/target combination produced;
	# not every run generates all of them, hence the "|| true".
	mv "${DIR}"/*.deb "${DIR}/deploy/" || true
	mv "${DIR}"/*.debian.tar.gz "${DIR}/deploy/" || true
	mv "${DIR}"/*.dsc "${DIR}/deploy/" || true
	mv "${DIR}"/*.changes "${DIR}/deploy/" || true
	mv "${DIR}"/*.orig.tar.gz "${DIR}/deploy/" || true
	# Kernel release string straight from the build tree (third field of the
	# UTS_RELEASE line, quotes stripped). awk reads the file directly — the
	# former "cat | awk" was a useless use of cat.
	KERNEL_UTS=$(awk '{print $3}' "${DIR}/KERNEL/include/generated/utsrelease.h" | sed 's/\"//g')
	cd "${DIR}/" || exit
}
# ---- main ----
# Bootstrap a yakbuild recipe from its sample if needed.
if [ -f "${DIR}/.yakbuild" ] ; then
	if [ -f "${DIR}/recipe.sh.sample" ] ; then
		if [ ! -f "${DIR}/recipe.sh" ] ; then
			cp -v "${DIR}/recipe.sh.sample" "${DIR}/recipe.sh"
		fi
	fi
fi
# Verify host build dependencies.
/bin/sh -e "${DIR}/tools/host_det.sh" || { exit 1 ; }
if [ ! -f "${DIR}/system.sh" ] ; then
	cp -v "${DIR}/system.sh.sample" "${DIR}/system.sh"
fi
# A branches.list at the top level means this is the meta branch: tell the
# user to check out a real one and stop.
if [ -f "${DIR}/branches.list" ] ; then
	echo "-----------------------------"
	echo "Please checkout one of the active branches:"
	echo "-----------------------------"
	cat "${DIR}/branches.list" | grep -v INACTIVE
	echo "-----------------------------"
	exit
fi
# Expired branches require explicit confirmation to continue.
if [ -f "${DIR}/branch.expired" ] ; then
	echo "-----------------------------"
	echo "Support for this branch has expired."
	unset response
	echo -n "Do you wish to bypass this warning and support your self: (y/n)? "
	read response
	if [ "x${response}" != "xy" ] ; then
		exit
	fi
	echo "-----------------------------"
fi
# Load user/system configuration (may define CC, LINUX_GIT, CORES, ...).
unset CC
unset LINUX_GIT
. "${DIR}/system.sh"
if [ -f "${DIR}/.yakbuild" ] ; then
	. "${DIR}/recipe.sh"
fi
# Resolve the cross compiler; gcc.sh writes the result into .CC.
/bin/sh -e "${DIR}/scripts/gcc.sh" || { exit 1 ; }
. "${DIR}/.CC"
echo "CROSS_COMPILE=${CC}"
if [ -f /usr/bin/ccache ] ; then
	echo "ccache [enabled]"
	CC="ccache ${CC}"
fi
. "${DIR}/version.sh"
export LINUX_GIT
# Default the build parallelism to the number of online CPUs.
if [ ! "${CORES}" ] ; then
	CORES=$(getconf _NPROCESSORS_ONLN)
fi
#unset FULL_REBUILD
FULL_REBUILD=1
if [ "${FULL_REBUILD}" ] ; then
	/bin/sh -e "${DIR}/scripts/git.sh" || { exit 1 ; }
	if [ "${RUN_BISECT}" ] ; then
		/bin/sh -e "${DIR}/scripts/bisect.sh" || { exit 1 ; }
	fi
	patch_kernel
	copy_defconfig
fi
if [ ! "${AUTO_BUILD}" ] ; then
	make_menuconfig
fi
if [ -f "${DIR}/.yakbuild" ] ; then
	# strip everything before the first "-" of the tag to form the LOCALVERSION
	BUILD=$(echo ${kernel_tag} | sed 's/[^-]*//'|| true)
fi
make_deb
echo "-----------------------------"
echo "Script Complete"
echo "${KERNEL_UTS}" > kernel_version
echo "-----------------------------"
|
package io.dronefleet.mavlink.uavionix;
import io.dronefleet.mavlink.annotations.MavlinkEntryInfo;
import io.dronefleet.mavlink.annotations.MavlinkEnum;
/**
 * Emergency status encoding
 */
@MavlinkEnum
public enum UavionixAdsbEmergencyStatus {
    /**
     * No emergency.
     */
    @MavlinkEntryInfo(0)
    UAVIONIX_ADSB_OUT_NO_EMERGENCY,

    /**
     * General emergency.
     */
    @MavlinkEntryInfo(1)
    UAVIONIX_ADSB_OUT_GENERAL_EMERGENCY,

    /**
     * Lifeguard / medical emergency.
     */
    @MavlinkEntryInfo(2)
    UAVIONIX_ADSB_OUT_LIFEGUARD_EMERGENCY,

    /**
     * Minimum fuel emergency.
     */
    @MavlinkEntryInfo(3)
    UAVIONIX_ADSB_OUT_MINIMUM_FUEL_EMERGENCY,

    /**
     * No communication emergency.
     */
    @MavlinkEntryInfo(4)
    UAVIONIX_ADSB_OUT_NO_COMM_EMERGENCY,

    /**
     * Unlawful interference emergency.
     */
    @MavlinkEntryInfo(5)
    UAVIONIX_ADSB_OUT_UNLAWFUL_INTERFERANCE_EMERGENCY,

    /**
     * Downed aircraft emergency.
     */
    @MavlinkEntryInfo(6)
    UAVIONIX_ADSB_OUT_DOWNED_AIRCRAFT_EMERGENCY,

    /**
     * Reserved value.
     */
    @MavlinkEntryInfo(7)
    UAVIONIX_ADSB_OUT_RESERVED
}
|
import { List } from './List';
/**
 * Transforms a [[List]] (a tuple type) into the union of its element types.
 * @param L the list to transform
 * @returns the union of `L`'s element types
 * @example
 * ```ts
 * type U = UnionOf<[string, number]> // string | number
 * ```
 */
export declare type UnionOf<L extends List> = L[number];
|
# Train a multinomial naive-Bayes sentiment classifier over n-gram counts.
# Import necessary libraries
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB

# Load the data set; expects columns named 'text' and 'sentiment'.
df = pd.read_csv('sentiment.csv')

# Get the features and target values
X = df['text']
y = df['sentiment']

# Create the feature vector: raw counts over uni-, bi- and tri-grams.
vectorizer = CountVectorizer(ngram_range=(1, 3))
X = vectorizer.fit_transform(X)

# Split the data into train and test sets (80/20).
# NOTE(review): no random_state is given, so the split is not reproducible
# between runs — confirm whether that is intended.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)

# Create the model
model = MultinomialNB()

# Compile and fit the model
model.fit(X_train, y_train)

# Make predictions on the test set
# NOTE(review): predictions are computed but never evaluated or saved.
y_pred = model.predict(X_test)
|
<filename>node_modules/botframework-connector/lib/auth/appCredentials.d.ts
/**
 * @module botframework-connector
 */
/**
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License.
 */
import * as msrest from '@azure/ms-rest-js';
import * as adal from 'adal-node';
/**
 * General AppCredentials auth implementation and cache. Supports any ADAL client credential flow.
 * Subclasses can implement refreshToken to acquire the token.
 */
export declare abstract class AppCredentials implements msrest.ServiceClientCredentials {
    /** Shared token cache used by all AppCredentials instances. */
    private static readonly cache;
    /** The bot/application App ID these credentials authenticate. */
    appId: string;
    /** Backing field for the `oAuthEndpoint` accessor pair. */
    private _oAuthEndpoint;
    /** Backing field for the `oAuthScope` accessor pair. */
    private _oAuthScope;
    /** Backing field for the `tenant` accessor pair. */
    private _tenant;
    /** Key under which this instance's token is stored in the shared cache. */
    tokenCacheKey: string;
    /** In-flight token refresh, if one is currently pending; otherwise null. */
    protected refreshingToken: Promise<adal.TokenResponse> | null;
    /** ADAL context used to acquire tokens. */
    protected authenticationContext: adal.AuthenticationContext;
    /**
     * Initializes a new instance of the [AppCredentials](xref:botframework-connector.AppCredentials) class.
     * @param appId The App ID.
     * @param channelAuthTenant Optional. The oauth token tenant.
     * @param oAuthScope The scope for the token.
     */
    constructor(appId: string, channelAuthTenant?: string, oAuthScope?: string);
    /**
     * Gets tenant to be used for channel authentication.
     */
    private get tenant();
    /**
     * Sets tenant to be used for channel authentication.
     */
    private set tenant(value);
    /**
     * Gets the OAuth scope to use.
     */
    get oAuthScope(): string;
    /**
     * Sets the OAuth scope to use.
     */
    set oAuthScope(value: string);
    /**
     * Gets the OAuth endpoint to use.
     */
    get oAuthEndpoint(): string;
    /**
     * Sets the OAuth endpoint to use.
     */
    set oAuthEndpoint(value: string);
    /**
     * Adds the host of service url to trusted hosts.
     * If expiration time is not provided, the expiration date will be current (utc) date + 1 day.
     *
     * @deprecated
     *
     * @param {string} serviceUrl The service url
     * @param {Date} expiration? The expiration date after which this service url is not trusted anymore
     */
    static trustServiceUrl(serviceUrl: string, expiration?: Date): void;
    /**
     * Checks if the service url is for a trusted host or not.
     *
     * @deprecated
     *
     * @param {string} serviceUrl The service url
     * @returns {boolean} True if the host of the service url is trusted; False otherwise.
     */
    static isTrustedServiceUrl(serviceUrl: string): boolean;
    /**
     * Apply the credentials to the HTTP request.
     * @param webResource The WebResource HTTP request.
     * @returns A Promise representing the asynchronous operation.
     */
    signRequest(webResource: msrest.WebResource): Promise<msrest.WebResource>;
    /**
     * Gets an OAuth access token.
     * @param forceRefresh True to force a refresh of the token; or false to get
     * a cached token if it exists.
     * @returns A Promise that represents the work queued to execute.
     * @remarks If the promise is successful, the result contains the access token string.
     */
    getToken(forceRefresh?: boolean): Promise<string>;
    /** Acquires a fresh token; implemented by concrete subclasses. */
    protected abstract refreshToken(): Promise<adal.TokenResponse>;
    /**
     * @private
     */
    private shouldSetToken;
}
//# sourceMappingURL=appCredentials.d.ts.map
|
#!/bin/bash
# Installs a systemd unit for ss-bash and prints the commands to enable it.
# Check Privilege
# Make sure only root can run our script
echo "正在检查权限 Checking Privilege ..."
if [ "$(id -u)" != "0" ]; then
	echo "失败: 请用Root身份运行此脚本. 是不是忘了sudo?" 1>&2
	echo "Fail: This script must be run as root. perhaps forget sudo?" 1>&2
	exit 1
fi
echo "成功 Success"
echo ""
echo "正在将自启动脚本复制到/etc/systemd/system/ss-bash.service"
echo "Creating systemd service /etc/systemd/system/ss-bash.service"
# Write the unit file; $(pwd) is expanded now, so the service is pinned to
# the directory this installer is run from.
cat << EOF > /etc/systemd/system/ss-bash.service
[Unit]
Description=ss-bash
Documentation=https://github.com/hellofwy/ss-bash/wiki
After=network.target
[Service]
User=root
Type=forking
WorkingDirectory=$(pwd)
ExecStart=$(pwd)/ssadmin.sh start
ExecStop=$(pwd)/ssadmin.sh stop
[Install]
WantedBy=multi-user.target
EOF
# $? reflects whether the unit file was written successfully above.
if [ $? -eq 0 ]; then
	echo "成功 Success"
	echo ""
	echo "请用以下命令运行Shadowssocks"
	echo "Use the following commands to start Shadowsocks"
	echo "sudo systemctl daemon-reload"
	echo "sudo systemctl start ss-bash.service"
	echo ""
	echo "请用以下命令启动Shadowssocks自启动服务"
	echo "Use the following command to run Shadowsocks as service"
	echo "sudo systemctl enable ss-bash.service"
fi
|
<filename>dist/utils/is-object.js
'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
var isObject = function isObject(x) {
return x !== null && x !== undefined && x.constructor && x.constructor.name === 'Object';
};
exports.default = isObject;
|
<gh_stars>1-10
package trips
import (
"errors"
"strings"
"github.com/bradpurchase/grocerytime-backend/internal/pkg/db"
"github.com/bradpurchase/grocerytime-backend/internal/pkg/db/models"
"github.com/bradpurchase/grocerytime-backend/internal/pkg/stores"
uuid "github.com/satori/go.uuid"
"gorm.io/gorm"
)
// AddItemsToStore adds an array of items to a store for a user. It creates
// the store for the user if it doesn't already exist.
//
// args must contain "items" ([]interface{} of item-name strings) and may
// contain "storeName" (string); without a store name the user's default
// store is used. Items that fail to be added are reported in the returned
// error (one line per failure) while the successfully added items are still
// returned.
func AddItemsToStore(userID uuid.UUID, args map[string]interface{}) (addedItems []*models.Item, err error) {
	var store models.Store
	storeName, ok := args["storeName"]
	if ok && storeName != nil {
		store, err = FindOrCreateStore(userID, storeName.(string))
		if err != nil {
			return addedItems, errors.New("could not find or create store")
		}
	} else {
		// TODO: what if there's no default store set? handle this case...
		// could fall back to the user's first store added as a hail mary
		store, err = FindDefaultStore(userID)
		if err != nil {
			return nil, errors.New("could not retrieve default store")
		}
	}
	// Fetch the current trip for this store
	trip, err := RetrieveCurrentStoreTrip(store.ID)
	if err != nil {
		return addedItems, errors.New("could not find current trip in store")
	}
	var errorStrings []string
	itemNames := args["items"].([]interface{})
	for i := range itemNames {
		itemName := itemNames[i].(string)
		itemArgs := map[string]interface{}{
			"tripId":   trip.ID,
			"name":     itemName,
			"quantity": 1,
		}
		item, addErr := AddItem(userID, itemArgs)
		if addErr != nil {
			errorStrings = append(errorStrings, addErr.Error())
			// Bug fix: previously the (nil) item was appended even when
			// AddItem failed, leaking nil entries to callers.
			continue
		}
		addedItems = append(addedItems, item)
	}
	if len(errorStrings) > 0 {
		return addedItems, errors.New(strings.Join(errorStrings, "\n"))
	}
	return addedItems, nil
}
// FindOrCreateStore finds or creates a store for a userID by name.
// A store is matched only if the user is a member of it (store_users join).
func FindOrCreateStore(userID uuid.UUID, name string) (storeRecord models.Store, err error) {
	store := models.Store{}
	storeQuery := db.Manager.
		Select("stores.id").
		Joins("INNER JOIN store_users ON store_users.store_id = stores.id").
		Where("store_users.user_id = ?", userID).
		Where("stores.name = ?", name).
		First(&store).
		Error
	if storeQuery != nil {
		if errors.Is(storeQuery, gorm.ErrRecordNotFound) {
			// No such store for this user yet: create it.
			newStore, createErr := stores.CreateStore(userID, name)
			if createErr != nil {
				return storeRecord, errors.New("could not find or create store")
			}
			return newStore, nil
		}
		// Bug fix: any non-NotFound query error previously fell through and
		// returned an empty store with a nil error; propagate it instead.
		return storeRecord, storeQuery
	}
	return store, nil
}
// FindDefaultStore retrieves the ID of the store that is set as the default for the userID provided
func FindDefaultStore(userID uuid.UUID) (store models.Store, err error) {
	// Join through store_users -> store_user_preferences and pick the store
	// flagged as default for this user. Last() keeps the latest matching row
	// if more than one is flagged.
	query := db.Manager.
		Select("stores.id").
		Joins("INNER JOIN store_users ON store_users.store_id = stores.id").
		Joins("INNER JOIN store_user_preferences ON store_user_preferences.store_user_id = store_users.id").
		Where("store_users.user_id = ?", userID).
		Where("store_user_preferences.default_store = ?", true).
		Last(&store).
		Error
	if err := query; err != nil {
		return store, err
	}
	return store, nil
}
|
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression

# Load the dataset; expects columns 'Name' and 'Age'.
data = pd.read_csv('people.csv')

# Split the features and labels
# NOTE(review): 'Name' is presumably a string column; LinearRegression.fit
# requires numeric features, so this will fail unless the values are
# numeric or have been pre-encoded — confirm the schema of people.csv.
X = data[['Name']]
y = data['Age'].values

# Create and train the model
model = LinearRegression().fit(X, y)

# Make predictions with the model (in-sample; no held-out evaluation)
predictions = model.predict(X)
print(predictions)
|
#!/usr/bin/env bash
# Build the firmware and flash it (plus its EEPROM data) to a LEIA board
# over an AVRISP mkII programmer on /dev/ttyACM0.
echo "[+] Compiling"
cd src && make && cd -
echo "[+] Going to flasher mod"
# Switch the board into flasher mode before programming.
python3 go_flasher.py
sleep 3
# Flash the newest built aes*.hex and eedata*.hex images (Intel HEX format).
avrdude -p m8515 -c avrisp2 -P /dev/ttyACM0 -U flash:w:`ls src/build/aes*.hex`:i -U eeprom:w:`ls src/build/eedata*.hex`:i
echo "[+] Please restart your LEIA board to go back to nominal mode!"
echo "    Press enter when OK"
read aa
|
#!/bin/bash
# Package and deploy the Alexa benchmark's OpenWhisk actions (frontend,
# interact, fact, reminder, smarthome). Requires SERVERLESSBENCH_HOME and
# the CouchDB connection variables from local.env.
if [ -z "$SERVERLESSBENCH_HOME" ]; then
	echo "$0: ERROR: SERVERLESSBENCH_HOME environment variable not set"
	exit
fi
source $SERVERLESSBENCH_HOME/local.env
couchdb_url=http://$COUCHDB_USERNAME:$COUCHDB_PASSWORD@$COUCHDB_IP:$COUCHDB_PORT
# Absolute directory of this script, independent of the caller's cwd.
SCRIPTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
SRC_DIR=$SCRIPTS_DIR/../src
# frontend
cd $SRC_DIR/frontend
# install dependencies -- making this script need sudo to execute
npm install
# package handler with dependencies
zip -rq index.zip *
# create/update action
wsk -i action update alexa-frontend index.zip -a provide-api-key true --kind nodejs:10
# interact
cd $SRC_DIR/interact
# install dependencies -- making this script need sudo to execute
npm install
# package handler with dependencies
zip -rq index.zip * ../infra
# create/update action
wsk -i action update alexa-interact index.zip -a provide-api-key true --kind nodejs:10
# fact
cd $SRC_DIR/fact
# install dependencies -- making this script need sudo to execute
npm install
# package handler with dependencies
zip -rq index.zip index.js handler.js node_modules ../infra/language.js
# create/update action
wsk -i action update alexa-fact index.zip --kind nodejs:10
# reminder
cd $SRC_DIR/reminder
# install dependencies -- making this script need sudo to execute
npm install
# package handler with dependencies
zip -rq index.zip index.js handler.js node_modules ../infra/language.js
# create/update action (reminder needs CouchDB access for persistence)
wsk -i action update alexa-reminder index.zip --kind nodejs:10 \
	--param COUCHDB_URL $couchdb_url \
	--param DATABASE $ALEXA_REMINDER_COUCHDB_DATABASE
# smarthome
cd $SRC_DIR/smarthome
# create actions for devices
$SCRIPTS_DIR/action_create_devices.sh
# install dependencies for smarthome entry
rm -rf node_modules
cp package-smarthome.json package.json
npm install
# smarthome entry
cp smarthome-index.js index.js
# package handler with dependencies
zip -rq smarthome.zip index.js smarthome-handler.js node_modules ../infra ./en-US.json
# create/update action
wsk -i action update alexa-smarthome smarthome.zip -a provide-api-key true --kind nodejs:10 \
	--param SMARTHOME_PASSWORD "$ALEXA_SMARTHOME_PASSWORD"
|
#!/usr/bin/env bash
# Generates synthetic number+symbol text-line images (train/test splits) in
# several variants: basic, skew, blur, gaussian-noise and custom background.
conda activate TextRecognitionDataGenerator
source env_setup.sh
# round VALUE PRECISION: round a decimal to the given number of digits.
round() {
	printf "%.${2}f" "${1}"
}
INPUT_PATH='./texts/number+symbol_texts.txt'
imgH=64
num_imgs=$((240*1))
# Train / Test ratio (8:2)
train_ratio=0.8
test_ratio=$(echo "1 - $train_ratio" | bc -l)
train_basic_cnt=$(round $(echo "$num_imgs * $train_ratio" | bc -l) 0)
test_basic_cnt=$(round $(echo "$num_imgs * $test_ratio" | bc -l) 0)
train_skew_cnt=$train_basic_cnt
test_skew_cnt=$test_basic_cnt
train_dist_cnt=0 # scanned documents have almost no distortion
test_dist_cnt=0
train_blur_cnt=$train_basic_cnt
test_blur_cnt=$test_basic_cnt
train_gaussian_cnt=$train_basic_cnt
test_gaussian_cnt=$test_basic_cnt
train_custom_cnt=$train_basic_cnt
test_custom_cnt=$test_basic_cnt
train_total_cnt=$(($train_basic_cnt+$train_skew_cnt+$train_dist_cnt+$train_blur_cnt+$train_gaussian_cnt+$train_custom_cnt))
test_total_cnt=$(($test_basic_cnt+$test_skew_cnt+$test_dist_cnt+$test_blur_cnt+$test_gaussian_cnt+$test_custom_cnt))
total_cnt=$(($train_total_cnt+$test_total_cnt))
printf "[TRAIN] Train total images : %s\n" $train_total_cnt
printf "[TEST] Test total images : %s\n" $test_total_cnt
printf "[TOTAL] Total images : %s\n" $total_cnt
# [Basic images] -> (num_fonts(11) * num_text_sequences(4))
python run_custom.py \
	--output_dir out/num+sym/train/basic --input_file $INPUT_PATH --language num+sym --count $train_basic_cnt \
	--format $imgH --background 1 --name_format 2 --margins 0,0,0,0 --fit \
	--font_dir ./fonts/num+sym/
printf "[TRAIN] Basic images generated : %s\n" $train_basic_cnt
python run_custom.py \
	--output_dir out/num+sym/test/basic --input_file $INPUT_PATH --language num+sym --count $test_basic_cnt \
	--format $imgH --background 1 --name_format 2 --margins 0,0,0,0 --fit \
	--font_dir ./fonts/num+sym/
printf "[TEST] Basic images generated : %s\n" $test_basic_cnt
# [Skew images] -> (num_fonts(11) * num_text_sequences(4))
python run_custom.py \
	--output_dir out/num+sym/train/skew --input_file $INPUT_PATH --language num+sym --count $train_skew_cnt \
	--format $imgH --skew_angle 2 --random_skew --background 1 \
	--name_format 2 --margins 0,0,0,0 --fit --font_dir ./fonts/num+sym/
printf "[TRAIN] Skew images generated : %s\n" $train_skew_cnt
python run_custom.py \
	--output_dir out/num+sym/test/skew --input_file $INPUT_PATH --language num+sym --count $test_skew_cnt \
	--format $imgH --skew_angle 2 --random_skew --background 1 \
	--name_format 2 --margins 0,0,0,0 --fit --font_dir ./fonts/num+sym/
printf "[TEST] Skew images generated : %s\n" $test_skew_cnt
# ## scanned documents have almost no distortion, so the distortion variant
# ## below is disabled (counts are 0).
## [Dist.] -> (num_fonts(11) * num_text_sequences(4))
# python run_custom.py \
# --output_dir out/num+sym/train/dist --input_file $INPUT_PATH --language num+sym --count $train_dist_cnt \
# --format $imgH --distorsion 3 --distorsion_orientation 2 --background 1 \
# --name_format 2 --margins 0,0,0,0 --fit --font_dir ./fonts/num+sym/
# printf "[TRAIN] Dist. images generated : %s\n" $train_dist_cnt
# python run_custom.py \
# --output_dir out/num+sym/test/dist --input_file $INPUT_PATH --language num+sym --count $test_dist_cnt \
# --format $imgH --distorsion 3 --distorsion_orientation 2 --background 1 \
# --name_format 2 --margins 0,0,0,0 --fit --font_dir ./fonts/num+sym/
# printf "[TEST] Dist. images generated : %s\n" $test_dist_cnt
## [Blur Images] -> (num_fonts(11) * num_text_sequences(4))
python run_custom.py \
	--output_dir out/num+sym/train/blur --input_file $INPUT_PATH --language num+sym --count $train_blur_cnt \
	--format $imgH --blur 1 --random_blur --background 1 \
	--name_format 2 --margins 0,0,0,0 --fit --font_dir ./fonts/num+sym/
printf "[TRAIN] Blur images generated : %s\n" $train_blur_cnt
python run_custom.py \
	--output_dir out/num+sym/test/blur --input_file $INPUT_PATH --language num+sym --count $test_blur_cnt \
	--format $imgH --blur 1 --random_blur --background 1 \
	--name_format 2 --margins 0,0,0,0 --fit --font_dir ./fonts/num+sym/
printf "[TEST] Blur images generated : %s\n" $test_blur_cnt
# [Gaussian Images] -> (num_fonts(11) * num_text_sequences(4))
# Gaussian noise
# NOTE(review): this variant and the "custom background" variant below both
# write into out/num+sym/{train,test}/back — confirm the shared output
# directory is intended and file names don't collide.
python run_custom.py \
	--output_dir out/num+sym/train/back --input_file $INPUT_PATH --language num+sym --count $train_gaussian_cnt \
	--format $imgH --background 0 \
	--name_format 2 --margins 0,0,0,0 --fit --font_dir ./fonts/num+sym/
printf "[TRAIN] Gaussian noise images generated : %s\n" $train_gaussian_cnt
python run_custom.py \
	--output_dir out/num+sym/test/back --input_file $INPUT_PATH --language num+sym --count $test_gaussian_cnt \
	--format $imgH --background 0 \
	--name_format 2 --margins 0,0,0,0 --fit --font_dir ./fonts/num+sym/
printf "[TEST] Gaussian noise images generated : %s\n" $test_gaussian_cnt
# [Custom background images] -> (num_fonts(11) * num_text_sequences(4))
python run_custom.py \
	--output_dir out/num+sym/train/back --input_file $INPUT_PATH --language num+sym --count $train_custom_cnt \
	--format $imgH --background 3 \
	--name_format 2 --margins 0,0,0,0 --fit --font_dir ./fonts/num+sym/
printf "[TRAIN] Custom background images generated : %s\n" $train_custom_cnt
python run_custom.py \
	--output_dir out/num+sym/test/back --input_file $INPUT_PATH --language num+sym --count $test_custom_cnt \
	--format $imgH --background 3 \
	--name_format 2 --margins 0,0,0,0 --fit --font_dir ./fonts/num+sym/
printf "[TEST] Custom background images generated : %s\n" $test_custom_cnt
printf "[TRAIN] Total images generated : %s\n" $train_total_cnt
printf "[TEST] Total images generated : %s\n" $test_total_cnt
printf "[TOTAL] Total images generated : %s\n" $total_cnt
|
import Queue from './queue.js'
import defaults from './defaults.js'
// Shared request queue; the concurrency limit is read from
// defaults.concurrency via the callback each time the queue consults it.
let queue = new Queue(function () {
  return defaults.concurrency
})

// Wraps wx.request in a Promise and funnels every request through the
// concurrency-limited queue. Supports axios-style cancellation through
// options.cancelToken.
export default function wxRequest(options) {
  let promise = new Promise(function (resolve, reject) {
    let request = null, cancel = false
    if (options.cancelToken) {
      // On cancellation: reject this promise, flag `cancel` so a request
      // still waiting in the queue becomes a no-op, and abort one that is
      // already in flight.
      options.cancelToken.promise.catch(function (Reason) {
        reject(Reason)
        cancel = true
        if (request) request.abort()
      })
    }
    queue.push(function (res, rej) {
      // Cancelled while queued: release the queue slot without requesting.
      if (cancel) return res()
      request = wx.request({
        ...options,
        success(result) {
          res() // release the queue slot before settling the promise
          resolve(result)
        },
        fail(err) {
          res()
          reject(err)
        }
      })
    })
  })
  // Race with the token's promise so a cancellation can settle the returned
  // promise even before the request itself settles.
  if (options.cancelToken) promise = Promise.race([promise, options.cancelToken.promise])
  return promise
}
|
/**
 * Builds a renderer that draws `text` onto a 2D canvas context.
 * The returned function applies the font/baseline/alignment settings, then
 * fills (when `text.color` is set) and strokes (when both `text.borderColor`
 * and `text.borderWidth` are set) the cropped text content. The draw point
 * is shifted left/up by half the border width so the border stays centered.
 */
export const fillAndStrokeText = text => (ctx, offset) => {
  const { fontStyle, fontWeight, fontSize, fontFamily } = text;
  ctx.font = `${fontStyle} ${fontWeight} ${fontSize}px ${fontFamily}`;
  ctx.textBaseline = text.baseline;
  ctx.textAlign = text.align;

  const { textContent } = text.cropAndMeasure();
  const halfBorder = text.borderWidth / 2;
  const drawX = text.x + offset.x - halfBorder;
  const drawY = text.y + offset.y - halfBorder;

  if (text.color) {
    ctx.fillStyle = text.color;
    ctx.fillText(textContent, drawX, drawY);
  }
  if (text.borderColor && text.borderWidth) {
    ctx.strokeStyle = text.borderColor;
    ctx.lineWidth = text.borderWidth;
    ctx.strokeText(textContent, drawX, drawY);
  }
};
/**
 * Builds a tracer that adds the text's bounding rectangle to the context's
 * current path (beginPath + rect; no fill or stroke is performed here).
 * The rect origin is nudged by half the border width to match how
 * fillAndStrokeText positions the glyphs. Always returns true.
 */
export const traceTextBox = text => (ctx, offset) => {
  const box = text.getBoundingBox(offset);
  const halfBorder = text.borderWidth / 2;
  ctx.beginPath();
  ctx.rect(
    box.left - halfBorder,
    box.top - halfBorder,
    box.right - box.left,
    box.bottom - box.top,
  );
  return true;
}
|
<reponame>hexbee-net/parquet-go<filename>file-reader.go
package parquet
import (
"bytes"
"encoding/binary"
"io"
"strings"
"github.com/hexbee-net/errors"
"github.com/hexbee-net/parquet/compression"
"github.com/hexbee-net/parquet/layout"
"github.com/hexbee-net/parquet/parquet"
"github.com/hexbee-net/parquet/schema"
"github.com/hexbee-net/parquet/source"
)
// File framing constants: a parquet file starts and ends with the 4-byte
// magic "PAR1"; the footer is a 4-byte little-endian metadata length
// followed by the trailing magic.
const (
	magic         = "PAR1"
	magicLen      = len(magic)
	footerLenSize = 4
	footerLen     = int64(footerLenSize + magicLen)
)
// FileReader is used to read data from a parquet file.
// Always use NewFileReader to create such an object.
type FileReader struct {
	schema.Reader // embedded schema reader holding the selected columns' data

	meta        *parquet.FileMetaData // footer metadata (schema, row groups, kv pairs)
	reader      source.Reader         // underlying seekable data source
	chunkReader *layout.ChunkReader   // reads/decompresses column chunks

	rowGroupPosition int   // 1-based position of the currently loaded row group (0 = none yet)
	currentRecord    int64 // records consumed from the current row group
	skipRowGroup     bool  // when true, advance to the next row group on the next read
}
// NewFileReader creates a new FileReader.
// You can limit the columns that are read by providing the names of
// the specific columns to read using dotted notation.
// If no columns are provided, then all columns are read.
func NewFileReader(r source.Reader, columns ...string) (*FileReader, error) {
	// Validate the file framing and decode the footer metadata first.
	meta, err := readFileMetaData(r)
	if err != nil {
		return nil, errors.Wrap(err, "failed to read file meta data")
	}
	s, err := readFileSchema(meta)
	if err != nil {
		return nil, errors.Wrap(err, "creating schema failed")
	}
	s.SetSelectedColumns(columns...)
	// Reset the reader to the beginning of the file
	// (just past the leading magic bytes).
	if _, err := r.Seek(int64(magicLen), io.SeekStart); err != nil {
		return nil, err
	}
	return &FileReader{
		Reader:      s,
		meta:        meta,
		reader:      r,
		chunkReader: layout.NewChunkReader(defaultCompressors()),
	}, nil
}
// CurrentRowGroup returns information about the current row group.
// It returns nil when no row group has been read yet or when the reader has
// advanced past the last row group.
func (f *FileReader) CurrentRowGroup() *parquet.RowGroup {
	if f == nil || f.meta == nil || f.meta.RowGroups == nil {
		return nil
	}
	idx := f.rowGroupPosition - 1
	// Bug fix: idx is -1 before the first row group is loaded; the previous
	// check only guarded the upper bound and would panic on RowGroups[-1].
	if idx < 0 || idx >= len(f.meta.RowGroups) {
		return nil
	}
	return f.meta.RowGroups[idx]
}
// RowGroupCount returns the number of row groups in the parquet file,
// as recorded in the footer metadata.
func (f *FileReader) RowGroupCount() int {
	return len(f.meta.RowGroups)
}
// NumRows returns the number of rows in the parquet file. This information is directly taken from
// the file's meta data; no row group is loaded to answer it.
func (f *FileReader) NumRows() int64 {
	return f.meta.NumRows
}
// RowGroupNumRows returns the number of rows in the current RowGroup.
// It loads the next row group first if none is loaded yet.
func (f *FileReader) RowGroupNumRows() (int64, error) {
	if err := f.advanceIfNeeded(); err != nil {
		return 0, err
	}
	return f.Reader.RowGroupNumRecords(), nil
}
// NextRow reads the next row from the parquet file. If required, it will load the next row group.
// The row is returned as a map keyed by column name.
func (f *FileReader) NextRow() (map[string]interface{}, error) {
	if err := f.advanceIfNeeded(); err != nil {
		return nil, err
	}
	// Count the consumed record so advanceIfNeeded knows when the current
	// row group is exhausted.
	f.currentRecord++
	return f.Reader.GetData()
}
// SkipRowGroup skips the currently loaded row group and advances to the next row group.
// The actual advance is deferred until the next read via advanceIfNeeded.
func (f *FileReader) SkipRowGroup() {
	f.skipRowGroup = true
}
// PreLoad is used to load the row group if required. It does nothing if the row group is already loaded.
func (f *FileReader) PreLoad() error {
	return f.advanceIfNeeded()
}
// MetaData returns a map of metadata key-value pairs stored in the parquet file.
// Entries with a nil value pointer are omitted (see metaDataToMap).
func (f *FileReader) MetaData() map[string]string {
	return metaDataToMap(f.meta.KeyValueMetadata)
}
// ColumnMetaData returns a map of metadata key-value pairs for the provided column
// in the current row group.
// The column name has to be provided in its dotted notation.
func (f *FileReader) ColumnMetaData(colName string) (map[string]string, error) {
	rg := f.CurrentRowGroup()
	// Bug fix: CurrentRowGroup returns nil before the first row group is
	// loaded (or past the last one); the previous code dereferenced it
	// unconditionally and would panic.
	if rg == nil {
		return nil, errors.New("no current row group")
	}
	for _, col := range rg.Columns {
		if colName == strings.Join(col.MetaData.PathInSchema, ".") {
			return metaDataToMap(col.MetaData.KeyValueMetadata), nil
		}
	}
	return nil, errors.WithFields(
		errors.New("column not found"),
		errors.Fields{
			"name": colName,
		})
}
// advanceIfNeeded loads the next row group when one of three conditions
// holds: nothing has been loaded yet, the current row group is exhausted,
// or a skip was requested via SkipRowGroup.
func (f *FileReader) advanceIfNeeded() error {
	if f.rowGroupPosition == 0 || f.currentRecord >= f.Reader.RowGroupNumRecords() || f.skipRowGroup {
		if err := f.readRowGroup(); err != nil {
			// Keep skipRowGroup set so the next call attempts to advance again.
			f.skipRowGroup = true
			return err
		}
		f.currentRecord = 0
		f.skipRowGroup = false
	}
	return nil
}
// readRowGroup read the next row group into memory.
// It returns io.EOF once all row groups have been consumed. Columns that are
// not selected are skipped on disk instead of being decoded.
func (f *FileReader) readRowGroup() error {
	if f.rowGroupPosition >= len(f.meta.RowGroups) {
		return io.EOF
	}
	rowGroups := f.meta.RowGroups[f.rowGroupPosition]
	f.rowGroupPosition++
	// Clear previously loaded data before filling in the new group.
	f.Reader.ResetData()
	f.Reader.SetNumRecords(rowGroups.NumRows)
	for _, c := range f.Reader.Columns() {
		chunk := rowGroups.Columns[c.Index()]
		if !f.Reader.IsSelected(c.FlatName()) {
			// Unselected column: advance past its chunk without decoding.
			if err := layout.SkipChunk(f.reader, c, chunk); err != nil {
				return err
			}
			c.SetSkipped(true)
			continue
		}
		pages, err := f.chunkReader.ReadChunk(f.reader, c, chunk)
		if err != nil {
			return errors.Wrap(err, "failed to read data chunk")
		}
		if err := readPageData(c, pages); err != nil {
			return errors.Wrap(err, "failed to read page data")
		}
	}
	return nil
}
// readFileMetaData validates the parquet file framing (leading and trailing
// "PAR1" magic) and decodes the thrift-encoded footer metadata.
func readFileMetaData(r io.ReadSeeker) (*parquet.FileMetaData, error) {
	buf := make([]byte, magicLen)
	// read and validate magic header
	if _, err := r.Seek(0, io.SeekStart); err != nil {
		return nil, errors.Wrap(err, "failed to seek to file magic header")
	}
	if _, err := io.ReadFull(r, buf); err != nil {
		// Bug fix: message previously read "failed to read ... header failed".
		return nil, errors.Wrap(err, "failed to read file magic header")
	}
	if !bytes.Equal(buf, []byte(magic)) {
		return nil, errors.New("invalid parquet file header")
	}
	// read and validate footer
	if _, err := r.Seek(int64(-magicLen), io.SeekEnd); err != nil {
		return nil, errors.Wrap(err, "failed to seek to file magic footer")
	}
	if _, err := io.ReadFull(r, buf); err != nil {
		// Bug fix: message previously read "failed to read ... footer failed".
		return nil, errors.Wrap(err, "failed to read file magic footer")
	}
	if !bytes.Equal(buf, []byte(magic)) {
		return nil, errors.New("invalid parquet file footer")
	}
	// read footer length (4-byte little-endian, just before the magic)
	var fl int32
	if _, err := r.Seek(-footerLen, io.SeekEnd); err != nil {
		return nil, errors.Wrap(err, "failed to seek to footer length")
	}
	if err := binary.Read(r, binary.LittleEndian, &fl); err != nil {
		return nil, errors.Wrap(err, "failed to read footer length")
	}
	if fl <= 0 {
		return nil, errors.WithFields(
			errors.New("invalid footer length"),
			errors.Fields{
				"length": fl,
			})
	}
	// read file metadata (fl bytes immediately preceding the footer)
	meta := &parquet.FileMetaData{}
	if _, err := r.Seek(-footerLen-int64(fl), io.SeekEnd); err != nil {
		return nil, errors.Wrap(err, "failed to seek to file meta data")
	}
	if err := readThrift(meta, io.LimitReader(r, int64(fl))); err != nil {
		return nil, errors.Wrap(err, "failed to read file meta data")
	}
	return meta, nil
}
// readFileSchema builds a schema.Reader from the schema elements recorded in
// the file meta data. It fails when the meta data carries no schema at all.
func readFileSchema(meta *parquet.FileMetaData) (schema.Reader, error) {
	if len(meta.Schema) == 0 {
		return nil, errors.New("no schema element found")
	}
	reader, loadErr := schema.LoadSchema(meta.Schema)
	if loadErr != nil {
		return nil, errors.Wrap(loadErr, "failed to read file schema from meta data")
	}
	return reader, nil
}
// readPageData decodes the given pages into the column's store, appending
// values plus their repetition and definition levels. It fails when a page
// yields fewer values than its header declared.
func readPageData(col *schema.Column, pages []layout.PageReader) error {
	s := col.ColumnStore()
	for i := range pages {
		data := make([]interface{}, pages[i].NumValues())
		// n = values actually decoded; dl/rl = definition/repetition levels
		n, dl, rl, err := pages[i].ReadValues(data)
		if err != nil {
			return err
		}
		if int32(n) != pages[i].NumValues() {
			return errors.WithFields(
				errors.New("unexpected number of values"),
				errors.Fields{
					"expected": pages[i].NumValues(),
					"actual":   n,
				})
		}
		// using append to make sure we handle the multiple data page correctly
		if err := s.RepetitionLevels.AppendArray(rl); err != nil {
			return err
		}
		if err := s.DefinitionLevels.AppendArray(dl); err != nil {
			return err
		}
		s.Values.Values = append(s.Values.Values, data...)
		s.Values.NoDictMode = true
	}
	return nil
}
// metaDataToMap flattens a list of parquet key/value pairs into a plain map,
// skipping entries whose value pointer is nil.
func metaDataToMap(kvMetaData []*parquet.KeyValue) map[string]string {
	result := map[string]string{}
	for _, pair := range kvMetaData {
		if pair.Value == nil {
			continue
		}
		result[pair.Key] = *pair.Value
	}
	return result
}
// defaultCompressors returns the codec-to-decompressor table used when
// reading column chunks; every codec this reader supports is listed here.
func defaultCompressors() map[parquet.CompressionCodec]compression.BlockCompressor {
	return map[parquet.CompressionCodec]compression.BlockCompressor{
		parquet.CompressionCodec_UNCOMPRESSED: compression.Uncompressed{},
		parquet.CompressionCodec_SNAPPY:       compression.Snappy{},
		parquet.CompressionCodec_GZIP:         compression.GZip{},
		parquet.CompressionCodec_BROTLI:       compression.Brotli{},
		parquet.CompressionCodec_LZ4:          compression.LZ4{},
		parquet.CompressionCodec_ZSTD:         compression.ZStd{},
	}
}
|
#!/bin/bash
# SLURM batch job: run the cuda_sankoff GPU program on an RFAM sequence,
# appending timing/strace diagnostics to <seq>.gpu.output.
#SBATCH -N 2
#SBATCH -p GPU
#SBATCH --ntasks-per-node 28
#SBATCH -t 5:00:00
#SBATCH --gres=gpu:p100:2
SEQ="./seqs/RFAM/RF02543.fasta"
CMD="./bin/cuda_sankoff"
OPT=""
OUT="gpu"
module avail cuda
module load cuda
# echo commands as they run, for the job log
set -x
#run GPU program
cd $HOME"/hpc_foldalign"
# strace -e wait4 plus /usr/bin/time -v capture child waits and resource usage
strace -ve wait4 /usr/bin/time -v $CMD $OPT $SEQ >> $SEQ.$OUT.output 2>&1
|
import execa = require("execa");
import { codechecks } from "@codechecks/client";
import { visRegCodecheck } from "@codechecks/vis-reg";
import { dir as tmpDir } from "tmp-promise";
import { UserOptions, parseUserOptions } from "./options";
/**
 * Runs a visual-regression check for a Storybook: captures screenshots of
 * all stories with zisui into a temporary directory, then hands them to the
 * codechecks vis-reg comparison.
 * @param _options user options; see parseUserOptions for defaults.
 */
export async function visRegStorybook(_options: UserOptions = {}): Promise<void> {
  const options = parseUserOptions(_options);
  // Screenshots go into a throwaway temp directory.
  const { path: tmpPathDir } = await tmpDir();
  console.log(`Gathering screenshots to ${tmpPathDir}`);
  await execa(
    require.resolve("zisui/lib/node/cli.js"),
    ["--serverCmd", options.startServerCmd, "--outDir", tmpPathDir, options.storybookUrl],
    {
      timeout: 300000, // @todo we should fine a way to only timeout when there was no new output for x seconds
      cwd: codechecks.context.workspaceRoot,
    },
  );
  await visRegCodecheck({
    collectionName: options.collectionName,
    imagesPath: tmpPathDir,
  });
}
export default visRegStorybook;
|
/**
* Author: <NAME>
* Dijkstra's Algorithm implementation in JavaScript
* Dijkstra's Algorithm calculates the minimum distance between two nodes.
* It is used to find the shortes path.
* It uses graph data structure.
*/
/**
 * Builds an undirected weighted graph as an adjacency list.
 * @param {number} V number of vertices (labelled 0..V-1)
 * @param {Array<[number, number, number]>} E edge triples [u, v, w]
 * @returns adjacency list where list[u] holds [neighbour, weight] pairs
 */
function createGraph( V, E ) {
  const adjList = Array.from( { length: V }, () => [] )
  for ( const [ u, v, w ] of E ) {
    // undirected: record the edge in both endpoints' lists
    adjList[ u ].push( [ v, w ] )
    adjList[ v ].push( [ u, w ] )
  }
  return adjList
}
/**
 * Dijkstra's single-source shortest paths over a non-negative weighted
 * adjacency list (as produced by createGraph).
 * @param graph adjacency list: graph[u] = [[v, w], ...]
 * @param {number} V number of vertices
 * @param {number} src source vertex
 * @returns array of [distance, parent] per vertex; unreachable vertices
 *          keep [Infinity, -1].
 */
function djikstra( graph, V, src ) {
  const vis = Array( V ).fill( 0 )
  const dist = []
  // Bug fix: use Infinity instead of the magic sentinel 10000 so graphs
  // with path lengths >= 10000 (or unreachable vertices) are handled
  // correctly.
  for ( let i = 0; i < V; i++ ) dist.push( [ Infinity, -1 ] )
  dist[ src ][ 0 ] = 0
  for ( let i = 0; i < V - 1; i++ ) {
    // pick the unvisited vertex with the smallest tentative distance
    let mn = -1
    for ( let j = 0; j < V; j++ ) {
      if ( vis[ j ] === 0 && ( mn === -1 || dist[ j ][ 0 ] < dist[ mn ][ 0 ] ) ) mn = j
    }
    // Bug fix: guard against every vertex being visited already (only
    // possible on degenerate inputs) instead of indexing vis[-1].
    if ( mn === -1 ) break
    vis[ mn ] = 1
    // relax all edges leaving the chosen vertex
    for ( const [ to, w ] of graph[ mn ] ) {
      if ( vis[ to ] === 0 && dist[ to ][ 0 ] > dist[ mn ][ 0 ] + w ) {
        dist[ to ][ 0 ] = dist[ mn ][ 0 ] + w
        dist[ to ][ 1 ] = mn
      }
    }
  }
  return dist
}
// Demo: the classic 9-vertex weighted graph (as used in CLRS examples).
const V = 9
const E = [
[ 0, 1, 4 ],
[ 0, 7, 8 ],
[ 1, 7, 11 ],
[ 1, 2, 8 ],
[ 7, 8, 7 ],
[ 6, 7, 1 ],
[ 2, 8, 2 ],
[ 6, 8, 6 ],
[ 5, 6, 2 ],
[ 2, 5, 4 ],
[ 2, 3, 7 ],
[ 3, 5, 14 ],
[ 3, 4, 9 ],
[ 4, 5, 10 ]
]
const graph = createGraph( V, E )
// Shortest paths from vertex 0 to every other vertex.
const distances = djikstra( graph, V, 0 )
/**
 * Each entry of `distances` is a pair: the first value is the minimum
 * distance from the source, the second is the parent node through which
 * that minimum distance was reached (-1 for the source/unreached nodes).
 */
console.log( distances )
|
const http = require('http');
// NOTE(review): this Date is captured once at module load, so any handler that
// uses it reports the server's start time rather than the request time.
const date = new Date();
// Minimal router: GET /status returns the current time as JSON; anything
// else gets an empty 404 response.
const requestHandler = (req, res) => {
  if (req.url === '/status') {
    res.writeHead(200, {'Content-Type': 'application/json'});
    // Bug fix: build the timestamp per request. The original captured a single
    // Date at module load, so /status always reported the server start time.
    // toUTCString() replaces the deprecated toGMTString() alias (same output).
    res.end(JSON.stringify({time: new Date().toUTCString()}));
  } else {
    res.writeHead(404);
    res.end();
  }
};
// Start the HTTP server on port 3000.
const server = http.createServer(requestHandler);
server.listen(3000);
|
<gh_stars>1-10
'use strict';
// Barrel module: re-exports the sibling UI modules under their own names so
// consumers can import them from a single entry point.
import views from './views';
import sidemenu from './sidemenu';
import modals from './modals';
export {
// views:
views,
// modals:
modals,
// sidemenu:
sidemenu
};
|
package com.qurux.coffeevizbeer.exceptions;
/**
* Created by <NAME> on 06-12-2016.
*/
public class NullUserException extends Exception {
public NullUserException() {
super("Your details are not loaded yet");
}
}
|
# Base16 colour slots, one "rr/gg/bb" hex triplet per variable (the slash
# format used by terminal colour escape sequences).
# base00-base07: greyscale ramp (darkest background -> lightest foreground);
# base08-base0f: accent colours. Values appear to match the One Dark scheme —
# TODO confirm against the upstream base16 scheme file.
colour_base00="28/2c/34"
colour_base01="35/3b/45"
colour_base02="3e/44/51"
colour_base03="54/58/62"
colour_base04="56/5c/64"
colour_base05="ab/b2/bf"
colour_base06="b6/bd/ca"
colour_base07="c8/cc/d4"
colour_base08="e0/6c/75"
colour_base09="d1/9a/66"
colour_base0a="e5/c0/7b"
colour_base0b="98/c3/79"
colour_base0c="56/b6/c2"
colour_base0d="61/af/ef"
colour_base0e="c6/78/dd"
colour_base0f="be/50/46"
|
#!/usr/bin/env bats
# Unit tests for the ingress-gateways ServiceAccount Helm template.
# Renders the chart with `helm template` and inspects the output with yq.
load _helpers
# The ServiceAccount must not be rendered unless ingressGateways is enabled.
@test "ingressGateways/ServiceAccount: disabled by default" {
cd `chart_dir`
assert_empty helm template \
-s templates/ingress-gateways-serviceaccount.yaml \
.
}
# Enabling ingressGateways (with connectInject) renders a non-empty manifest.
@test "ingressGateways/ServiceAccount: enabled with ingressGateways, connectInject enabled" {
cd `chart_dir`
local actual=$(helm template \
-s templates/ingress-gateways-serviceaccount.yaml \
--set 'ingressGateways.enabled=true' \
--set 'connectInject.enabled=true' \
. | tee /dev/stderr |
yq -s 'length > 0' | tee /dev/stderr)
[ "${actual}" = "true" ]
}
#--------------------------------------------------------------------
# global.imagePullSecrets
@test "ingressGateways/ServiceAccount: can set image pull secrets" {
cd `chart_dir`
local object=$(helm template \
-s templates/ingress-gateways-serviceaccount.yaml \
--set 'ingressGateways.enabled=true' \
--set 'connectInject.enabled=true' \
--set 'global.imagePullSecrets[0].name=my-secret' \
--set 'global.imagePullSecrets[1].name=my-secret2' \
. | tee /dev/stderr)
# Both configured pull secrets must appear on the rendered ServiceAccount.
local actual=$(echo "$object" |
yq -s -r '.[0].imagePullSecrets[0].name' | tee /dev/stderr)
[ "${actual}" = "my-secret" ]
local actual=$(echo "$object" |
yq -s -r '.[0].imagePullSecrets[1].name' | tee /dev/stderr)
[ "${actual}" = "my-secret2" ]
}
#--------------------------------------------------------------------
# multiple gateways
@test "ingressGateways/ServiceAccount: multiple gateways" {
cd `chart_dir`
local object=$(helm template \
-s templates/ingress-gateways-serviceaccount.yaml \
--set 'ingressGateways.enabled=true' \
--set 'connectInject.enabled=true' \
--set 'ingressGateways.gateways[0].name=gateway1' \
--set 'ingressGateways.gateways[1].name=gateway2' \
. | tee /dev/stderr |
yq -s -r '.' | tee /dev/stderr)
# Exactly one ServiceAccount per configured gateway, and no extras.
local actual=$(echo $object | yq -r '.[0].metadata.name' | tee /dev/stderr)
[ "${actual}" = "release-name-consul-gateway1" ]
local actual=$(echo $object | yq -r '.[1].metadata.name' | tee /dev/stderr)
[ "${actual}" = "release-name-consul-gateway2" ]
local actual=$(echo "$object" |
yq -r '.[2] | length > 0' | tee /dev/stderr)
[ "${actual}" = "false" ]
}
|
/*
* Copyright (c) 2015, Freescale Semiconductor, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* o Redistributions of source code must retain the above copyright notice, this list
* of conditions and the following disclaimer.
*
* o Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* o Neither the name of Freescale Semiconductor, Inc. nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "fsl_common.h"
#include "fsl_smc.h"
#include "fsl_rtc.h"
#include "fsl_clock_config.h"
#define MCG_IRCLK_DISABLE 0U /*!< MCGIRCLK disabled */
#define MCG_PLL_DISABLE 0U /*!< MCGPLLCLK disabled */
#define OSC_CAP0P 0U /*!< Oscillator 0pF capacitor load */
#define RTC_OSC_CAP_LOAD_0PF 0x0U /*!< RTC oscillator capacity load: 0pF */
#define RTC_RTC32KCLK_PERIPHERALS_ENABLED 1U /*!< RTC32KCLK to other peripherals: enabled */
#define SIM_CLKOUT_SEL_FLEXBUS_CLK 0U /*!< CLKOUT pin clock select: FlexBus clock */
#define SIM_OSC32KSEL_RTC32KCLK_CLK 2U /*!< OSC32KSEL select: RTC32KCLK clock (32.768kHz) */
#define SIM_PLLFLLSEL_MCGFLLCLK_CLK 0U /*!< PLLFLL select: MCGFLLCLK clock */
/*
 * Configure the RTC 32.768 kHz oscillator.
 *
 * capLoad         - capacitor-load configuration passed to RTC_SetOscCapLoad()
 * enableOutPeriph - non-zero routes RTC32KCLK to other peripherals (clears CLKO)
 *
 * The RTC clock gate is opened only for the duration of the setup and closed
 * again before returning.
 */
static void CLOCK_CONFIG_SetRtcClock(uint32_t capLoad, uint8_t enableOutPeriph)
{
/* RTC clock gate enable */
CLOCK_EnableClock(kCLOCK_Rtc0);
if ((RTC->CR & RTC_CR_OSCE_MASK) == 0u) { /* Only if the Rtc oscillator is not already enabled */
/* Set the specified capacitor configuration for the RTC oscillator */
RTC_SetOscCapLoad(RTC, capLoad);
/* Enable the RTC 32KHz oscillator */
RTC->CR |= RTC_CR_OSCE_MASK;
}
/* Output to other peripherals */
if (enableOutPeriph) {
RTC->CR &= ~RTC_CR_CLKO_MASK;
}
else {
RTC->CR |= RTC_CR_CLKO_MASK;
}
/* Set the XTAL32/RTC_CLKIN frequency based on board setting. */
CLOCK_SetXtal32Freq(BOARD_XTAL32K_CLK_HZ);
/* Set RTC_TSR if there is fault value in RTC */
if (RTC->SR & RTC_SR_TIF_MASK) {
/* Rewriting TSR with itself clears the time-invalid fault condition. */
RTC -> TSR = RTC -> TSR;
}
/* RTC clock gate disable */
CLOCK_DisableClock(kCLOCK_Rtc0);
}
/*
 * Enable the IRC48M internal oscillator through the USB clock-recovery block.
 * The USBFS clock gate is opened only for the duration of the register write
 * and closed again afterwards.
 *
 * Fix: declared with (void) — an empty parameter list () in C declares a
 * function with unspecified parameters, not a zero-argument function.
 */
static void CLOCK_CONFIG_EnableIrc48MOsc(void)
{
    /* USB clock gate enable */
    CLOCK_EnableClock(kCLOCK_Usbfs0);
    /* IRC48M oscillator enable */
    USB0->CLK_RECOVER_IRC_EN = USB_CLK_RECOVER_IRC_EN_IRC_EN_MASK | USB_CLK_RECOVER_IRC_EN_REG_EN_MASK;
    /* USB clock gate disable */
    CLOCK_DisableClock(kCLOCK_Usbfs0);
}
/* Program the FLL external reference divider (FRDIV field of MCG->C1). */
static void CLOCK_CONFIG_SetFllExtRefDiv(uint8_t frdiv)
{
MCG->C1 = ((MCG->C1 & ~MCG_C1_FRDIV_MASK) | MCG_C1_FRDIV(frdiv));
}
/*******************************************************************************
* Definitions
******************************************************************************/
/*! @brief Clock configuration structure. */
typedef struct _clock_config
{
mcg_config_t mcgConfig; /*!< MCG configuration. */
sim_clock_config_t simConfig; /*!< SIM configuration. */
osc_config_t oscConfig; /*!< OSC configuration. */
uint32_t coreClock; /*!< core clock frequency. */
} clock_config_t;
/*******************************************************************************
* Variables
******************************************************************************/
/* System clock frequency. */
extern uint32_t SystemCoreClock;
/* Configuration for enter VLPR mode. Core clock = 4MHz. */
const clock_config_t g_defaultClockConfigVlpr = {
.mcgConfig =
{
.mcgMode = kMCG_ModeBLPI, /* Work in BLPI mode. */
.irclkEnableMode = kMCG_IrclkEnable, /* MCGIRCLK enable. */
.ircs = kMCG_IrcFast, /* Select IRC4M. */
.fcrdiv = 0U, /* FCRDIV is 0. */
.frdiv = 0U, /* FRDIV is 0. */
.drs = kMCG_DrsLow, /* Low frequency range. */
.dmx32 = kMCG_Dmx32Default, /* DCO has a default range of 25%. */
.oscsel = kMCG_OscselOsc, /* Select OSC. */
.pll0Config =
{
.enableMode = 0U, /* Don't enable PLL. */
.prdiv = 0U,
.vdiv = 0U,
},
},
.simConfig =
{
.pllFllSel = 3U, /* PLLFLLSEL select IRC48MCLK. */
.er32kSrc = 2U, /* ERCLK32K selection, use RTC. */
.clkdiv1 = 0x00040000U, /* SIM_CLKDIV1. */
},
.oscConfig = {.freq = BOARD_XTAL0_CLK_HZ,
.capLoad = 0,
.workMode = kOSC_ModeExt,
.oscerConfig =
{
.enableMode = kOSC_ErClkEnable,
#if (defined(FSL_FEATURE_OSC_HAS_EXT_REF_CLOCK_DIVIDER) && FSL_FEATURE_OSC_HAS_EXT_REF_CLOCK_DIVIDER)
.erclkDiv = 0U,
#endif
}},
.coreClock = 4000000U, /* Core clock frequency */
};
/* Configuration for enter RUN mode. Core clock = 95.977472 MHz. */
const clock_config_t g_defaultClockConfigRun = {
.mcgConfig =
{
.mcgMode = kMCG_ModePEE, /* PEE - PLL Engaged External */
.irclkEnableMode = MCG_IRCLK_DISABLE, /* MCGIRCLK disabled */
.ircs = kMCG_IrcSlow, /* Slow internal reference clock selected */
.fcrdiv = 0x0U, /* Fast IRC divider: divided by 1 */
.frdiv = 0x2U, /* FLL reference clock divider: divided by 4 */
.drs = kMCG_DrsLow, /* Low frequency range */
.dmx32 = kMCG_Dmx32Default, /* DCO has a default range of 25% */
.oscsel = kMCG_OscselIrc, /* Selects 48 MHz IRC Oscillator */
.pll0Config =
{
.enableMode = MCG_PLL_DISABLE, /* MCGPLLCLK disabled */
.prdiv = 0xbU, /* PLL Reference divider: divided by 12 */
.vdiv = 0x0U, /* VCO divider: multiplied by 24 */
},
},
.simConfig = /* SIM module clock selections and dividers */
{
.pllFllSel = SIM_PLLFLLSEL_MCGFLLCLK_CLK, /* PLLFLL select: MCGFLLCLK clock */
.er32kSrc = SIM_OSC32KSEL_RTC32KCLK_CLK, /* OSC32KSEL select: RTC32KCLK clock (32.768kHz) */
.clkdiv1 = 0x1240000U, /* SIM_CLKDIV1 - OUTDIV1: /1, OUTDIV2: /2, OUTDIV3: /3, OUTDIV4: /5 */
},
.oscConfig =
{
.freq = 0U, /* Oscillator frequency: 0Hz */
.capLoad = (OSC_CAP0P), /* Oscillator capacity load: 0pF */
.workMode = kOSC_ModeExt, /* Use external clock */
.oscerConfig =
{
.enableMode = kOSC_ErClkEnable, /* Enable external reference clock, disable external reference clock in STOP mode */
}
},
.coreClock = 96000000U, /* Core clock frequency */
};
/*******************************************************************************
* Code
******************************************************************************/
/*
* How to setup clock using clock driver functions:
*
* 1. CLOCK_SetSimSafeDivs, to make sure core clock, bus clock, flexbus clock
* and flash clock are in allowed range during clock mode switch.
*
* 2. Call CLOCK_Osc0Init to setup OSC clock, if it is used in target mode.
*
* 3. Set MCG configuration, MCG includes three parts: FLL clock, PLL clock and
* internal reference clock(MCGIRCLK). Follow the steps to setup:
*
* 1). Call CLOCK_BootToXxxMode to set MCG to target mode.
*
* 2). If target mode is FBI/BLPI/PBI mode, the MCGIRCLK has been configured
* correctly. For other modes, need to call CLOCK_SetInternalRefClkConfig
* explicitly to setup MCGIRCLK.
*
* 3). Don't need to configure FLL explicitly, because if target mode is FLL
* mode, then FLL has been configured by the function CLOCK_BootToXxxMode,
* if the target mode is not FLL mode, the FLL is disabled.
*
* 4). If target mode is PEE/PBE/PEI/PBI mode, then the related PLL has been
* setup by CLOCK_BootToXxxMode. In FBE/FBI/FEE/FBE mode, the PLL could
* be enabled independently, call CLOCK_EnablePll0 explicitly in this case.
*
* 4. Call CLOCK_SetSimConfig to set the clock configuration in SIM.
*/
/* Busy-wait delay used while waiting for the FLL to stabilise. */
static void fllStableDelay(void)
{
/*
Should wait at least 1ms. Because in these modes, the core clock is 100MHz
at most, so this function could obtain the 1ms delay.
*/
/* volatile prevents the compiler from optimising the countdown away. */
volatile uint32_t i = 30000U;
while (i--)
{
__NOP();
}
}
/*
 * Boot the system into VLPR (Very Low Power Run) mode with MCG in BLPI.
 * SystemCoreClock is updated to g_defaultClockConfigVlpr.coreClock (4 MHz).
 */
void BOARD_BootClockVLPR(void)
{
/* Keep the SIM dividers in a safe range while switching clock modes. */
CLOCK_SetSimSafeDivs();
/* Enter BLPI with the fast IRC settings from the VLPR configuration. */
CLOCK_BootToBlpiMode(g_defaultClockConfigVlpr.mcgConfig.fcrdiv, g_defaultClockConfigVlpr.mcgConfig.ircs,
g_defaultClockConfigVlpr.mcgConfig.irclkEnableMode);
CLOCK_SetSimConfig(&g_defaultClockConfigVlpr.simConfig);
SystemCoreClock = g_defaultClockConfigVlpr.coreClock;
/* Allow all power modes, then request the VLPR transition. */
SMC_SetPowerModeProtection(SMC, kSMC_AllowPowerModeAll);
SMC_SetPowerModeVlpr(SMC, false);
/* Busy-wait until the power-mode transition has completed. */
while (SMC_GetPowerModeState(SMC) != kSMC_PowerStateVlpr)
{
}
}
/*
 * Boot the system clock to RUN mode: MCG in PEE with the PLL sourced from
 * the IRC48M oscillator. SystemCoreClock is updated to
 * g_defaultClockConfigRun.coreClock (96 MHz).
 */
void BOARD_BootClockRUN(void)
{
/* Set the system clock dividers in SIM to safe value. */
CLOCK_SetSimSafeDivs();
/* Configure RTC clock including enabling RTC oscillator. */
CLOCK_CONFIG_SetRtcClock(RTC_OSC_CAP_LOAD_0PF, RTC_RTC32KCLK_PERIPHERALS_ENABLED);
/* Enable IRC48M oscillator for K24 as workaround because there is not enabled the oscillator automatically. */
CLOCK_CONFIG_EnableIrc48MOsc();
/* Configure FLL external reference divider (FRDIV). */
CLOCK_CONFIG_SetFllExtRefDiv(g_defaultClockConfigRun.mcgConfig.frdiv);
/* Set MCG to PEE mode. */
CLOCK_BootToPeeMode(g_defaultClockConfigRun.mcgConfig.oscsel,
kMCG_PllClkSelPll0,
&g_defaultClockConfigRun.mcgConfig.pll0Config);
/* Set the clock configuration in SIM module. */
CLOCK_SetSimConfig(&g_defaultClockConfigRun.simConfig);
/* Set SystemCoreClock variable. */
SystemCoreClock = g_defaultClockConfigRun.coreClock;
/* Set CLKOUT source. */
CLOCK_SetClkOutClock(SIM_CLKOUT_SEL_FLEXBUS_CLK);
}
|
#!/usr/bin/env bash
# Template adapted from here:
# https://github.com/chriskempson/base16-builder/blob/master/templates/gnome-terminal/dark.sh.erb
# NOTE: the original file carried a literal mustache comment block
# ({{! ... }}) left over from the template engine; bash would try to
# execute "{{!" as a command and abort, so it is rewritten as shell
# comments here.
# Base16 Mexico Light - Gnome Terminal color scheme install script
# Sheldon Johnson
# Allow callers to override the profile identity and tool paths via env vars.
[[ -z "$PROFILE_NAME" ]] && PROFILE_NAME="Base 16 Mexico Light 256"
[[ -z "$PROFILE_SLUG" ]] && PROFILE_SLUG="base-16-mexico-light-256"
[[ -z "$DCONF" ]] && DCONF=dconf
[[ -z "$UUIDGEN" ]] && UUIDGEN=uuidgen
# Write a single dconf key under the new profile.
# $1 - key name; $2 - value (callers pass values pre-quoted for GVariant).
dset() {
local key="$1"; shift
local val="$1"; shift
# NOTE(review): "$type" is never set inside dset (it belongs to gset further
# down), so this branch only fires if $type leaks in from the environment —
# looks like a copy/paste vestige; confirm before removing.
if [[ "$type" == "string" ]]; then
val="'$val'"
fi
"$DCONF" write "$PROFILE_KEY/$key" "$val"
}
# Because dconf still doesn't have "append"
# Because dconf still doesn't have "append"
# Append $2 to the dconf list stored at key $1, de-duplicating the value:
# read the list, strip brackets, drop any existing copy of the value,
# re-add it at the end (head -c-1 trims the trailing separator).
dlist_append() {
local key="$1"; shift
local val="$1"; shift
local entries="$(
{
"$DCONF" read "$key" | tr -d '[]' | tr , "\n" | fgrep -v "$val"
echo "'$val'"
} | head -c-1 | tr "\n" ,
)"
"$DCONF" write "$key" "[$entries]"
}
# Newest versions of gnome-terminal use dconf
if which "$DCONF" > /dev/null 2>&1; then
# Check that uuidgen is available
type $UUIDGEN >/dev/null 2>&1 || { echo >&2 "Requires uuidgen but it's not installed. Aborting!"; exit 1; }
[[ -z "$BASE_KEY_NEW" ]] && BASE_KEY_NEW=/org/gnome/terminal/legacy/profiles:
# Only proceed when at least one profile already exists under the base key.
if [[ -n "`$DCONF list $BASE_KEY_NEW/`" ]]; then
# Modern gnome-terminal expects profile slugs to be UUIDs.
if which "$UUIDGEN" > /dev/null 2>&1; then
PROFILE_SLUG=`uuidgen`
fi
# Determine the default profile (explicit default, else first listed).
if [[ -n "`$DCONF read $BASE_KEY_NEW/default`" ]]; then
DEFAULT_SLUG=`$DCONF read $BASE_KEY_NEW/default | tr -d \'`
else
DEFAULT_SLUG=`$DCONF list $BASE_KEY_NEW/ | grep '^:' | head -n1 | tr -d :/`
fi
DEFAULT_KEY="$BASE_KEY_NEW/:$DEFAULT_SLUG"
PROFILE_KEY="$BASE_KEY_NEW/:$PROFILE_SLUG"
# Copy existing settings from default profile
$DCONF dump "$DEFAULT_KEY/" | $DCONF load "$PROFILE_KEY/"
# Add new copy to list of profiles
dlist_append $BASE_KEY_NEW/list "$PROFILE_SLUG"
# Update profile values with theme options
dset visible-name "'$PROFILE_NAME'"
dset palette "['#f8f8f8', '#ab4642', '#538947', '#f79a0e', '#7cafc2', '#96609e', '#4b8093', '#383838', '#b8b8b8', '#ab4642', '#538947', '#f79a0e', '#7cafc2', '#96609e', '#4b8093', '#181818']"
dset background-color "'#f8f8f8'"
dset foreground-color "'#383838'"
dset bold-color "'#383838'"
dset bold-color-same-as-fg "true"
dset cursor-colors-set "true"
dset cursor-background-color "'#383838'"
dset cursor-foreground-color "'#f8f8f8'"
dset use-theme-colors "false"
dset use-theme-background "false"
unset PROFILE_NAME
unset PROFILE_SLUG
unset DCONF
unset UUIDGEN
# Done — skip the legacy gconf fallback below.
exit 0
fi
fi
# Fallback for Gnome 2 and early Gnome 3
[[ -z "$GCONFTOOL" ]] && GCONFTOOL=gconftool
[[ -z "$BASE_KEY" ]] && BASE_KEY=/apps/gnome-terminal/profiles
PROFILE_KEY="$BASE_KEY/$PROFILE_SLUG"
# Write a single gconf key under the profile.
# $1 - gconf value type (string/bool/...); $2 - key name; $3 - value.
gset() {
local type="$1"; shift
local key="$1"; shift
local val="$1"; shift
"$GCONFTOOL" --set --type "$type" "$PROFILE_KEY/$key" -- "$val"
}
# Because gconftool doesn't have "append"
# Because gconftool doesn't have "append"
# Append $3 (of list element type $1) to the gconf list at key $2,
# de-duplicating the value first.
glist_append() {
local type="$1"; shift
local key="$1"; shift
local val="$1"; shift
local entries="$(
{
"$GCONFTOOL" --get "$key" | tr -d '[]' | tr , "\n" | fgrep -v "$val"
echo "$val"
} | head -c-1 | tr "\n" ,
)"
"$GCONFTOOL" --set --type list --list-type $type "$key" "[$entries]"
}
# Append the Base16 profile to the profile list
glist_append string /apps/gnome-terminal/global/profile_list "$PROFILE_SLUG"
gset string visible_name "$PROFILE_NAME"
gset string palette "#f8f8f8:#ab4642:#538947:#f79a0e:#7cafc2:#96609e:#4b8093:#383838:#b8b8b8:#ab4642:#538947:#f79a0e:#7cafc2:#96609e:#4b8093:#181818"
gset string background_color "#f8f8f8"
gset string foreground_color "#383838"
gset string bold_color "#383838"
gset bool bold_color_same_as_fg "true"
# NOTE(review): the next three lines use dash-separated, pre-quoted dconf-style
# keys inside the gconf section (all other gconf keys here use underscores and
# bare values) — presumably leaked from the dconf template above; verify the
# correct legacy gconf key names before relying on them.
gset bool cursor-colors-set "true"
gset string cursor-background-color "'#383838'"
gset string cursor-foreground-color "'#f8f8f8'"
gset bool use_theme_colors "false"
gset bool use_theme_background "false"
unset PROFILE_NAME
unset PROFILE_SLUG
unset DCONF
unset UUIDGEN
|
#!/bin/bash
# Function to set the system timezone
# Set the system timezone via timedatectl.
# $1 - tz database name, e.g. "America/New_York".
set_timezone() {
  # `local` keeps the variable out of the caller's scope; quoting guards
  # against word splitting if an unusual value is ever passed in.
  local timezone="$1"
  sudo timedatectl set-timezone "$timezone"
  echo "System timezone set to $timezone"
}
# Function to enable unattended upgrades
# Install and configure unattended security upgrades.
enable_unattended_upgrades() {
  # -y keeps apt-get from blocking on an interactive confirmation prompt
  # when this runs inside a script.
  sudo apt-get install -y unattended-upgrades
  sudo dpkg-reconfigure --priority=low unattended-upgrades
  echo "Unattended upgrades for security updates enabled"
}
# Display menu for timezone selection
echo "Select a timezone:"
echo "1. Pacific Time"
echo "2. Eastern Time"
echo "3. Central Time"
echo "4. Mountain Time"
echo "5. Quit"
read -p "Enter your choice: " choice
# Every timezone choice also enables unattended upgrades; 5 exits cleanly,
# anything else exits with a non-zero status.
case $choice in
1) set_timezone "America/Los_Angeles"; enable_unattended_upgrades ;;
2) set_timezone "America/New_York"; enable_unattended_upgrades ;;
3) set_timezone "America/Chicago"; enable_unattended_upgrades ;;
4) set_timezone "America/Denver"; enable_unattended_upgrades ;;
5) echo "Exiting script"; exit ;;
*) echo "Invalid choice"; exit 1 ;;
esac
|
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require 'date'
require 'google/apis/core/base_service'
require 'google/apis/core/json_representation'
require 'google/apis/core/hashable'
require 'google/apis/errors'
module Google
module Apis
module MonitoringV3
# Describes how to combine multiple time series to provide different views of
# the data. Aggregation consists of an alignment step on individual time series (
# alignment_period and per_series_aligner) followed by an optional reduction
# step of the data across the aligned time series (cross_series_reducer and
# group_by_fields). For more details, see Aggregation.
class Aggregation
include Google::Apis::Core::Hashable
# The alignment period for per-time series alignment. If present,
# alignmentPeriod must be at least 60 seconds. After per-time series alignment,
# each time series will contain data points only on the period boundaries. If
# perSeriesAligner is not specified or equals ALIGN_NONE, then this field is
# ignored. If perSeriesAligner is specified and does not equal ALIGN_NONE, then
# this field must be defined; otherwise an error is returned.
# Corresponds to the JSON property `alignmentPeriod`
# @return [String]
attr_accessor :alignment_period
# The approach to be used to combine time series. Not all reducer functions may
# be applied to all time series, depending on the metric type and the value type
# of the original time series. Reduction may change the metric type of value
# type of the time series.Time series data must be aligned in order to perform
# cross-time series reduction. If crossSeriesReducer is specified, then
# perSeriesAligner must be specified and not equal ALIGN_NONE and
# alignmentPeriod must be specified; otherwise, an error is returned.
# Corresponds to the JSON property `crossSeriesReducer`
# @return [String]
attr_accessor :cross_series_reducer
# The set of fields to preserve when crossSeriesReducer is specified. The
# groupByFields determine how the time series are partitioned into subsets prior
# to applying the aggregation function. Each subset contains time series that
# have the same value for each of the grouping fields. Each individual time
# series is a member of exactly one subset. The crossSeriesReducer is applied to
# each subset of time series. It is not possible to reduce across different
# resource types, so this field implicitly contains resource.type. Fields not
# specified in groupByFields are aggregated away. If groupByFields is not
# specified and all the time series have the same resource type, then the time
# series are aggregated into a single output time series. If crossSeriesReducer
# is not defined, this field is ignored.
# Corresponds to the JSON property `groupByFields`
# @return [Array<String>]
attr_accessor :group_by_fields
# The approach to be used to align individual time series. Not all alignment
# functions may be applied to all time series, depending on the metric type and
# value type of the original time series. Alignment may change the metric type
# or the value type of the time series.Time series data must be aligned in order
# to perform cross-time series reduction. If crossSeriesReducer is specified,
# then perSeriesAligner must be specified and not equal ALIGN_NONE and
# alignmentPeriod must be specified; otherwise, an error is returned.
# Corresponds to the JSON property `perSeriesAligner`
# @return [String]
attr_accessor :per_series_aligner
# Builds the object from keyword arguments (delegates to update!).
def initialize(**args)
update!(**args)
end
# Update properties of this object.
# Only keys actually present in args are assigned; absent keys leave the
# corresponding attributes untouched.
def update!(**args)
@alignment_period = args[:alignment_period] if args.key?(:alignment_period)
@cross_series_reducer = args[:cross_series_reducer] if args.key?(:cross_series_reducer)
@group_by_fields = args[:group_by_fields] if args.key?(:group_by_fields)
@per_series_aligner = args[:per_series_aligner] if args.key?(:per_series_aligner)
end
end
# A description of the conditions under which some aspect of your system is
# considered to be "unhealthy" and the ways to notify people or services about
# this state. For an overview of alert policies, see Introduction to Alerting.
class AlertPolicy
include Google::Apis::Core::Hashable
# How to combine the results of multiple conditions to determine if an incident
# should be opened.
# Corresponds to the JSON property `combiner`
# @return [String]
attr_accessor :combiner
# A list of conditions for the policy. The conditions are combined by AND or OR
# according to the combiner field. If the combined conditions evaluate to true,
# then an incident is created. A policy can have from one to six conditions.
# Corresponds to the JSON property `conditions`
# @return [Array<Google::Apis::MonitoringV3::Condition>]
attr_accessor :conditions
# Describes a change made to a configuration.
# Corresponds to the JSON property `creationRecord`
# @return [Google::Apis::MonitoringV3::MutationRecord]
attr_accessor :creation_record
# A short name or phrase used to identify the policy in dashboards,
# notifications, and incidents. To avoid confusion, don't use the same display
# name for multiple policies in the same project. The name is limited to 512
# Unicode characters.
# Corresponds to the JSON property `displayName`
# @return [String]
attr_accessor :display_name
# A content string and a MIME type that describes the content string's format.
# Corresponds to the JSON property `documentation`
# @return [Google::Apis::MonitoringV3::Documentation]
attr_accessor :documentation
# Whether or not the policy is enabled. On write, the default interpretation if
# unset is that the policy is enabled. On read, clients should not make any
# assumption about the state if it has not been populated. The field should
# always be populated on List and Get operations, unless a field projection has
# been specified that strips it out.
# Corresponds to the JSON property `enabled`
# @return [Boolean]
attr_accessor :enabled
alias_method :enabled?, :enabled
# Describes a change made to a configuration.
# Corresponds to the JSON property `mutationRecord`
# @return [Google::Apis::MonitoringV3::MutationRecord]
attr_accessor :mutation_record
# Required if the policy exists. The resource name for this policy. The syntax
# is:
# projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]
# [ALERT_POLICY_ID] is assigned by Stackdriver Monitoring when the policy is
# created. When calling the alertPolicies.create method, do not include the name
# field in the alerting policy passed as part of the request.
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name
# Identifies the notification channels to which notifications should be sent
# when incidents are opened or closed or when new violations occur on an already
# opened incident. Each element of this array corresponds to the name field in
# each of the NotificationChannel objects that are returned from the
# ListNotificationChannels method. The syntax of the entries in this field is:
# projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]
# Corresponds to the JSON property `notificationChannels`
# @return [Array<String>]
attr_accessor :notification_channels
# User-supplied key/value data to be used for organizing and identifying the
# AlertPolicy objects.The field can contain up to 64 entries. Each key and value
# is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels
# and values can contain only lowercase letters, numerals, underscores, and
# dashes. Keys must begin with a letter.
# Corresponds to the JSON property `userLabels`
# @return [Hash<String,String>]
attr_accessor :user_labels
# Builds the object from keyword arguments (delegates to update!).
def initialize(**args)
update!(**args)
end
# Update properties of this object.
# Only keys actually present in args are assigned.
def update!(**args)
@combiner = args[:combiner] if args.key?(:combiner)
@conditions = args[:conditions] if args.key?(:conditions)
@creation_record = args[:creation_record] if args.key?(:creation_record)
@display_name = args[:display_name] if args.key?(:display_name)
@documentation = args[:documentation] if args.key?(:documentation)
@enabled = args[:enabled] if args.key?(:enabled)
@mutation_record = args[:mutation_record] if args.key?(:mutation_record)
@name = args[:name] if args.key?(:name)
@notification_channels = args[:notification_channels] if args.key?(:notification_channels)
@user_labels = args[:user_labels] if args.key?(:user_labels)
end
end
# A type of authentication to perform against the specified resource or URL that
# uses username and password. Currently, only Basic authentication is supported
# in Uptime Monitoring.
class BasicAuthentication
include Google::Apis::Core::Hashable
# The password to authenticate.
# Corresponds to the JSON property `password`
# @return [String]
attr_accessor :password
# The username to authenticate.
# Corresponds to the JSON property `username`
# @return [String]
attr_accessor :username
# Builds the object from keyword arguments (delegates to update!).
def initialize(**args)
update!(**args)
end
# Update properties of this object.
# Only keys actually present in args are assigned.
def update!(**args)
@password = args[:password] if args.key?(:password)
@username = args[:username] if args.key?(:username)
end
end
# BucketOptions describes the bucket boundaries used to create a histogram for
# the distribution. The buckets can be in a linear sequence, an exponential
# sequence, or each bucket can be specified explicitly. BucketOptions does not
# include the number of values in each bucket.A bucket has an inclusive lower
# bound and exclusive upper bound for the values that are counted for that
# bucket. The upper bound of a bucket must be strictly greater than the lower
# bound. The sequence of N buckets for a distribution consists of an underflow
# bucket (number 0), zero or more finite buckets (number 1 through N - 2) and an
# overflow bucket (number N - 1). The buckets are contiguous: the lower bound of
# bucket i (i > 0) is the same as the upper bound of bucket i - 1. The buckets
# span the whole range of finite values: lower bound of the underflow bucket is -
# infinity and the upper bound of the overflow bucket is +infinity. The finite
# buckets are so-called because both bounds are finite.
class BucketOptions
include Google::Apis::Core::Hashable
# Specifies a set of buckets with arbitrary widths.There are size(bounds) + 1 (=
# N) buckets. Bucket i has the following boundaries:Upper bound (0 <= i < N-1):
# boundsi Lower bound (1 <= i < N); boundsi - 1The bounds field must contain at
# least one element. If bounds has only one element, then there are no finite
# buckets, and that single element is the common boundary of the overflow and
# underflow buckets.
# Corresponds to the JSON property `explicitBuckets`
# @return [Google::Apis::MonitoringV3::Explicit]
attr_accessor :explicit_buckets
# Specifies an exponential sequence of buckets that have a width that is
# proportional to the value of the lower bound. Each bucket represents a
# constant relative uncertainty on a specific value in the bucket.There are
# num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:
# Upper bound (0 <= i < N-1): scale * (growth_factor ^ i). Lower bound (1 <= i <
# N): scale * (growth_factor ^ (i - 1)).
# Corresponds to the JSON property `exponentialBuckets`
# @return [Google::Apis::MonitoringV3::Exponential]
attr_accessor :exponential_buckets
# Specifies a linear sequence of buckets that all have the same width (except
# overflow and underflow). Each bucket represents a constant absolute
# uncertainty on the specific value in the bucket.There are num_finite_buckets +
# 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 <= i < N-
# 1): offset + (width * i). Lower bound (1 <= i < N): offset + (width * (i - 1))
# .
# Corresponds to the JSON property `linearBuckets`
# @return [Google::Apis::MonitoringV3::Linear]
attr_accessor :linear_buckets
# Builds the object from keyword arguments (delegates to update!).
def initialize(**args)
update!(**args)
end
# Update properties of this object.
# Only keys actually present in args are assigned.
def update!(**args)
@explicit_buckets = args[:explicit_buckets] if args.key?(:explicit_buckets)
@exponential_buckets = args[:exponential_buckets] if args.key?(:exponential_buckets)
@linear_buckets = args[:linear_buckets] if args.key?(:linear_buckets)
end
end
# A collection of data points sent from a collectd-based plugin. See the
# collectd documentation for more information.
class CollectdPayload
include Google::Apis::Core::Hashable
# The end time of the interval.
# Corresponds to the JSON property `endTime`
# @return [String]
attr_accessor :end_time
# The measurement metadata. Example: "process_id" -> 12345
# Corresponds to the JSON property `metadata`
# @return [Hash<String,Google::Apis::MonitoringV3::TypedValue>]
attr_accessor :metadata
# The name of the plugin. Example: "disk".
# Corresponds to the JSON property `plugin`
# @return [String]
attr_accessor :plugin
# The instance name of the plugin Example: "hdcl".
# Corresponds to the JSON property `pluginInstance`
# @return [String]
attr_accessor :plugin_instance
# The start time of the interval.
# Corresponds to the JSON property `startTime`
# @return [String]
attr_accessor :start_time
# The measurement type. Example: "memory".
# Corresponds to the JSON property `type`
# @return [String]
attr_accessor :type
# The measurement type instance. Example: "used".
# Corresponds to the JSON property `typeInstance`
# @return [String]
attr_accessor :type_instance
# The measured values during this time interval. Each value must have a
# different dataSourceName.
# Corresponds to the JSON property `values`
# @return [Array<Google::Apis::MonitoringV3::CollectdValue>]
attr_accessor :values
# Builds the object from keyword arguments (delegates to update!).
def initialize(**args)
update!(**args)
end
# Update properties of this object.
# Only keys actually present in args are assigned.
def update!(**args)
@end_time = args[:end_time] if args.key?(:end_time)
@metadata = args[:metadata] if args.key?(:metadata)
@plugin = args[:plugin] if args.key?(:plugin)
@plugin_instance = args[:plugin_instance] if args.key?(:plugin_instance)
@start_time = args[:start_time] if args.key?(:start_time)
@type = args[:type] if args.key?(:type)
@type_instance = args[:type_instance] if args.key?(:type_instance)
@values = args[:values] if args.key?(:values)
end
end
# Describes the error status for payloads that were not written.
class CollectdPayloadError
  include Google::Apis::Core::Hashable

  # The Status type is the canonical logical error model shared by REST and
  # RPC APIs (used by gRPC, https://github.com/grpc). It carries three pieces
  # of data: an error code (normally a google.rpc.Code enum value), a
  # developer-facing English error message, and optional structured error
  # details drawn from the predefined types in the google.rpc package.
  # Localized, user-facing messages belong in the details or in the client.
  # The Status message is a logical representation, not a wire format; client
  # libraries may map it to exceptions (Java) or error codes (C). It is also
  # used for partial errors, per-step workflow errors, batch sub-responses,
  # asynchronous operation results, and (after any security/privacy
  # stripping) log entries.
  # Corresponds to the JSON property `error`
  # @return [Google::Apis::MonitoringV3::Status]
  attr_accessor :error

  # The zero-based index in CreateCollectdTimeSeriesRequest.collectd_payloads.
  # Corresponds to the JSON property `index`
  # @return [Fixnum]
  attr_accessor :index

  # Records the error status for values that were not written due to an
  # error. Failed payloads for which nothing is written will not include
  # partial value errors.
  # Corresponds to the JSON property `valueErrors`
  # @return [Array<Google::Apis::MonitoringV3::CollectdValueError>]
  attr_accessor :value_errors

  def initialize(**args)
    update!(**args)
  end

  # Copy every recognized keyword argument into its matching attribute;
  # absent keys leave the attribute untouched.
  def update!(**args)
    %i[error index value_errors].each do |prop|
      instance_variable_set(:"@#{prop}", args[prop]) if args.key?(prop)
    end
  end
end
# A single data point from a collectd-based plugin.
class CollectdValue
  include Google::Apis::Core::Hashable

  # The data source for the collectd value. For example, network
  # measurements have two data sources: "rx" and "tx".
  # Corresponds to the JSON property `dataSourceName`
  # @return [String]
  attr_accessor :data_source_name

  # The type of measurement.
  # Corresponds to the JSON property `dataSourceType`
  # @return [String]
  attr_accessor :data_source_type

  # A single strongly-typed value.
  # Corresponds to the JSON property `value`
  # @return [Google::Apis::MonitoringV3::TypedValue]
  attr_accessor :value

  def initialize(**args)
    update!(**args)
  end

  # Copy every recognized keyword argument into its matching attribute;
  # absent keys leave the attribute untouched.
  def update!(**args)
    self.data_source_name = args[:data_source_name] if args.key?(:data_source_name)
    self.data_source_type = args[:data_source_type] if args.key?(:data_source_type)
    self.value = args[:value] if args.key?(:value)
  end
end
# Describes the error status for values that were not written.
class CollectdValueError
  include Google::Apis::Core::Hashable

  # The Status type is the canonical logical error model shared by REST and
  # RPC APIs (used by gRPC, https://github.com/grpc). It carries three pieces
  # of data: an error code (normally a google.rpc.Code enum value), a
  # developer-facing English error message, and optional structured error
  # details drawn from the predefined types in the google.rpc package.
  # Localized, user-facing messages belong in the details or in the client.
  # The Status message is a logical representation, not a wire format; client
  # libraries may map it to exceptions (Java) or error codes (C). It is also
  # used for partial errors, per-step workflow errors, batch sub-responses,
  # asynchronous operation results, and (after any security/privacy
  # stripping) log entries.
  # Corresponds to the JSON property `error`
  # @return [Google::Apis::MonitoringV3::Status]
  attr_accessor :error

  # The zero-based index in CollectdPayload.values within the parent
  # CreateCollectdTimeSeriesRequest.collectd_payloads.
  # Corresponds to the JSON property `index`
  # @return [Fixnum]
  attr_accessor :index

  def initialize(**args)
    update!(**args)
  end

  # Copy every recognized keyword argument into its matching attribute;
  # absent keys leave the attribute untouched.
  def update!(**args)
    self.error = args[:error] if args.key?(:error)
    self.index = args[:index] if args.key?(:index)
  end
end
# A condition is a true/false test that determines when an alerting policy
# should open an incident. A condition evaluating to true signifies that
# something is wrong.
class Condition
  include Google::Apis::Core::Hashable

  # A condition type that checks that monitored resources are reporting
  # data. The configuration defines a metric and a set of monitored
  # resources. The predicate is considered in violation when a time series
  # for the specified metric of a monitored resource does not include any
  # data in the specified duration.
  # Corresponds to the JSON property `conditionAbsent`
  # @return [Google::Apis::MonitoringV3::MetricAbsence]
  attr_accessor :condition_absent

  # A condition type comparing a collection of time series against a
  # threshold.
  # Corresponds to the JSON property `conditionThreshold`
  # @return [Google::Apis::MonitoringV3::MetricThreshold]
  attr_accessor :condition_threshold

  # A short name or phrase identifying the condition in dashboards,
  # notifications, and incidents. To avoid confusion, don't reuse the same
  # display name across conditions in the same policy.
  # Corresponds to the JSON property `displayName`
  # @return [String]
  attr_accessor :display_name

  # Required if the condition exists. The unique resource name for this
  # condition. Its syntax is:
  # projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID]
  # [CONDITION_ID] is assigned by Stackdriver Monitoring when the condition
  # is created as part of a new or updated alerting policy. When calling
  # alertPolicies.create, omit the name field from the conditions; the
  # service creates the identifiers and includes them in the new policy.
  # When calling alertPolicies.update, a condition that includes a name is
  # updated in place; conditions without names are added, and existing
  # conditions not referenced are deleted. Best practice is to preserve
  # [CONDITION_ID] for small changes (thresholds, durations, trigger
  # values); otherwise treat the change as a new condition and let the old
  # one be deleted.
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name

  def initialize(**args)
    update!(**args)
  end

  # Copy every recognized keyword argument into its matching attribute;
  # absent keys leave the attribute untouched.
  def update!(**args)
    %i[condition_absent condition_threshold display_name name].each do |prop|
      instance_variable_set(:"@#{prop}", args[prop]) if args.key?(prop)
    end
  end
end
# Used to perform string matching. Currently this matches on exact content;
# it may later be expanded to allow regular expressions and more complex
# matching.
class ContentMatcher
  include Google::Apis::Core::Hashable

  # String content to match (max 1024 bytes).
  # Corresponds to the JSON property `content`
  # @return [String]
  attr_accessor :content

  def initialize(**args)
    update!(**args)
  end

  # Copy the recognized keyword argument into its attribute; an absent key
  # leaves the attribute untouched.
  def update!(**args)
    self.content = args[:content] if args.key?(:content)
  end
end
# The CreateCollectdTimeSeries request.
class CreateCollectdTimeSeriesRequest
  include Google::Apis::Core::Hashable

  # The collectd payloads representing the time series data. Include at most
  # a single point per time series: no two payloads may share the same
  # values for all of plugin, plugin_instance, type, and type_instance.
  # Corresponds to the JSON property `collectdPayloads`
  # @return [Array<Google::Apis::MonitoringV3::CollectdPayload>]
  attr_accessor :collectd_payloads

  # The version of collectd that collected the data, e.g. "5.3.0-192.el6".
  # Corresponds to the JSON property `collectdVersion`
  # @return [String]
  attr_accessor :collectd_version

  # An object representing a resource usable for monitoring, logging,
  # billing, or other purposes — e.g. virtual machine instances, databases,
  # and storage devices such as disks. The type field identifies a
  # MonitoredResourceDescriptor object describing the resource's schema, and
  # the labels field identifies the actual resource and its attributes per
  # that schema. For example, a Compute Engine VM instance could be
  # represented as follows, because the MonitoredResourceDescriptor for
  # "gce_instance" has labels "instance_id" and "zone":
  # ` "type": "gce_instance",
  # "labels": ` "instance_id": "12345678901234",
  # "zone": "us-central1-a" ``
  # Corresponds to the JSON property `resource`
  # @return [Google::Apis::MonitoringV3::MonitoredResource]
  attr_accessor :resource

  def initialize(**args)
    update!(**args)
  end

  # Copy every recognized keyword argument into its matching attribute;
  # absent keys leave the attribute untouched.
  def update!(**args)
    %i[collectd_payloads collectd_version resource].each do |prop|
      instance_variable_set(:"@#{prop}", args[prop]) if args.key?(prop)
    end
  end
end
# The CreateCollectdTimeSeries response.
class CreateCollectdTimeSeriesResponse
  include Google::Apis::Core::Hashable

  # Records the error status for points that were not written due to an
  # error. Failed requests for which nothing is written return an error
  # response instead.
  # Corresponds to the JSON property `payloadErrors`
  # @return [Array<Google::Apis::MonitoringV3::CollectdPayloadError>]
  attr_accessor :payload_errors

  def initialize(**args)
    update!(**args)
  end

  # Copy the recognized keyword argument into its attribute; an absent key
  # leaves the attribute untouched.
  def update!(**args)
    self.payload_errors = args[:payload_errors] if args.key?(:payload_errors)
  end
end
# The CreateTimeSeries request.
class CreateTimeSeriesRequest
  include Google::Apis::Core::Hashable

  # The new data to be added to a list of time series. Adds at most one data
  # point to each of several time series. Each new data point must be more
  # recent than any other point in its time series, and each TimeSeries
  # value must fully specify a unique time series by supplying all label
  # values for the metric and the monitored resource.
  # Corresponds to the JSON property `timeSeries`
  # @return [Array<Google::Apis::MonitoringV3::TimeSeries>]
  attr_accessor :time_series

  def initialize(**args)
    update!(**args)
  end

  # Copy the recognized keyword argument into its attribute; an absent key
  # leaves the attribute untouched.
  def update!(**args)
    self.time_series = args[:time_series] if args.key?(:time_series)
  end
end
# Distribution contains summary statistics for a population of values, and
# optionally a histogram representing how those values are distributed
# across a set of buckets. The summary statistics are the count, mean, sum
# of squared deviation from the mean, minimum, and maximum of the
# population. The histogram is based on a sequence of buckets and gives a
# count of values falling into each; bucket boundaries are given either
# explicitly or by formulas for buckets of fixed or exponentially increasing
# widths. Although not forbidden, including non-finite values (infinities or
# NaNs) in the population is generally a bad idea, as it renders the mean
# and sum_of_squared_deviation fields meaningless.
class Distribution
  include Google::Apis::Core::Hashable

  # Required in the Stackdriver Monitoring API v3. The per-bucket values for
  # the buckets specified in bucket_options. The sum of the values in
  # bucketCounts must equal the count field of the Distribution. The order
  # follows the numbering schemes described for the three bucket types: the
  # underflow bucket is number 0, the finite buckets (if any) are 1 through
  # N-2, and the overflow bucket is N-1. bucket_counts must not be longer
  # than N; if shorter, the remaining buckets are taken to be zero.
  # Corresponds to the JSON property `bucketCounts`
  # @return [Array<Fixnum>]
  attr_accessor :bucket_counts

  # BucketOptions describes the bucket boundaries used to build a histogram
  # for the distribution. Buckets can form a linear sequence, an exponential
  # sequence, or be specified explicitly; BucketOptions does not include the
  # number of values per bucket. A bucket has an inclusive lower bound and
  # an exclusive upper bound, with the upper bound strictly greater than the
  # lower. The sequence of N buckets consists of an underflow bucket (number
  # 0), zero or more finite buckets (1 through N - 2), and an overflow
  # bucket (N - 1). Buckets are contiguous — the lower bound of bucket i
  # (i > 0) equals the upper bound of bucket i - 1 — and together span all
  # finite values: the underflow bucket's lower bound is -infinity and the
  # overflow bucket's upper bound is +infinity. The finite buckets are
  # so-called because both of their bounds are finite.
  # Corresponds to the JSON property `bucketOptions`
  # @return [Google::Apis::MonitoringV3::BucketOptions]
  attr_accessor :bucket_options

  # The number of values in the population. Must be non-negative, and must
  # equal the sum of bucket_counts when a histogram is provided.
  # Corresponds to the JSON property `count`
  # @return [Fixnum]
  attr_accessor :count

  # Must be in increasing order of value field.
  # Corresponds to the JSON property `exemplars`
  # @return [Array<Google::Apis::MonitoringV3::Exemplar>]
  attr_accessor :exemplars

  # The arithmetic mean of the population values; must be zero when count is
  # zero.
  # Corresponds to the JSON property `mean`
  # @return [Float]
  attr_accessor :mean

  # The range of the population values.
  # Corresponds to the JSON property `range`
  # @return [Google::Apis::MonitoringV3::Range]
  attr_accessor :range

  # The sum of squared deviations from the mean of the population values —
  # for values x_i this is:
  # Sum[i=1..n]((x_i - mean)^2)
  # Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition
  # describes Welford's method for accumulating this sum in one pass. Must
  # be zero when count is zero.
  # Corresponds to the JSON property `sumOfSquaredDeviation`
  # @return [Float]
  attr_accessor :sum_of_squared_deviation

  def initialize(**args)
    update!(**args)
  end

  # Copy every recognized keyword argument into its matching attribute;
  # absent keys leave the attribute untouched.
  def update!(**args)
    %i[bucket_counts bucket_options count exemplars mean range
       sum_of_squared_deviation].each do |prop|
      instance_variable_set(:"@#{prop}", args[prop]) if args.key?(prop)
    end
  end
end
# A content string plus a MIME type describing the content string's format.
class Documentation
  include Google::Apis::Core::Hashable

  # The documentation text, interpreted according to mime_type. The content
  # may not exceed 8,192 Unicode characters nor 10,240 bytes when encoded in
  # UTF-8, whichever is smaller.
  # Corresponds to the JSON property `content`
  # @return [String]
  attr_accessor :content

  # The format of the content field. Presently only "text/markdown" is
  # supported; see Markdown (https://en.wikipedia.org/wiki/Markdown) for
  # more information.
  # Corresponds to the JSON property `mimeType`
  # @return [String]
  attr_accessor :mime_type

  def initialize(**args)
    update!(**args)
  end

  # Copy every recognized keyword argument into its matching attribute;
  # absent keys leave the attribute untouched.
  def update!(**args)
    self.content = args[:content] if args.key?(:content)
    self.mime_type = args[:mime_type] if args.key?(:mime_type)
  end
end
# A generic empty message that can be reused to avoid defining duplicated
# empty messages in your APIs — typically as the request or response type of
# an API method. For instance:
# service Foo `
# rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
# `
# The JSON representation for Empty is the empty JSON object ``.
class Empty
  include Google::Apis::Core::Hashable

  def initialize(**args)
    update!(**args)
  end

  # No properties to update; accepts keyword arguments for uniformity with
  # the other schema classes and ignores them.
  def update!(**args); end
end
# Exemplars are example points that may be used to annotate aggregated
# distribution values. They are metadata about a particular value added to a
# Distribution bucket — such as a trace ID that was active when the value
# was added — and may carry further information such as example values,
# timestamps, and origin.
class Exemplar
  include Google::Apis::Core::Hashable

  # Contextual information about the example value. Examples are:
  # Trace ID: type.googleapis.com/google.devtools.cloudtrace.v1.Trace
  # Literal string: type.googleapis.com/google.protobuf.StringValue
  # Labels dropped during aggregation:
  # type.googleapis.com/google.monitoring.v3.DroppedLabels
  # At most one attachment of any given message type is allowed per
  # exemplar; this is enforced by the system.
  # Corresponds to the JSON property `attachments`
  # @return [Array<Hash<String,Object>>]
  attr_accessor :attachments

  # The observation (sampling) time of the above value.
  # Corresponds to the JSON property `timestamp`
  # @return [String]
  attr_accessor :timestamp

  # Value of the exemplar point; determines which bucket the exemplar
  # belongs to.
  # Corresponds to the JSON property `value`
  # @return [Float]
  attr_accessor :value

  def initialize(**args)
    update!(**args)
  end

  # Copy every recognized keyword argument into its matching attribute;
  # absent keys leave the attribute untouched.
  def update!(**args)
    %i[attachments timestamp value].each do |prop|
      instance_variable_set(:"@#{prop}", args[prop]) if args.key?(prop)
    end
  end
end
# Specifies a set of buckets with arbitrary widths. There are size(bounds) +
# 1 (= N) buckets. Bucket i has the following boundaries:
# Upper bound (0 <= i < N-1): bounds[i]. Lower bound (1 <= i < N):
# bounds[i - 1]. The bounds field must contain at least one element; if it
# has exactly one element there are no finite buckets, and that single
# element is the common boundary of the overflow and underflow buckets.
class Explicit
  include Google::Apis::Core::Hashable

  # The values must be monotonically increasing.
  # Corresponds to the JSON property `bounds`
  # @return [Array<Float>]
  attr_accessor :bounds

  def initialize(**args)
    update!(**args)
  end

  # Copy the recognized keyword argument into its attribute; an absent key
  # leaves the attribute untouched.
  def update!(**args)
    self.bounds = args[:bounds] if args.key?(:bounds)
  end
end
# Specifies an exponential sequence of buckets whose widths are proportional
# to the value of the lower bound; each bucket represents a constant
# relative uncertainty on a specific value in the bucket. There are
# num_finite_buckets + 2 (= N) buckets. Bucket i has the following
# boundaries: Upper bound (0 <= i < N-1): scale * (growth_factor ^ i).
# Lower bound (1 <= i < N): scale * (growth_factor ^ (i - 1)).
class Exponential
  include Google::Apis::Core::Hashable

  # Must be greater than 1.
  # Corresponds to the JSON property `growthFactor`
  # @return [Float]
  attr_accessor :growth_factor

  # Must be greater than 0.
  # Corresponds to the JSON property `numFiniteBuckets`
  # @return [Fixnum]
  attr_accessor :num_finite_buckets

  # Must be greater than 0.
  # Corresponds to the JSON property `scale`
  # @return [Float]
  attr_accessor :scale

  def initialize(**args)
    update!(**args)
  end

  # Copy every recognized keyword argument into its matching attribute;
  # absent keys leave the attribute untouched.
  def update!(**args)
    %i[growth_factor num_finite_buckets scale].each do |prop|
      instance_variable_set(:"@#{prop}", args[prop]) if args.key?(prop)
    end
  end
end
# A single field of a message type.
class Field
  include Google::Apis::Core::Hashable

  # The field cardinality.
  # Corresponds to the JSON property `cardinality`
  # @return [String]
  attr_accessor :cardinality

  # String form of the field's default value. Proto2 syntax only.
  # Corresponds to the JSON property `defaultValue`
  # @return [String]
  attr_accessor :default_value

  # The field JSON name.
  # Corresponds to the JSON property `jsonName`
  # @return [String]
  attr_accessor :json_name

  # The field type.
  # Corresponds to the JSON property `kind`
  # @return [String]
  attr_accessor :kind

  # The field name.
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name

  # The field number.
  # Corresponds to the JSON property `number`
  # @return [Fixnum]
  attr_accessor :number

  # The index of the field type in Type.oneofs, for message or enumeration
  # types. The first type has index 1; zero means the type is not in the
  # list.
  # Corresponds to the JSON property `oneofIndex`
  # @return [Fixnum]
  attr_accessor :oneof_index

  # The protocol buffer options.
  # Corresponds to the JSON property `options`
  # @return [Array<Google::Apis::MonitoringV3::Option>]
  attr_accessor :options

  # Whether to use the alternative packed wire representation.
  # Corresponds to the JSON property `packed`
  # @return [Boolean]
  attr_accessor :packed
  alias_method :packed?, :packed

  # The field type URL, without the scheme, for message or enumeration
  # types. Example: "type.googleapis.com/google.protobuf.Timestamp".
  # Corresponds to the JSON property `typeUrl`
  # @return [String]
  attr_accessor :type_url

  def initialize(**args)
    update!(**args)
  end

  # Copy every recognized keyword argument into its matching attribute;
  # absent keys leave the attribute untouched.
  def update!(**args)
    %i[cardinality default_value json_name kind name number oneof_index
       options packed type_url].each do |prop|
      instance_variable_set(:"@#{prop}", args[prop]) if args.key?(prop)
    end
  end
end
# The GetNotificationChannelVerificationCode request.
class GetNotificationChannelVerificationCodeRequest
  include Google::Apis::Core::Hashable

  # The desired expiration time. If specified, the API guarantees the
  # returned code will not be valid after this timestamp; it cannot,
  # however, guarantee the code stays valid for as long as requested (the
  # API puts an upper bound on how long a code may be valid). If omitted, a
  # default expiration is used, which may be less than the maximum
  # permissible expiration — so supplying an expiration may extend the
  # code's lifetime over omitting one, even though the API imposes an upper
  # limit on the maximum expiration permitted.
  # Corresponds to the JSON property `expireTime`
  # @return [String]
  attr_accessor :expire_time

  def initialize(**args)
    update!(**args)
  end

  # Copy the recognized keyword argument into its attribute; an absent key
  # leaves the attribute untouched.
  def update!(**args)
    self.expire_time = args[:expire_time] if args.key?(:expire_time)
  end
end
# The GetNotificationChannelVerificationCode response.
class GetNotificationChannelVerificationCodeResponse
  include Google::Apis::Core::Hashable

  # The verification code, which may be used to verify other channels that
  # have an equivalent identity (i.e. other channels of the same type with
  # the same fingerprint, such as other email channels with the same email
  # address or other SMS channels with the same number).
  # Corresponds to the JSON property `code`
  # @return [String]
  attr_accessor :code

  # The expiration time associated with the returned code. If an expiration
  # was provided in the request, this is the minimum of that requested
  # expiration and the maximum permitted expiration.
  # Corresponds to the JSON property `expireTime`
  # @return [String]
  attr_accessor :expire_time

  def initialize(**args)
    update!(**args)
  end

  # Copy every recognized keyword argument into its matching attribute;
  # absent keys leave the attribute untouched.
  def update!(**args)
    self.code = args[:code] if args.key?(:code)
    self.expire_time = args[:expire_time] if args.key?(:expire_time)
  end
end
# The description of a dynamic collection of monitored resources. Each group
# has a filter matched against monitored resources and their metadata; a
# resource matching a group's filter is a member of that group. Groups can
# contain any number of monitored resources, and a resource can belong to
# any number of groups. Groups can be nested into parent-child hierarchies:
# the parentName field identifies an optional parent. When a group has a
# parent, the only resources its filter can match are those contained in
# the parent group — i.e. a group contains the resources matching its
# filter and the filters of all its ancestors; a parentless group can
# contain any monitored resource. For example, given instances with two
# user-defined tags "environment" and "role": a parent group with filter
# environment="production" contains all production instances regardless of
# role, while its child with filter role="transcoder" contains only
# production instances with the transcoder role. Group membership can
# change at any moment, depending on what resources exist and what filters
# are associated with the group and its ancestors.
class Group
  include Google::Apis::Core::Hashable

  # A user-assigned name for this group, used only for display purposes.
  # Corresponds to the JSON property `displayName`
  # @return [String]
  attr_accessor :display_name

  # The filter used to determine which monitored resources belong to this
  # group.
  # Corresponds to the JSON property `filter`
  # @return [String]
  attr_accessor :filter

  # If true, the members of this group are considered to be a cluster, and
  # the system can perform additional analysis on such groups.
  # Corresponds to the JSON property `isCluster`
  # @return [Boolean]
  attr_accessor :is_cluster
  alias_method :is_cluster?, :is_cluster

  # Output only. The name of this group, in the format "projects/`
  # project_id_or_number`/groups/`group_id`". When creating a group this
  # field is ignored; a new name is built from the project specified in the
  # CreateGroup call plus an automatically generated `group_id`.
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name

  # The name of the group's parent, if any, in the format "projects/`
  # project_id_or_number`/groups/`group_id`". For groups with no parent,
  # parentName is the empty string, "".
  # Corresponds to the JSON property `parentName`
  # @return [String]
  attr_accessor :parent_name

  def initialize(**args)
    update!(**args)
  end

  # Copy every recognized keyword argument into its matching attribute;
  # absent keys leave the attribute untouched.
  def update!(**args)
    %i[display_name filter is_cluster name parent_name].each do |prop|
      instance_variable_set(:"@#{prop}", args[prop]) if args.key?(prop)
    end
  end
end
# Information involved in an HTTP/HTTPS uptime check request.
class HttpCheck
  include Google::Apis::Core::Hashable

  # Username/password authentication to perform against the checked resource
  # or URL. Only Basic authentication is currently supported in Uptime
  # Monitoring.
  # JSON key: `authInfo`
  # @return [Google::Apis::MonitoringV3::BasicAuthentication]
  attr_accessor :auth_info

  # Headers to send as part of the uptime check request. Two headers sharing a
  # key must be entered as a single header whose value is a comma-separated
  # list of the desired values, per RFC 2616 (page 31); in a Create call, a
  # second header with the same key overwrites the first. At most 100 headers
  # are allowed.
  # JSON key: `headers`
  # @return [Hash<String,String>]
  attr_accessor :headers

  # Boolean specifying whether to encrypt the header information. Set this for
  # authentication-related headers that should not be visible when the
  # configuration is retrieved; the server performs the encryption, and
  # Get/List calls then show those headers obscured as ******.
  # JSON key: `maskHeaders`
  # @return [Boolean]
  attr_accessor :mask_headers
  alias_method :mask_headers?, :mask_headers

  # Path of the page to check; combined with the host (from the
  # MonitoredResource) and port to build the full URL. Optional, defaults
  # to "/".
  # JSON key: `path`
  # @return [String]
  attr_accessor :path

  # Port to check; combined with the host and path to build the full URL.
  # Optional, defaults to 80 without SSL or 443 with SSL.
  # JSON key: `port`
  # @return [Fixnum]
  attr_accessor :port

  # If true, run the check over HTTPS instead of HTTP.
  # JSON key: `useSsl`
  # @return [Boolean]
  attr_accessor :use_ssl
  alias_method :use_ssl?, :use_ssl

  def initialize(**args)
    update!(**args)
  end

  # Copies any recognized keyword arguments onto this object.
  def update!(**args)
    %i[auth_info headers mask_headers path port use_ssl].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# Nimbus InternalCheckers.
class InternalChecker
  include Google::Apis::Core::Hashable

  # The checker ID.
  # JSON key: `checkerId`
  # @return [String]
  attr_accessor :checker_id

  # The checker's human-readable name.
  # JSON key: `displayName`
  # @return [String]
  attr_accessor :display_name

  # GCP zone the uptime check should egress from. Only respected for internal
  # uptime checks, where internal_network is specified.
  # JSON key: `gcpZone`
  # @return [String]
  attr_accessor :gcp_zone

  # The internal network to perform this uptime check on.
  # JSON key: `network`
  # @return [String]
  attr_accessor :network

  # The GCP project ID; not necessarily the same as the config's project_id.
  # JSON key: `projectId`
  # @return [String]
  attr_accessor :project_id

  def initialize(**args)
    update!(**args)
  end

  # Copies any recognized keyword arguments onto this object.
  def update!(**args)
    %i[checker_id display_name gcp_zone network project_id].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# A description of a label.
class LabelDescriptor
  include Google::Apis::Core::Hashable

  # A human-readable description for the label.
  # JSON key: `description`
  # @return [String]
  attr_accessor :description

  # The label key.
  # JSON key: `key`
  # @return [String]
  attr_accessor :key

  # The type of data that can be assigned to the label.
  # JSON key: `valueType`
  # @return [String]
  attr_accessor :value_type

  def initialize(**args)
    update!(**args)
  end

  # Copies any recognized keyword arguments onto this object.
  def update!(**args)
    %i[description key value_type].each do |attr_key|
      instance_variable_set(:"@#{attr_key}", args[attr_key]) if args.key?(attr_key)
    end
  end
end
# Specifies a linear sequence of buckets that all share the same width
# (except the overflow and underflow buckets). Each bucket represents a
# constant absolute uncertainty on the specific value in the bucket.
# There are num_finite_buckets + 2 (= N) buckets. Bucket i has boundaries:
#   Upper bound (0 <= i < N-1): offset + (width * i)
#   Lower bound (1 <= i < N):   offset + (width * (i - 1))
class Linear
  include Google::Apis::Core::Hashable

  # Must be greater than 0.
  # JSON key: `numFiniteBuckets`
  # @return [Fixnum]
  attr_accessor :num_finite_buckets

  # Lower bound of the first bucket.
  # JSON key: `offset`
  # @return [Float]
  attr_accessor :offset

  # Must be greater than 0.
  # JSON key: `width`
  # @return [Float]
  attr_accessor :width

  def initialize(**args)
    update!(**args)
  end

  # Copies any recognized keyword arguments onto this object.
  def update!(**args)
    %i[num_finite_buckets offset width].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# The protocol for the ListAlertPolicies response.
class ListAlertPoliciesResponse
  include Google::Apis::Core::Hashable

  # The returned alert policies.
  # JSON key: `alertPolicies`
  # @return [Array<Google::Apis::MonitoringV3::AlertPolicy>]
  attr_accessor :alert_policies

  # Set to a non-empty value when there might be more results than were
  # returned; pass it as pageToken in the next call to fetch them.
  # JSON key: `nextPageToken`
  # @return [String]
  attr_accessor :next_page_token

  def initialize(**args)
    update!(**args)
  end

  # Copies any recognized keyword arguments onto this object.
  def update!(**args)
    %i[alert_policies next_page_token].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# The ListGroupMembers response.
class ListGroupMembersResponse
  include Google::Apis::Core::Hashable

  # A set of monitored resources in the group.
  # JSON key: `members`
  # @return [Array<Google::Apis::MonitoringV3::MonitoredResource>]
  attr_accessor :members

  # Set to a non-empty value when more results remain; pass it as pageToken
  # in the next call to fetch them.
  # JSON key: `nextPageToken`
  # @return [String]
  attr_accessor :next_page_token

  # The total number of elements matching this request.
  # JSON key: `totalSize`
  # @return [Fixnum]
  attr_accessor :total_size

  def initialize(**args)
    update!(**args)
  end

  # Copies any recognized keyword arguments onto this object.
  def update!(**args)
    %i[members next_page_token total_size].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# The ListGroups response.
class ListGroupsResponse
  include Google::Apis::Core::Hashable

  # The groups that match the specified filters.
  # JSON key: `group`
  # @return [Array<Google::Apis::MonitoringV3::Group>]
  attr_accessor :group

  # Set to a non-empty value when more results remain; pass it as pageToken
  # in the next call to fetch them.
  # JSON key: `nextPageToken`
  # @return [String]
  attr_accessor :next_page_token

  def initialize(**args)
    update!(**args)
  end

  # Copies any recognized keyword arguments onto this object.
  def update!(**args)
    %i[group next_page_token].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# The ListMetricDescriptors response.
class ListMetricDescriptorsResponse
  include Google::Apis::Core::Hashable

  # The metric descriptors available to the project that match the value of
  # filter, if present.
  # JSON key: `metricDescriptors`
  # @return [Array<Google::Apis::MonitoringV3::MetricDescriptor>]
  attr_accessor :metric_descriptors

  # Set to a non-empty value when more results remain; pass it as pageToken
  # in the next call to fetch them.
  # JSON key: `nextPageToken`
  # @return [String]
  attr_accessor :next_page_token

  def initialize(**args)
    update!(**args)
  end

  # Copies any recognized keyword arguments onto this object.
  def update!(**args)
    %i[metric_descriptors next_page_token].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# The ListMonitoredResourceDescriptors response.
class ListMonitoredResourceDescriptorsResponse
  include Google::Apis::Core::Hashable

  # Set to a non-empty value when more results remain; pass it as pageToken
  # in the next call to fetch them.
  # JSON key: `nextPageToken`
  # @return [String]
  attr_accessor :next_page_token

  # The monitored resource descriptors available to this project that match
  # filter, if present.
  # JSON key: `resourceDescriptors`
  # @return [Array<Google::Apis::MonitoringV3::MonitoredResourceDescriptor>]
  attr_accessor :resource_descriptors

  def initialize(**args)
    update!(**args)
  end

  # Copies any recognized keyword arguments onto this object.
  def update!(**args)
    %i[next_page_token resource_descriptors].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# The ListNotificationChannelDescriptors response.
class ListNotificationChannelDescriptorsResponse
  include Google::Apis::Core::Hashable

  # The monitored resource descriptors supported for the specified project,
  # optionally filtered.
  # JSON key: `channelDescriptors`
  # @return [Array<Google::Apis::MonitoringV3::NotificationChannelDescriptor>]
  attr_accessor :channel_descriptors

  # When non-empty, more results may match the request; pass this value as
  # page_token in a subsequent request to fetch the next set. When empty,
  # all results have been returned.
  # JSON key: `nextPageToken`
  # @return [String]
  attr_accessor :next_page_token

  def initialize(**args)
    update!(**args)
  end

  # Copies any recognized keyword arguments onto this object.
  def update!(**args)
    %i[channel_descriptors next_page_token].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# The ListNotificationChannels response.
class ListNotificationChannelsResponse
  include Google::Apis::Core::Hashable

  # When non-empty, more results may match the request; pass this value as
  # page_token in a subsequent request to fetch the next set. When empty,
  # all results have been returned.
  # JSON key: `nextPageToken`
  # @return [String]
  attr_accessor :next_page_token

  # The notification channels defined for the specified project.
  # JSON key: `notificationChannels`
  # @return [Array<Google::Apis::MonitoringV3::NotificationChannel>]
  attr_accessor :notification_channels

  def initialize(**args)
    update!(**args)
  end

  # Copies any recognized keyword arguments onto this object.
  def update!(**args)
    %i[next_page_token notification_channels].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# The ListTimeSeries response.
class ListTimeSeriesResponse
  include Google::Apis::Core::Hashable

  # Query execution errors that may have caused the returned time series data
  # to be incomplete.
  # JSON key: `executionErrors`
  # @return [Array<Google::Apis::MonitoringV3::Status>]
  attr_accessor :execution_errors

  # Set to a non-empty value when more results remain; pass it as pageToken
  # in the next call to fetch them.
  # JSON key: `nextPageToken`
  # @return [String]
  attr_accessor :next_page_token

  # One or more time series that match the filter included in the request.
  # JSON key: `timeSeries`
  # @return [Array<Google::Apis::MonitoringV3::TimeSeries>]
  attr_accessor :time_series

  def initialize(**args)
    update!(**args)
  end

  # Copies any recognized keyword arguments onto this object.
  def update!(**args)
    %i[execution_errors next_page_token time_series].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# The protocol for the ListUptimeCheckConfigs response.
class ListUptimeCheckConfigsResponse
  include Google::Apis::Core::Hashable

  # Pagination token for retrieving the next page of results; empty when there
  # are no further results. To get the next page, pass this value in the
  # subsequent List call's page_token request field.
  # JSON key: `nextPageToken`
  # @return [String]
  attr_accessor :next_page_token

  # Total number of uptime check configurations for the project, irrespective
  # of any pagination.
  # JSON key: `totalSize`
  # @return [Fixnum]
  attr_accessor :total_size

  # The returned uptime check configurations.
  # JSON key: `uptimeCheckConfigs`
  # @return [Array<Google::Apis::MonitoringV3::UptimeCheckConfig>]
  attr_accessor :uptime_check_configs

  def initialize(**args)
    update!(**args)
  end

  # Copies any recognized keyword arguments onto this object.
  def update!(**args)
    %i[next_page_token total_size uptime_check_configs].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# The protocol for the ListUptimeCheckIps response.
class ListUptimeCheckIpsResponse
  include Google::Apis::Core::Hashable

  # Pagination token for retrieving the next page of results; empty when there
  # are no further results. To get the next page, pass this value in the
  # subsequent List call's page_token request field.
  # NOTE: this field is not yet implemented.
  # JSON key: `nextPageToken`
  # @return [String]
  attr_accessor :next_page_token

  # The returned list of IP addresses (including region and location) that the
  # checkers run from.
  # JSON key: `uptimeCheckIps`
  # @return [Array<Google::Apis::MonitoringV3::UptimeCheckIp>]
  attr_accessor :uptime_check_ips

  def initialize(**args)
    update!(**args)
  end

  # Copies any recognized keyword arguments onto this object.
  def update!(**args)
    %i[next_page_token uptime_check_ips].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# A specific metric, identified by specifying values for all of the labels of
# a MetricDescriptor.
class Metric
  include Google::Apis::Core::Hashable

  # Label values that uniquely identify this metric. All labels listed in the
  # MetricDescriptor must be assigned values.
  # JSON key: `labels`
  # @return [Hash<String,String>]
  attr_accessor :labels

  # An existing metric type (see google.api.MetricDescriptor), e.g.
  # custom.googleapis.com/invoice/paid/amount.
  # JSON key: `type`
  # @return [String]
  attr_accessor :type

  def initialize(**args)
    update!(**args)
  end

  # Copies any recognized keyword arguments onto this object.
  def update!(**args)
    %i[labels type].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# A condition type that checks that monitored resources are reporting data.
# The configuration defines a metric and a set of monitored resources; the
# predicate is in violation when a time series for the specified metric of a
# monitored resource includes no data in the specified duration.
class MetricAbsence
  include Google::Apis::Core::Hashable

  # How to align data points in individual time series and how to combine the
  # retrieved series (e.g. aggregating multiple streams per resource into one,
  # or across all members of a group of resources). Multiple aggregations are
  # applied in the order given. Similar to the field of the same name in the
  # MetricService.ListTimeSeries request; that method is useful for debugging
  # this field.
  # JSON key: `aggregations`
  # @return [Array<Google::Apis::MonitoringV3::Aggregation>]
  attr_accessor :aggregations

  # How long a time series must fail to report new data to be considered
  # failing. Only multiples of a minute (e.g. 60, 120, or 300 seconds) are
  # supported; an invalid value yields an error. The Duration.nanos field is
  # ignored.
  # JSON key: `duration`
  # @return [String]
  attr_accessor :duration

  # Filter identifying which time series should be compared with the
  # threshold. Similar to the filter in a MetricService.ListTimeSeries request
  # (useful to verify the series that will be retrieved/processed): it must
  # specify the metric type and may restrict resource type, resource labels,
  # and metric labels. At most 2048 Unicode characters.
  # JSON key: `filter`
  # @return [String]
  attr_accessor :filter

  # How many time series must fail a predicate to trigger a condition;
  # defaults to a `count: 1` trigger when unspecified.
  # JSON key: `trigger`
  # @return [Google::Apis::MonitoringV3::Trigger]
  attr_accessor :trigger

  def initialize(**args)
    update!(**args)
  end

  # Copies any recognized keyword arguments onto this object.
  def update!(**args)
    %i[aggregations duration filter trigger].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# Defines a metric type and its schema. Once a metric descriptor is created,
# deleting or altering it stops data collection and makes the metric type's
# existing data unusable.
class MetricDescriptor
  include Google::Apis::Core::Hashable

  # A detailed description of the metric, usable in documentation.
  # JSON key: `description`
  # @return [String]
  attr_accessor :description

  # A concise name for the metric, displayable in user interfaces. Use
  # sentence case without an ending period, e.g. "Request count". Optional,
  # but recommended for metrics tied to user-visible concepts such as Quota.
  # JSON key: `displayName`
  # @return [String]
  attr_accessor :display_name

  # Labels that can describe a specific instance of this metric type. For
  # example, appengine.googleapis.com/http/server/response_latencies has a
  # response_code label, allowing latencies to be broken down by HTTP status.
  # JSON key: `labels`
  # @return [Array<Google::Apis::MonitoringV3::LabelDescriptor>]
  attr_accessor :labels

  # Additional annotations that can guide the usage of the metric.
  # JSON key: `metadata`
  # @return [Google::Apis::MonitoringV3::MetricDescriptorMetadata]
  attr_accessor :metadata

  # Whether the metric records instantaneous values, changes to a value, etc.
  # Some combinations of metric_kind and value_type might not be supported.
  # JSON key: `metricKind`
  # @return [String]
  attr_accessor :metric_kind

  # The resource name of the metric descriptor.
  # JSON key: `name`
  # @return [String]
  attr_accessor :name

  # The metric type, including its DNS name prefix; not URL-encoded. All
  # user-defined custom metric types have the DNS name custom.googleapis.com
  # and should use a natural hierarchical grouping, for example:
  #   "custom.googleapis.com/invoice/paid/amount"
  #   "appengine.googleapis.com/http/server/response_latencies"
  # JSON key: `type`
  # @return [String]
  attr_accessor :type

  # The unit in which the metric value is reported. Applicable only when
  # value_type is INT64, DOUBLE, or DISTRIBUTION. Supported units are a
  # subset of The Unified Code for Units of Measure
  # (http://unitsofmeasure.org/ucum.html):
  #   Basic units (UNIT): bit, By (byte), s (second), min, h, d (day)
  #   Prefixes (PREFIX): k (10**3), M (10**6), G (10**9), T (10**12),
  #     P (10**15), E (10**18), Z (10**21), Y (10**24), m (10**-3),
  #     u (10**-6), n (10**-9), p (10**-12), f (10**-15), a (10**-18),
  #     z (10**-21), y (10**-24), Ki (2**10), Mi (2**20), Gi (2**30),
  #     Ti (2**40)
  # The grammar also includes the connectors "/" (division, infix, e.g. 1/s)
  # and "." (multiplication, infix, e.g. GBy.d). The grammar for a unit is:
  #   Expression = Component ` "." Component ` ` "/" Component ` ;
  #   Component  = ( [ PREFIX ] UNIT | "%" ) [ Annotation ]
  #              | Annotation
  #              | "1"
  #              ;
  #   Annotation = "`" NAME "`" ;
  # Notes: an Annotation is just a comment if it follows a UNIT and is
  # equivalent to 1 when used alone (`requests`/s == 1/s,
  # By`transmitted`/s == By/s); NAME is a sequence of non-blank printable
  # ASCII characters not containing '`' or '`'; 1 represents the
  # dimensionless value 1, as in 1/s; % represents the dimensionless value
  # 1/100 and annotates values giving a percentage.
  # JSON key: `unit`
  # @return [String]
  attr_accessor :unit

  # Whether the measurement is an integer, a floating-point number, etc. Some
  # combinations of metric_kind and value_type might not be supported.
  # JSON key: `valueType`
  # @return [String]
  attr_accessor :value_type

  def initialize(**args)
    update!(**args)
  end

  # Copies any recognized keyword arguments onto this object.
  def update!(**args)
    %i[description display_name labels metadata metric_kind
       name type unit value_type].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# Additional annotations that can be used to guide the usage of a metric.
class MetricDescriptorMetadata
  include Google::Apis::Core::Hashable

  # Ingestion delay of data points. Points older than this age are guaranteed
  # to be ingested and available to read, excluding data loss due to errors.
  # JSON key: `ingestDelay`
  # @return [String]
  attr_accessor :ingest_delay

  # The launch stage of the metric definition.
  # JSON key: `launchStage`
  # @return [String]
  attr_accessor :launch_stage

  # Sampling period of metric data points. For periodically written metrics,
  # consecutive points are stored at this interval (excluding data loss due
  # to errors); higher-granularity metrics have a smaller sampling period.
  # JSON key: `samplePeriod`
  # @return [String]
  attr_accessor :sample_period

  def initialize(**args)
    update!(**args)
  end

  # Copies any recognized keyword arguments onto this object.
  def update!(**args)
    %i[ingest_delay launch_stage sample_period].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# A condition type that compares a collection of time series against a
# threshold.
class MetricThreshold
  include Google::Apis::Core::Hashable

  # How to align data points in individual time series and how to combine the
  # retrieved series (e.g. aggregating multiple streams per resource into one,
  # or across all members of a group of resources). Multiple aggregations are
  # applied in the order given. Similar to the field of the same name in the
  # MetricService.ListTimeSeries request; that method is useful for debugging
  # this field.
  # JSON key: `aggregations`
  # @return [Array<Google::Apis::MonitoringV3::Aggregation>]
  attr_accessor :aggregations

  # Comparison applied between each time series (indicated by filter and
  # aggregation, on the left-hand side) and the threshold (indicated by
  # threshold_value, on the right-hand side). Only COMPARISON_LT and
  # COMPARISON_GT are currently supported.
  # JSON key: `comparison`
  # @return [String]
  attr_accessor :comparison

  # Alignment and combination rules for the time series selected by
  # denominatorFilter, analogous to aggregations. When computing ratios, the
  # aggregations and denominator_aggregations fields must use the same
  # alignment period and produce time series with the same periodicity and
  # labels. Similar to the field in the MetricService.ListTimeSeries request;
  # that method is useful for debugging this field.
  # JSON key: `denominatorAggregations`
  # @return [Array<Google::Apis::MonitoringV3::Aggregation>]
  attr_accessor :denominator_aggregations

  # Filter identifying a time series to use as the denominator of a ratio
  # compared with the threshold; when specified, the series selected by the
  # filter field is the numerator. Similar to the filter in a
  # MetricService.ListTimeSeries request (useful to verify the series that
  # will be retrieved/processed): it must specify the metric type and may
  # restrict resource type, resource labels, and metric labels. At most 2048
  # Unicode characters.
  # JSON key: `denominatorFilter`
  # @return [String]
  attr_accessor :denominator_filter

  # How long a time series must violate the threshold to be considered
  # failing. Only multiples of a minute (e.g. 0, 60, 120, or 300 seconds) are
  # supported; an invalid value yields an error. When choosing a duration,
  # consider the frequency of the underlying data (also affected by any
  # alignments in the aggregations field): a good duration is long enough
  # that a single outlier does not generate spurious alerts, yet short enough
  # that unhealthy states are detected and alerted on quickly.
  # JSON key: `duration`
  # @return [String]
  attr_accessor :duration

  # Filter identifying which time series should be compared with the
  # threshold. Similar to the filter in a MetricService.ListTimeSeries request
  # (useful to verify the series that will be retrieved/processed): it must
  # specify the metric type and may restrict resource type, resource labels,
  # and metric labels. At most 2048 Unicode characters.
  # JSON key: `filter`
  # @return [String]
  attr_accessor :filter

  # A value against which to compare the time series.
  # JSON key: `thresholdValue`
  # @return [Float]
  attr_accessor :threshold_value

  # How many time series must fail a predicate to trigger a condition;
  # defaults to a `count: 1` trigger when unspecified.
  # JSON key: `trigger`
  # @return [Google::Apis::MonitoringV3::Trigger]
  attr_accessor :trigger

  def initialize(**args)
    update!(**args)
  end

  # Copies any recognized keyword arguments onto this object.
  def update!(**args)
    %i[aggregations comparison denominator_aggregations denominator_filter
       duration filter threshold_value trigger].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# An object representing a resource that can be used for monitoring, logging,
# billing, or other purposes. Examples include virtual machine instances,
# databases, and storage devices such as disks. The type field identifies a
# MonitoredResourceDescriptor object describing the resource's schema, and
# the labels field identifies the actual resource and its attributes per that
# schema. For example, a particular Compute Engine VM instance could be
# represented by the following object, because the
# MonitoredResourceDescriptor for "gce_instance" has labels "instance_id"
# and "zone":
#   ` "type": "gce_instance",
#   "labels": ` "instance_id": "12345678901234",
#   "zone": "us-central1-a" ``
class MonitoredResource
  include Google::Apis::Core::Hashable

  # Required. Values for all labels listed in the associated monitored
  # resource descriptor. For example, Compute Engine VM instances use the
  # labels "project_id", "instance_id", and "zone".
  # JSON key: `labels`
  # @return [Hash<String,String>]
  attr_accessor :labels

  # Required. The monitored resource type; must match the type field of a
  # MonitoredResourceDescriptor object. For example, a Compute Engine VM
  # instance has type gce_instance.
  # JSON key: `type`
  # @return [String]
  attr_accessor :type

  def initialize(**args)
    update!(**args)
  end

  # Copies any recognized keyword arguments onto this object.
  def update!(**args)
    %i[labels type].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# Describes the schema of a MonitoredResource object via a type name and a set
# of labels. For example, the descriptor for Compute Engine VM instances has
# type "gce_instance" and labels "instance_id" and "zone". Different APIs may
# support different monitored resource types; APIs generally expose a list
# method returning the descriptors they use.
class MonitoredResourceDescriptor
  include Google::Apis::Core::Hashable

  # Optional. A detailed description of the monitored resource type, suitable
  # for documentation.
  # Corresponds to the JSON property `description`
  # @return [String]
  attr_accessor :description

  # Optional. A concise, UI-friendly name for the monitored resource type.
  # Should be a Title Cased Noun Phrase without articles or determiners,
  # e.g. "Google Cloud SQL Database".
  # Corresponds to the JSON property `displayName`
  # @return [String]
  attr_accessor :display_name

  # Required. The labels used to describe instances of this monitored resource
  # type (e.g. "database_id" and "zone" for an individual Cloud SQL database).
  # Corresponds to the JSON property `labels`
  # @return [Array<Google::Apis::MonitoringV3::LabelDescriptor>]
  attr_accessor :labels

  # Optional. The resource name of this descriptor:
  # "projects/`project_id`/monitoredResourceDescriptors/`type`", where `type`
  # is this object's type field and `project_id` supplies API-specific context.
  # APIs that do not use project information may use
  # "monitoredResourceDescriptors/`type`".
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name

  # Required. The monitored resource type (e.g. "cloudsql_database").
  # Maximum length is 256 characters.
  # Corresponds to the JSON property `type`
  # @return [String]
  attr_accessor :type

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object
  def update!(**args)
    %i[description display_name labels name type].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# Auxiliary metadata for a MonitoredResource object. MonitoredResource objects
# carry only the minimum information needed to uniquely identify a monitored
# resource instance; Google Stackdriver Monitoring & Logging extracts further
# useful metadata for cloud resources of all types via an ingestion pipeline
# and stores it in this message.
class MonitoredResourceMetadata
  include Google::Apis::Core::Hashable

  # Output only. Values for predefined system metadata labels extracted by
  # Google Stackdriver, which decides which labels are useful and how to
  # obtain them (e.g. "machine_image", "vpc", "subnet_id", "security_group",
  # "name"). Values may only be strings, Booleans, or lists of strings, e.g.:
  # ` "name": "my-test-instance",
  # "security_group": ["a", "b", "c"],
  # "spot_instance": false `
  # Corresponds to the JSON property `systemLabels`
  # @return [Hash<String,Object>]
  attr_accessor :system_labels

  # Output only. A map of user-defined metadata labels.
  # Corresponds to the JSON property `userLabels`
  # @return [Hash<String,String>]
  attr_accessor :user_labels

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object
  def update!(**args)
    %i[system_labels user_labels].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# Describes a change made to a configuration.
class MutationRecord
  include Google::Apis::Core::Hashable

  # When the change occurred.
  # Corresponds to the JSON property `mutateTime`
  # @return [String]
  attr_accessor :mutate_time

  # The email address of the user who made the change.
  # Corresponds to the JSON property `mutatedBy`
  # @return [String]
  attr_accessor :mutated_by

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object
  def update!(**args)
    %i[mutate_time mutated_by].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# A NotificationChannel is a medium through which an alert is delivered when a
# policy violation is detected (e.g. email, SMS, third-party messaging apps).
# Fields containing sensitive information such as authentication tokens or
# contact info are only partially populated on retrieval.
class NotificationChannel
  include Google::Apis::Core::Hashable

  # An optional human-readable description of this notification channel,
  # providing detail beyond the display name. Must not exceed 1024 Unicode
  # characters.
  # Corresponds to the JSON property `description`
  # @return [String]
  attr_accessor :description

  # An optional human-readable name for this channel. A non-empty, unique
  # name is recommended (though not enforced) to make channels easier to
  # identify. Limited to 512 Unicode characters.
  # Corresponds to the JSON property `displayName`
  # @return [String]
  attr_accessor :display_name

  # Whether notifications are forwarded to the described channel. Allows
  # delivery to a particular channel to be disabled without removing the
  # channel from the alerting policies that reference it — convenient when
  # the change is temporary and delivery should later resume for the same
  # set of policies.
  # Corresponds to the JSON property `enabled`
  # @return [Boolean]
  attr_accessor :enabled
  alias_method :enabled?, :enabled

  # Configuration fields defining the channel and its behavior. Permissible
  # and required labels are specified in NotificationChannelDescriptor.labels
  # of the descriptor corresponding to the type field.
  # Corresponds to the JSON property `labels`
  # @return [Hash<String,String>]
  attr_accessor :labels

  # The full REST resource name for this channel. The syntax is:
  # projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]
  # The [CHANNEL_ID] is assigned automatically by the server on creation.
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name

  # The type of the notification channel; matches the value of the
  # NotificationChannelDescriptor.type field.
  # Corresponds to the JSON property `type`
  # @return [String]
  attr_accessor :type

  # User-supplied key/value data for organizing and identifying
  # NotificationChannel objects; unlike labels, it need not conform to the
  # NotificationChannelDescriptor's schema. Up to 64 entries; each key and
  # value is limited to 63 Unicode characters or 128 bytes, whichever is
  # smaller. Only lowercase letters, numerals, underscores, and dashes are
  # allowed, and keys must begin with a letter.
  # Corresponds to the JSON property `userLabels`
  # @return [Hash<String,String>]
  attr_accessor :user_labels

  # Indicates whether this channel has been verified. Expected to be
  # populated on ListNotificationChannels or GetNotificationChannel. A value
  # of UNVERIFIED indicates the channel is non-functioning (it requires
  # verification and lacks it); otherwise the channel is assumed to work. If
  # the channel is neither VERIFIED nor UNVERIFIED, it is of a type that does
  # not require verification, or was exempted because it predates the
  # verification requirement for its type. This field cannot be modified via
  # a standard UpdateNotificationChannel operation; use
  # VerifyNotificationChannel to change it.
  # Corresponds to the JSON property `verificationStatus`
  # @return [String]
  attr_accessor :verification_status

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object
  def update!(**args)
    %i[description display_name enabled labels name type user_labels
       verification_status].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# A description of a notification channel type: its properties and the set of
# labels or fields that must be specified to configure channels of that type.
class NotificationChannelDescriptor
  include Google::Apis::Core::Hashable

  # A human-readable description of the channel type, possibly covering the
  # channel's properties and pointing to external documentation.
  # Corresponds to the JSON property `description`
  # @return [String]
  attr_accessor :description

  # A human-readable, UI-suitable name for the notification channel type.
  # Corresponds to the JSON property `displayName`
  # @return [String]
  attr_accessor :display_name

  # The labels that must be defined to identify a particular channel of this
  # type; each label includes a description of how it should be populated.
  # Corresponds to the JSON property `labels`
  # @return [Array<Google::Apis::MonitoringV3::LabelDescriptor>]
  attr_accessor :labels

  # The full REST resource name for this descriptor. The syntax is:
  # projects/[PROJECT_ID]/notificationChannelDescriptors/[TYPE]
  # where [TYPE] is the value of the type field.
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name

  # The tiers that support this notification channel; the project service
  # tier must be one of the supported_tiers.
  # Corresponds to the JSON property `supportedTiers`
  # @return [Array<String>]
  attr_accessor :supported_tiers

  # The globally unique type of notification channel, e.g. "email", "sms".
  # Corresponds to the JSON property `type`
  # @return [String]
  attr_accessor :type

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object
  def update!(**args)
    %i[description display_name labels name supported_tiers type].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# A protocol buffer option, attachable to a message, field, enumeration, etc.
class Option
  include Google::Apis::Core::Hashable

  # The option's name. For protobuf built-in options (defined in
  # descriptor.proto) this is the short name, e.g. "map_entry". For custom
  # options it is the fully-qualified name, e.g. "google.api.http".
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name

  # The option's value packed in an Any message. Primitive values use the
  # corresponding wrapper type from google/protobuf/wrappers.proto; enum
  # values are stored as int32 using the google.protobuf.Int32Value type.
  # Corresponds to the JSON property `value`
  # @return [Hash<String,Object>]
  attr_accessor :value

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object
  def update!(**args)
    %i[name value].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# A single data point in a time series.
class Point
  include Google::Apis::Core::Hashable

  # A time interval extending just after a start time through an end time;
  # when start equals end the interval represents a single point in time.
  # Corresponds to the JSON property `interval`
  # @return [Google::Apis::MonitoringV3::TimeInterval]
  attr_accessor :interval

  # A single strongly-typed value.
  # Corresponds to the JSON property `value`
  # @return [Google::Apis::MonitoringV3::TypedValue]
  attr_accessor :value

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object
  def update!(**args)
    %i[interval value].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# The range of the population values.
class Range
  include Google::Apis::Core::Hashable

  # The maximum of the population values.
  # Corresponds to the JSON property `max`
  # @return [Float]
  attr_accessor :max

  # The minimum of the population values.
  # Corresponds to the JSON property `min`
  # @return [Float]
  attr_accessor :min

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object
  def update!(**args)
    %i[max min].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# The resource submessage for group checks; usable in place of a monitored
# resource when multiple resources are being monitored.
class ResourceGroup
  include Google::Apis::Core::Hashable

  # The group of resources being monitored. Should be only the group_id,
  # not projects/<project_id>/groups/<group_id>.
  # Corresponds to the JSON property `groupId`
  # @return [String]
  attr_accessor :group_id

  # The resource type of the group members.
  # Corresponds to the JSON property `resourceType`
  # @return [String]
  attr_accessor :resource_type

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object
  def update!(**args)
    %i[group_id resource_type].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# The SendNotificationChannelVerificationCode request. Carries no fields; its
# presence alone triggers delivery of a verification code to the channel.
class SendNotificationChannelVerificationCodeRequest
include Google::Apis::Core::Hashable
def initialize(**args)
update!(**args)
end
# Update properties of this object (no properties to update for this message)
def update!(**args)
end
end
# SourceContext carries information about the source of a protobuf element,
# such as the file in which it is defined.
class SourceContext
  include Google::Apis::Core::Hashable

  # The path-qualified name of the .proto file containing the associated
  # protobuf element, e.g. "google/protobuf/source_context.proto".
  # Corresponds to the JSON property `fileName`
  # @return [String]
  attr_accessor :file_name

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object
  def update!(**args)
    instance_variable_set(:@file_name, args[:file_name]) if args.key?(:file_name)
  end
end
# The Status type defines a logical error model suitable for different
# programming environments, including REST and RPC APIs; it is used by gRPC
# (https://github.com/grpc). It is designed to be simple to use and understand
# for most users, yet flexible enough to meet unexpected needs. A Status holds
# three pieces of data: an error code (normally an enum value of
# google.rpc.Code, though additional codes may be accepted), a
# developer-facing English error message (localized user-facing messages
# belong in the error details or are localized by the client), and optional
# error details carrying arbitrary information (a predefined set of detail
# types lives in package google.rpc). Status is a logical representation, not
# necessarily the wire format — client libraries and protocols may map it
# differently (e.g. Java exceptions, C error codes). Typical uses include
# partial errors embedded in a normal response, per-step workflow errors,
# per-item errors in batch responses, statuses of asynchronous operations
# embedded in a response, and logging (after any stripping needed for
# security/privacy).
class Status
  include Google::Apis::Core::Hashable

  # The status code; should be an enum value of google.rpc.Code.
  # Corresponds to the JSON property `code`
  # @return [Fixnum]
  attr_accessor :code

  # A list of messages carrying the error details. A common set of message
  # types is available for APIs to use.
  # Corresponds to the JSON property `details`
  # @return [Array<Hash<String,Object>>]
  attr_accessor :details

  # A developer-facing error message in English. User-facing messages should
  # be localized and sent in google.rpc.Status.details, or localized by the
  # client.
  # Corresponds to the JSON property `message`
  # @return [String]
  attr_accessor :message

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object
  def update!(**args)
    %i[code details message].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# Information required for a TCP uptime check request.
class TcpCheck
  include Google::Apis::Core::Hashable

  # Required. The port to run the check against; combined with the host
  # (specified within the MonitoredResource) to construct the full URL.
  # Corresponds to the JSON property `port`
  # @return [Fixnum]
  attr_accessor :port

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object
  def update!(**args)
    instance_variable_set(:@port, args[:port]) if args.key?(:port)
  end
end
# A time interval extending just after a start time through an end time. When
# the start time equals the end time, the interval represents a single point
# in time.
class TimeInterval
  include Google::Apis::Core::Hashable

  # Required. The end of the time interval.
  # Corresponds to the JSON property `endTime`
  # @return [String]
  attr_accessor :end_time

  # Optional. The beginning of the time interval. Defaults to the end time
  # and must not be later than it.
  # Corresponds to the JSON property `startTime`
  # @return [String]
  attr_accessor :start_time

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object
  def update!(**args)
    %i[end_time start_time].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# A collection of data points describing the time-varying values of a metric.
# A time series is identified by the combination of a fully-specified
# monitored resource and a fully-specified metric. This type is used for both
# listing and creating time series.
class TimeSeries
  include Google::Apis::Core::Hashable

  # Auxiliary metadata for a MonitoredResource object. MonitoredResource
  # objects carry only the minimum information needed to uniquely identify a
  # monitored resource instance; Google Stackdriver Monitoring & Logging
  # extracts further useful metadata for cloud resources of all types and
  # stores it in this message.
  # Corresponds to the JSON property `metadata`
  # @return [Google::Apis::MonitoringV3::MonitoredResourceMetadata]
  attr_accessor :metadata

  # A specific metric, identified by values for all labels of a
  # MetricDescriptor.
  # Corresponds to the JSON property `metric`
  # @return [Google::Apis::MonitoringV3::Metric]
  attr_accessor :metric

  # The metric kind of the time series. When listing, this may differ from
  # the associated metric's kind if this series is an alignment or reduction
  # of other series. When creating, this field is optional; if present it
  # must match the associated metric's kind, and if the metric's descriptor
  # must be auto-created it specifies the new descriptor's kind, which must
  # be GAUGE (the default) or CUMULATIVE.
  # Corresponds to the JSON property `metricKind`
  # @return [String]
  attr_accessor :metric_kind

  # The data points of this time series. When listing, points are returned
  # in reverse time order. When creating, this field must contain exactly
  # one point whose type matches the value type of the associated metric; if
  # the metric's descriptor must be auto-created, the descriptor's value
  # type is taken from the point's type, which must be BOOL, INT64, DOUBLE,
  # or DISTRIBUTION.
  # Corresponds to the JSON property `points`
  # @return [Array<Google::Apis::MonitoringV3::Point>]
  attr_accessor :points

  # An object representing a resource that can be used for monitoring,
  # logging, billing, or other purposes (e.g. VM instances, databases,
  # disks). The type field names a MonitoredResourceDescriptor describing
  # the resource's schema, and the labels field identifies the concrete
  # resource instance under that schema, e.g. for a Compute Engine VM:
  # ` "type": "gce_instance",
  # "labels": ` "instance_id": "12345678901234", "zone": "us-central1-a" ``
  # Corresponds to the JSON property `resource`
  # @return [Google::Apis::MonitoringV3::MonitoredResource]
  attr_accessor :resource

  # The value type of the time series. When listing, this may differ from
  # the associated metric's value type if this series is an alignment or
  # reduction of other series. When creating, this field is optional; if
  # present it must match the type of the data in the points field.
  # Corresponds to the JSON property `valueType`
  # @return [String]
  attr_accessor :value_type

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object
  def update!(**args)
    %i[metadata metric metric_kind points resource value_type].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# Specifies how many time series must fail a predicate to trigger a condition.
# If unspecified, a `count: 1` trigger is used.
class Trigger
  include Google::Apis::Core::Hashable

  # The absolute number of time series that must fail the predicate for the
  # condition to be triggered.
  # Corresponds to the JSON property `count`
  # @return [Fixnum]
  attr_accessor :count

  # The percentage of time series that must fail the predicate for the
  # condition to be triggered.
  # Corresponds to the JSON property `percent`
  # @return [Float]
  attr_accessor :percent

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object
  def update!(**args)
    %i[count percent].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# A protocol buffer message type.
class Type
  include Google::Apis::Core::Hashable

  # The list of fields.
  # Corresponds to the JSON property `fields`
  # @return [Array<Google::Apis::MonitoringV3::Field>]
  attr_accessor :fields

  # The fully qualified message name.
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name

  # The list of types appearing in oneof definitions in this type.
  # Corresponds to the JSON property `oneofs`
  # @return [Array<String>]
  attr_accessor :oneofs

  # The protocol buffer options.
  # Corresponds to the JSON property `options`
  # @return [Array<Google::Apis::MonitoringV3::Option>]
  attr_accessor :options

  # SourceContext carries information about the source of a protobuf
  # element, such as the file in which it is defined.
  # Corresponds to the JSON property `sourceContext`
  # @return [Google::Apis::MonitoringV3::SourceContext]
  attr_accessor :source_context

  # The source syntax.
  # Corresponds to the JSON property `syntax`
  # @return [String]
  attr_accessor :syntax

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object
  def update!(**args)
    %i[fields name oneofs options source_context syntax].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# A single strongly-typed value.
class TypedValue
  include Google::Apis::Core::Hashable

  # A Boolean value: true or false.
  # Corresponds to the JSON property `boolValue`
  # @return [Boolean]
  attr_accessor :bool_value
  alias_method :bool_value?, :bool_value

  # Distribution contains summary statistics for a population of values —
  # the count, mean, sum of squared deviation from the mean, minimum, and
  # maximum — and optionally a histogram giving per-bucket counts, with
  # bucket boundaries given explicitly or by formulas for fixed-width or
  # exponentially increasing widths. Including non-finite values
  # (infinities or NaNs), while not forbidden, is generally a bad idea as
  # it renders the mean and sum_of_squared_deviation fields meaningless.
  # Corresponds to the JSON property `distributionValue`
  # @return [Google::Apis::MonitoringV3::Distribution]
  attr_accessor :distribution_value

  # A 64-bit double-precision floating-point number, with magnitude roughly
  # ±10<sup>±300</sup> and 16 significant digits of precision.
  # Corresponds to the JSON property `doubleValue`
  # @return [Float]
  attr_accessor :double_value

  # A 64-bit integer; range is approximately ±9.2x10<sup>18</sup>.
  # Corresponds to the JSON property `int64Value`
  # @return [Fixnum]
  attr_accessor :int64_value

  # A variable-length string value.
  # Corresponds to the JSON property `stringValue`
  # @return [String]
  attr_accessor :string_value

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object
  def update!(**args)
    %i[bool_value distribution_value double_value int64_value
       string_value].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# Configures which resources and services to monitor for availability.
class UptimeCheckConfig
  include Google::Apis::Core::Hashable

  # The expected content on the page the check runs against. Currently only
  # the first entry is supported; others are ignored. The server looks for
  # an exact match of the string in the page response's content. Optional;
  # specify only when a content match is required.
  # Corresponds to the JSON property `contentMatchers`
  # @return [Array<Google::Apis::MonitoringV3::ContentMatcher>]
  attr_accessor :content_matchers

  # Required. A human-friendly name for the uptime check configuration. It
  # should be unique within a Stackdriver Account for easier identification,
  # though uniqueness is not enforced.
  # Corresponds to the JSON property `displayName`
  # @return [String]
  attr_accessor :display_name

  # Information involved in an HTTP/HTTPS uptime check request.
  # Corresponds to the JSON property `httpCheck`
  # @return [Google::Apis::MonitoringV3::HttpCheck]
  attr_accessor :http_check

  # The internal checkers this check will egress from. If is_internal is
  # true and this list is empty, the check egresses from all
  # InternalCheckers configured for the project owning this CheckConfig.
  # Corresponds to the JSON property `internalCheckers`
  # @return [Array<Google::Apis::MonitoringV3::InternalChecker>]
  attr_accessor :internal_checkers

  # Denotes whether this check egresses from InternalCheckers.
  # Corresponds to the JSON property `isInternal`
  # @return [Boolean]
  attr_accessor :is_internal
  alias_method :is_internal?, :is_internal

  # An object representing a resource that can be used for monitoring,
  # logging, billing, or other purposes (e.g. VM instances, databases,
  # disks). The type field names a MonitoredResourceDescriptor describing
  # the resource's schema, and the labels field identifies the concrete
  # resource instance under that schema, e.g. for a Compute Engine VM:
  # ` "type": "gce_instance",
  # "labels": ` "instance_id": "12345678901234", "zone": "us-central1-a" ``
  # Corresponds to the JSON property `monitoredResource`
  # @return [Google::Apis::MonitoringV3::MonitoredResource]
  attr_accessor :monitored_resource

  # A unique resource name for this UptimeCheckConfig, in the format:
  # projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]. Omit this
  # field when creating the configuration; on create the server assigns the
  # name and includes it in the response.
  # Corresponds to the JSON property `name`
  # @return [String]
  attr_accessor :name

  # How often, in seconds, the uptime check is performed. Currently the
  # only supported values are 60s (1 minute), 300s (5 minutes), 600s (10
  # minutes), and 900s (15 minutes). Optional; defaults to 300s.
  # Corresponds to the JSON property `period`
  # @return [String]
  attr_accessor :period

  # The resource submessage for group checks; usable in place of a
  # monitored resource when multiple resources are being monitored.
  # Corresponds to the JSON property `resourceGroup`
  # @return [Google::Apis::MonitoringV3::ResourceGroup]
  attr_accessor :resource_group

  # The regions from which the check will run. If specified, enough regions
  # to cover at least 3 locations must be provided or an error is returned.
  # If unspecified, uptime checks run from all regions.
  # Corresponds to the JSON property `selectedRegions`
  # @return [Array<String>]
  attr_accessor :selected_regions

  # Information required for a TCP uptime check request.
  # Corresponds to the JSON property `tcpCheck`
  # @return [Google::Apis::MonitoringV3::TcpCheck]
  attr_accessor :tcp_check

  # Required. The maximum amount of time to wait for the request to
  # complete (must be between 1 and 60 seconds).
  # Corresponds to the JSON property `timeout`
  # @return [String]
  attr_accessor :timeout

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object
  def update!(**args)
    %i[content_matchers display_name http_check internal_checkers
       is_internal monitored_resource name period resource_group
       selected_regions tcp_check timeout].each do |key|
      instance_variable_set(:"@#{key}", args[key]) if args.key?(key)
    end
  end
end
# Contains the region, location, and list of IP addresses where checkers in the
# location run from.
class UptimeCheckIp
  include Google::Apis::Core::Hashable

  # The IP address from which the uptime check originates. This is a full IP
  # address (not an IP address range), in either IPv4 or IPv6 format.
  # Corresponds to the JSON property `ipAddress`
  # @return [String]
  attr_accessor :ip_address

  # A more specific location within the region (typically a city/town/metro
  # plus its state/province or country) inside the broader region category.
  # Corresponds to the JSON property `location`
  # @return [String]
  attr_accessor :location

  # A broad region category in which the IP address is located.
  # Corresponds to the JSON property `region`
  # @return [String]
  attr_accessor :region

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object, assigning only the keys present in args.
  def update!(**args)
    %i[ip_address location region].each do |prop|
      instance_variable_set(:"@#{prop}", args[prop]) if args.key?(prop)
    end
  end
end
# The VerifyNotificationChannel request.
class VerifyNotificationChannelRequest
  include Google::Apis::Core::Hashable

  # The verification code that was delivered to the channel as a result of
  # invoking the SendNotificationChannelVerificationCode API method or that was
  # retrieved from a verified channel via GetNotificationChannelVerificationCode.
  # For example, one might have "G-123456" or "TKNZGhhd2EyN3I1MnRnMjRv" (in
  # general, one is only guaranteed that the code is valid UTF-8; one should not
  # make any assumptions regarding the structure or format of the code).
  # Corresponds to the JSON property `code`
  # @return [String]
  attr_accessor :code

  def initialize(**args)
    update!(**args)
  end

  # Update properties of this object; a missing :code key leaves @code as-is.
  def update!(**args)
    @code = args[:code] if args.key?(:code)
  end
end
end
end
end
|
import uuid


def generate_unique_id(n):
    """Return a set of ``n`` freshly generated UUIDs.

    uuid4 values are random and (probabilistically) unique, so a set
    comprehension is sufficient; the original indexed loop discarded its
    loop variable and added elements one at a time.

    :param n: number of identifiers to generate (0 yields an empty set)
    :return: set of ``uuid.UUID`` objects
    """
    return {uuid.uuid4() for _ in range(n)}


# Driver Code
if __name__ == "__main__":
    number_of_ids = 2000000
    print(generate_unique_id(number_of_ids))
|
package cyclops.stream.spliterator.push;

import cyclops.reactive.ReactiveSeq;

import java.util.Spliterator;
import java.util.Spliterators.AbstractSpliterator;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.LockSupport;
import java.util.function.Consumer;

/**
 * Spliterator that delivers exactly one value pushed by a {@link ReactiveSeq}:
 * {@code tryAdvance} spins until the sequence has produced a value, hands it
 * to the consumer once, and reports exhaustion on subsequent calls.
 */
public class ValueEmittingSpliterator<T> extends AbstractSpliterator<T> {

    // Holds the value pushed by the ReactiveSeq; null means "not yet arrived".
    AtomicReference<T> value = new AtomicReference<T>(null);
    // Set once the single value has been handed to a consumer.
    boolean emitted = false;

    public ValueEmittingSpliterator(long est,
                                    int additionalCharacteristics,
                                    ReactiveSeq<T> seq) {
        // Only the ORDERED bit of the supplied characteristics is retained.
        super(est,
              additionalCharacteristics & Spliterator.ORDERED);
        // NOTE(review): assumes the seq pushes asynchronously; if forEach
        // blocks, construction blocks with it — confirm ReactiveSeq semantics.
        seq.forEach(e -> value.set(e));
    }

    @Override
    public boolean tryAdvance(Consumer<? super T> action) {
        if (emitted) {
            return false;
        }
        T local = null;
        // Spin until a value is present.
        while ((local = value.get()) == null) {
            LockSupport.parkNanos(0L);
        }
        action.accept(local);
        emitted = true;
        // Contract fix: java.util.Spliterator requires returning true when the
        // action was performed on an element. The original returned false
        // here, telling callers the element they had just received did not
        // exist; the next call correctly returns false via the guard above.
        return true;
    }
}
|
REM FILE NAME: st_proc.sql
REM LOCATION: Object Management\Functions,Procedures, and Packages\Reports
REM FUNCTION: Generate a list of stored code
REM TESTED ON: 7.3.3.5, 8.0.4.1, 8.1.5, 8.1.7, 9.0.1
REM PLATFORM: non-specific
REM REQUIRES: dba_objects
REM
REM This is a part of the Knowledge Xpert for Oracle Administration library.
REM Copyright (C) 2001 Quest Software
REM All rights reserved.
REM
REM******************** Knowledge Xpert for Oracle Administration ********************
REM
REM Column layout and page setup for the spooled report.
COLUMN OBJECT_TYPE FORMAT A12 HEADING 'Module|Type'
COLUMN OBJECT_NAME FORMAT A40 HEADING 'Module|Name'
COLUMN owner format a10 heading 'Module|Owner'
SET LINES 80 verify off feedback off
SET PAGES 58
BREAK on owner on object_type
@TITLE80 'STORED PROCEDURES'
SPOOL rep_out\st_proc
REM List every stored-code object visible in dba_objects, grouped by owner/type.
SELECT owner, object_type, object_name
FROM dba_objects
WHERE object_type IN ('FUNCTION', 'PACKAGE', 'PACKAGE BODY', 'PROCEDURE')
ORDER BY owner, object_type
/
SPOOL off
REM Restore default column, break, and session settings.
CLEAR columns
CLEAR breaks
SET lines 80 pages 22 verify on feedback on
TTITLE OFF
|
// Re-export PeopleSelector's default export as this module's default.
export { default } from './PeopleSelector'
|
#!/usr/bin/env bash
# Install or update proxychains-ng (rofl0r/proxychains-ng); on a fresh
# install, seed its config with the detected global socks5 proxy.

# Remove the scratch directory on exit.
trap 'rm -rf "${WORKDIR}"' EXIT

[[ -z "${WORKDIR}" || "${WORKDIR}" != "/tmp/"* || ! -d "${WORKDIR}" ]] && WORKDIR="$(mktemp -d)"
[[ -z "${CURRENT_DIR}" || ! -d "${CURRENT_DIR}" ]] && CURRENT_DIR=$(pwd)

# Load custom functions (colorEcho, Git_Clone_Update_Branch, ...)
if type 'colorEcho' 2>/dev/null | grep -q 'function'; then
    :
else
    if [[ -s "${MY_SHELL_SCRIPTS:-$HOME/.dotfiles}/custom_functions.sh" ]]; then
        source "${MY_SHELL_SCRIPTS:-$HOME/.dotfiles}/custom_functions.sh"
    else
        echo "${MY_SHELL_SCRIPTS:-$HOME/.dotfiles}/custom_functions.sh does not exist!"
        # Missing prerequisites is a failure: exit non-zero (was `exit 0`,
        # which made callers believe the install succeeded).
        exit 1
    fi
fi

if [[ -x "$(command -v proxychains4)" ]]; then
    if [[ -d "$HOME/proxychains-ng" ]]; then
        colorEcho "${BLUE}Updating ${FUCHSIA}proxychains-ng${BLUE}..."
        Git_Clone_Update_Branch "rofl0r/proxychains-ng" "$HOME/proxychains-ng"
        # Only recompile when the repo is newer than the installed binary.
        git_latest_update=$(git log -1 --format="%at" | xargs -I{} date -d @{})
        proxychains4_date=$(date -d "$(stat --printf='%y\n' "$(which proxychains4)")")
        if [[ $(date -d "$git_latest_update") > $(date -d "$proxychains4_date") ]]; then
            cd "$HOME/proxychains-ng" && \
                sudo ./configure --prefix=/usr --sysconfdir=/etc/proxychains >/dev/null && \
                sudo make >/dev/null && sudo make install >/dev/null
        fi
    fi
else
    colorEcho "${BLUE}Installing ${FUCHSIA}proxychains-ng${BLUE}..."
    # Prefer the distro package where pacman is available.
    if [[ -x "$(command -v pacman)" ]]; then
        if checkPackageNeedInstall "proxychains4"; then
            sudo pacman --noconfirm -S proxychains4
        fi
    fi
    # Fall back to building from source when no package provided the binary.
    if [[ ! -x "$(command -v proxychains4)" ]]; then
        Git_Clone_Update_Branch "rofl0r/proxychains-ng" "$HOME/proxychains-ng"
        if [[ -d "$HOME/proxychains-ng" ]]; then
            cd "$HOME/proxychains-ng" && \
                sudo ./configure --prefix=/usr --sysconfdir=/etc/proxychains >/dev/null && \
                sudo make >/dev/null && sudo make install >/dev/null && sudo make install-config >/dev/null
        fi
    fi
    isNewInstall="yes"
fi

if [[ "$isNewInstall" == "yes" ]]; then
    PROXYCHAINS_CONFIG="/etc/proxychains/proxychains.conf"
    [[ ! -s "${PROXYCHAINS_CONFIG}" ]] && \
        PROXYCHAINS_CONFIG="/etc/proxychains4.conf"
    if [[ -s "${PROXYCHAINS_CONFIG}" ]]; then
        sudo cp "${PROXYCHAINS_CONFIG}" "${PROXYCHAINS_CONFIG}.bak" && \
            sudo sed -i 's/socks4/# socks4/g' "${PROXYCHAINS_CONFIG}"
        check_set_global_proxy 7891 7890
        if [[ -n "${GLOBAL_PROXY_IP}" ]]; then
            # Double quotes so the IP and port actually expand; the original
            # single quotes appended the literal text
            # `socks5 ${GLOBAL_PROXY_IP} ${GLOBAL_PROXY_SOCKS_PORT}`.
            echo "socks5 ${GLOBAL_PROXY_IP} ${GLOBAL_PROXY_SOCKS_PORT}" | sudo tee -a "${PROXYCHAINS_CONFIG}" >/dev/null
        fi
    fi
fi

cd "${CURRENT_DIR}" || exit
|
import React from 'react'
import MainContent from '../components/MainContent';
const IndexPage = () => (
<MainContent />
);
export default IndexPage;
|
class SimpleOnnxConverter:
    """Toy converter that simulates exporting a model to ONNX format."""

    def __init__(self, model):
        # The model handle (here just a label used to derive the ONNX name).
        self.model = model

    def convert_to_onnx(self):
        """Return a success message naming the simulated ONNX model, or a
        failure message describing the error if conversion raises."""
        try:
            # Simulated conversion — real export logic would replace this.
            onnx_model = "{}_onnx".format(self.model)
            return "Conversion successful. ONNX model: {}".format(onnx_model)
        except Exception as e:
            return "Conversion failed: {}".format(str(e))
|
#;
#;
filep='Do something.'
echo ''
# Double quotes so ${filep} expands; the original single quotes printed the
# literal text `* Summary: $filep`.
echo "* Summary: $filep"
#;
function fct1() {
    echo ''
    echo "Hello World from function fct1."
}
#;
# Dispatcher: the first CLI argument names the function (or command) to run,
# e.g. `./script fct1`.
$1
#;
|
<gh_stars>0
// Public API surface of this package: the Gmail plugin and the Google
// rate-limiter interface plus its implementation.
export { GmailPlugin } from './Plugin';
export { IGoogleRateLimiter, GoogleRateLimiter } from './RateLimiter';
|
<reponame>jrfaller/maracas
package mainclient.classTypeChanged;

import main.classTypeChanged.ClassTypeChangedI2C;

// NOTE(review): empty implementation body — this compiles only if
// ClassTypeChangedI2C declares no abstract methods (or has become a class);
// confirm against the interface definition, which is not visible here.
public class ClassTypeChangedI2CImp implements ClassTypeChangedI2C {
}
|
use Eraple\Core\App;
use Eraple\Core\Task;
/**
 * Sample task that reacts only to ReplaceTaskEvent notifications.
 */
class SampleTaskHandlesReplaceTaskEvent extends Task
{
    public function handleEvent($event)
    {
        // Guard clause: ignore every event type except ReplaceTaskEvent.
        if (!($event instanceof ReplaceTaskEvent)) {
            return;
        }

        // Replace specific task handles within the application
        // Your implementation logic here
    }
}
|
<reponame>hou-2021/hou-2021.github.io
import {
  createRouter,
  createWebHistory
} from "vue-router";
import Home from '../views/home.vue'

const routes = [{
  // Bug fix: this record used `require: Home`, a key vue-router ignores,
  // so the root path rendered nothing. `component` is the correct option.
  path: '',
  component: Home
},
{
  path: '/home',
  component: Home,
  meta: {
    title: '首页'
  },
}
];

const router = createRouter({
  history: createWebHistory(process.env.BASE_URL),
  routes,
});

// 全局守卫 — set the document title from the matched route's meta.
// Guarded so navigating to a route without a match or without a `title`
// (e.g. the root record above) no longer throws / sets "undefined".
router.beforeEach(function (to, from, next) {
  const matched = to.matched[0];
  if (matched && matched.meta && matched.meta.title) {
    document.title = matched.meta.title;
  }
  next();
})

export default router;
|
<filename>index.js
'use strict';
var utils = require('expand-utils');
var define = require('define-property');
var Target = require('expand-target');
var Task = require('expand-task');
var use = require('use');
/**
* Expand a declarative configuration with tasks and targets.
* Create a new Config with the given `options`
*
* ```js
* var config = new Config();
*
* // example usage
* config.expand({
* jshint: {
* src: ['*.js', 'lib/*.js']
* }
* });
* ```
* @param {Object} `options`
* @api public
*/
function Config(options) {
  // Support calling without `new`.
  if (!(this instanceof Config)) {
    return new Config(options);
  }
  // Tag the instance as a "config" for expand-utils type checks.
  utils.is(this, 'config');
  use(this);
  // Non-enumerable counter used to auto-name anonymous targets in expand().
  define(this, 'count', 0);
  this.options = options || {};
  this.targets = {};
  this.tasks = {};
  // If a full config object (not options) was passed, expand it right away;
  // options is reset because the argument was config data, not settings.
  if (utils.isConfig(options)) {
    this.options = {};
    this.expand(options);
    return this;
  }
}
/**
* Expand and normalize a declarative configuration into tasks, targets,
* and `options`.
*
* ```js
* config.expand({
* options: {},
* assemble: {
* site: {
* mapDest: true,
* src: 'templates/*.hbs',
* dest: 'site/'
* },
* docs: {
* src: 'content/*.md',
* dest: 'site/docs/'
* }
* }
* });
* ```
* @param {Object} `config` Config object with tasks and/or targets.
* @return {Object}
* @api public
*/
Config.prototype.expand = function(config) {
  // A bare target object (src/dest/files) gets an auto-generated name.
  if (utils.isTarget(config)) {
    this.addTarget('target' + (this.count++), config);
    return this;
  }
  for (var key in config) {
    if (config.hasOwnProperty(key)) {
      var val = config[key];
      if (utils.isTask(val)) {
        this.addTask(key, val);
      } else if (utils.isTarget(val)) {
        this.addTarget(key, val);
      } else {
        // Anything that is neither task nor target (e.g. `options`) is
        // copied onto the instance as-is.
        this[key] = val;
      }
    }
  }
  // Consistency fix: return `this` on this path too, matching the early
  // return above and the documented `@return {Object}`, so calls chain.
  return this;
};
/**
* Add a task to the config, while also normalizing targets with src-dest mappings and
* expanding glob patterns in each target.
*
* ```js
* task.addTask('assemble', {
* site: {src: '*.hbs', dest: 'templates/'},
* docs: {src: '*.md', dest: 'content/'}
* });
* ```
* @param {String} `name` the task's name
* @param {Object} `config` Task object where each key is a target or `options`.
* @return {Object}
* @api public
*/
Config.prototype.addTask = function(name, config) {
  if (typeof name !== 'string') {
    throw new TypeError('Config#addTask expects name to be a string');
  }
  // Build the task, tag it with its (non-enumerable) name, run any plugins
  // registered via `use`, then normalize/expand its targets.
  var normalized = new Task(this.options);
  define(normalized, 'name', name);
  utils.run(this, 'config', normalized);
  normalized.addTargets(config);
  // Register the task under its name and hand it back.
  this.tasks[name] = normalized;
  return normalized;
};
/**
* Add a target to the config, while also normalizing src-dest mappings and
* expanding glob patterns in the target.
*
* ```js
* config.addTarget({src: '*.hbs', dest: 'templates/'});
* ```
* @param {String} `name` The target's name
* @param {Object} `target` Target object with a `files` property, or `src` and optionally a `dest` property.
* @return {Object}
* @api public
*/
Config.prototype.addTarget = function(name, config) {
  if (typeof name !== 'string') {
    throw new TypeError('Config#addTarget expects name to be a string');
  }
  // Build the target, tag it with its (non-enumerable) name, run any plugins
  // registered via `use`, then expand its src-dest file mappings.
  var normalized = new Target(this.options);
  define(normalized, 'name', name);
  utils.run(this, 'target', normalized);
  normalized.addFiles(config);
  // Register the target under its name and hand it back.
  this.targets[name] = normalized;
  return normalized;
};
/**
* Expose `Config`
*/
module.exports = Config;
|
// Demonstrates Java's pass-by-value semantics for primitive arguments.
public class SwapExample{
    public static void main(String args[]){
        int x = 7;
        int y = 9;
        System.out.println("Before Swapping - x: "+x+", y: "+y);
        // NOTE(review): this call CANNOT affect the caller's x and y — Java
        // passes int arguments by value, so the println below prints the
        // same values as before. If an actual swap is intended, perform it
        // inline here (int t = x; x = y; y = t;) or exchange values via a
        // returned array/object; the current output is unchanged values.
        swap(x, y);
        System.out.println("After Swapping - x: "+x+", y: "+y);
    }
    // Swaps only the local parameter copies; has no observable effect on
    // the caller.
    public static void swap(int x, int y){
        // Interchange the values of x and y
        int temp = x;
        x = y;
        y = temp;
    }
}
|
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

set -e
set -u

# Prepare an out-of-tree build directory seeded from the template config.
mkdir -p build
cd build
cp ../cmake/config.cmake .

# Append the CI build flags in one shot. The quoted heredoc writes each line
# verbatim, producing exactly the same config.cmake contents as the former
# series of escaped `echo set\(...\)` commands.
cat >> config.cmake <<'EOF'
set(USE_SORT ON)
set(USE_MICRO ON)
set(USE_MICRO_STANDALONE_RUNTIME ON)
set(USE_GRAPH_RUNTIME_DEBUG ON)
set(USE_VM_PROFILER ON)
set(USE_EXAMPLE_EXT_RUNTIME ON)
set(USE_LLVM llvm-config-8)
set(USE_NNPACK ON)
set(NNPACK_PATH /NNPACK/build/)
set(USE_ANTLR ON)
set(CMAKE_CXX_COMPILER g++)
set(CMAKE_CXX_FLAGS -Werror)
set(HIDE_PRIVATE_SYMBOLS ON)
set(USE_VTA_TSIM ON)
set(USE_VTA_FSIM ON)
EOF
|
def swap(arr, idx1, idx2):
    """Exchange the elements at positions idx1 and idx2 of arr in place."""
    tmp = arr[idx1]
    arr[idx1] = arr[idx2]
    arr[idx2] = tmp


arr = [10, 20, 30, 40, 50]
swap(arr, 3, 4)
print(arr)
|
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const express_1 = require("express");
const login_controller_1 = require("../controller/login.controller");
class LoginRoute {
constructor() {
this.router = express_1.Router();
this._config();
}
_config() {
this.router.post('/', login_controller_1.loginController.login);
}
}
const loginRoute = new LoginRoute();
exports.default = loginRoute.router;
|
#!/bin/sh
#
# Copyright (C) 2010, 2012  Internet Systems Consortium, Inc. ("ISC")
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.

# $Id: stop.sh,v 1.2 2010/06/17 05:38:05 marka Exp $

# Load the shared test configuration (defines $PERL, among other settings).
. ./conf.sh
# Delegate to the Perl stop script, forwarding all arguments verbatim.
$PERL ./stop.pl "$@"
|
#!/usr/bin/env bash
# Install netdata via the official kickstart script, unattended, with all
# optional components enabled.
# NOTE(review): this pipes a remote script straight into bash — pin or
# checksum-verify the download before using this anywhere security-sensitive.
bash <(curl -Ss https://my-netdata.io/kickstart.sh) --non-interactive all
|
import os
import fnmatch


def search_files(directory, extension):
    """Recursively collect paths of files under ``directory`` whose names
    match ``extension``.

    ``extension`` may be a plain suffix (e.g. ``".py"``) or a glob pattern
    (e.g. ``"*.py"``). The original implementation only checked
    ``endswith``, so the driver call below with ``'*.py'`` matched nothing;
    glob patterns are now also accepted via ``fnmatch`` (backward
    compatible — plain suffixes behave exactly as before).

    :param directory: root directory to walk (nonexistent dirs yield [])
    :param extension: filename suffix or glob pattern
    :return: list of matching file paths
    """
    filenames = []
    for root, dirs, files in os.walk(directory):
        for file in files:
            if file.endswith(extension) or fnmatch.fnmatch(file, extension):
                filenames.append(os.path.join(root, file))
    return filenames


filenames = search_files('./sample_files', '*.py')
for filename in filenames:
    print(filename)
|
#!/bin/bash
set -uo pipefail

# Default locations for the quickstart environment; per-network state lives
# under ~/.ziti/quickstart/<network-name>.
# shellcheck disable=SC2155
export DEFAULT_ZITI_HOME_LOCATION="${HOME}/.ziti/quickstart/$(hostname)"
export ZITI_QUICKSTART_ENVROOT="${HOME}/.ziti/quickstart"

# ANSI color escape prefixes used by the color helper functions below.
# Note: these hold the literal backslash sequences; callers render them
# with `echo -e`.
ASCI_WHITE='\033[01;37m'
ASCI_RESTORE='\033[0m'
ASCI_RED='\033[00;31m'
ASCI_GREEN='\033[00;32m'
ASCI_YELLOW='\033[00;33m'
ASCI_BLUE='\033[00;34m'
# Unused palette entries kept for reference:
#ASCI_MAGENTA='\033[00;35m'
#ASCI_PURPLE='\033[00;35m'
#ASCI_CYAN='\033[00;36m'
#ASCI_LIGHTGRAY='\033[00;37m'
#ASCI_LRED='\033[01;31m'
#ASCI_LGREEN='\033[01;32m'
#ASCI_LYELLOW='\033[01;33m'
#ASCI_LBLUE='\033[01;34m'
#ASCI_LMAGENTA='\033[01;35m'
#ASCI_LPURPLE='\033[01;35m'
#ASCI_LCYAN='\033[01;36m'
# Wrap $2 in the color prefix $1, always appending the restore sequence.
# Output contains the literal escape text; render with `echo -e`.
function _ansiWrap {
  echo "${1}${2-}${ASCI_RESTORE}"
}
function WHITE {
  _ansiWrap "${ASCI_WHITE}" "${1-}"
}
function RED {
  _ansiWrap "${ASCI_RED}" "${1-}"
}
function GREEN {
  _ansiWrap "${ASCI_GREEN}" "${1-}"
}
function YELLOW {
  _ansiWrap "${ASCI_YELLOW}" "${1-}"
}
function BLUE {
  _ansiWrap "${ASCI_BLUE}" "${1-}"
}
# Log the ziti CLI into the edge controller API with the quickstart admin
# credentials, trusting the quickstart intermediate CA certificate.
function zitiLogin {
  unused=$("${ZITI_BIN_DIR-}/ziti" edge login "${ZITI_EDGE_CONTROLLER_API}" -u "${ZITI_USER-}" -p "${ZITI_PWD}" -c "${ZITI_PKI_OS_SPECIFIC}/${ZITI_EDGE_CONTROLLER_ROOTCA_NAME}/certs/${ZITI_EDGE_CONTROLLER_INTERMEDIATE_NAME}.cert")
}
# Wipe the controller's database directory and re-run first-time
# initialization. Refuses to touch the filesystem unless ZITI_HOME is set.
function cleanZitiController {
  ziti_home="${ZITI_HOME-}"
  if [[ -z "${ziti_home}" ]]; then
    echo -e " * ERROR: $(RED "ZITI_HOME is not set") "
    return 1
  fi
  rm -rf "${ziti_home}/db"
  mkdir "${ziti_home}/db"
  initializeController
}
# Run first-time controller initialization (creates the admin user defined
# by ZITI_USER/ZITI_PWD); output is captured in controller-init.log.
function initializeController {
  "${ZITI_BIN_DIR-}/ziti-controller" edge init "${ZITI_HOME_OS_SPECIFIC}/controller.yaml" -u "${ZITI_USER-}" -p "${ZITI_PWD}" &> "${ZITI_HOME_OS_SPECIFIC}/controller-init.log"
  echo -e "ziti-controller initialized. see $(BLUE "${ZITI_HOME-}/controller-init.log") for details"
}
# Launch the controller in the background, redirecting its output to
# ziti-edge-controller.log; the function returns immediately.
function startZitiController {
  # shellcheck disable=SC2034
  unused=$("${ZITI_BIN_DIR-}/ziti-controller" run "${ZITI_HOME_OS_SPECIFIC}/controller.yaml" > "${ZITI_HOME_OS_SPECIFIC}/ziti-edge-controller.log" 2>&1 &)
  echo -e "ziti-controller started. log located at: $(BLUE "${ZITI_HOME-}/ziti-edge-controller.log")"
}
# Stop the controller. Note: killall terminates EVERY ziti-controller
# process on this host, not just the one this script started.
function stopZitiController {
  killall ziti-controller
}
# Verify that /etc/hosts contains entries for the three quickstart
# hostnames (controller, edge controller, edge router); prints guidance and
# returns 1 when any are missing.
function checkHostsFile {
  ctrlexists=$(grep -c "${ZITI_CONTROLLER_HOSTNAME}" /etc/hosts)
  edgectrlexists=$(grep -c "${ZITI_EDGE_CONTROLLER_HOSTNAME}" /etc/hosts)
  erexists=$(grep -c "${ZITI_EDGE_ROUTER_HOSTNAME}" /etc/hosts)

  if [[ "0" = "${ctrlexists}" ]] || [[ "0" = "${edgectrlexists}" ]] || [[ "0" = "${erexists}" ]]; then
    echo " "
    echo -e "$(YELLOW "Ziti is generally used to create an overlay network. Generally speaking this will involve more than one host")"
    echo -e "$(YELLOW "Since this is a script geared towards setting up a very minimal development environment it needs to make some")"
    echo -e "$(YELLOW "assumptions. One of these assumptions is that the three specific entries are entered onto your hosts file.")"
    echo -e "$(YELLOW "One or more of these are missing:")"
    echo " "
    # Bug fix: this branch tests ZITI_CONTROLLER_HOSTNAME but previously
    # reported ZITI_EDGE_CONTROLLER_HOSTNAME (copy-paste), so the plain
    # controller hostname was never shown as missing or found.
    if [[ "0" == "${ctrlexists}" ]]; then
      echo -e " * $(RED "MISSING: ${ZITI_CONTROLLER_HOSTNAME}") "
    else
      echo -e " * $(GREEN "  FOUND: ${ZITI_CONTROLLER_HOSTNAME}") "
    fi
    if [[ "0" == "${edgectrlexists}" ]]; then
      echo -e " * $(RED "MISSING: ${ZITI_EDGE_CONTROLLER_HOSTNAME}") "
    else
      echo -e " * $(GREEN "  FOUND: ${ZITI_EDGE_CONTROLLER_HOSTNAME}") "
    fi
    if [[ "0" == "${erexists}" ]]; then
      echo -e " * $(RED "MISSING: ${ZITI_EDGE_ROUTER_HOSTNAME}") "
    else
      echo -e " * $(GREEN "  FOUND: ${ZITI_EDGE_ROUTER_HOSTNAME}") "
    fi
    echo " "
    echo "The easiest way to correct this is to run the following command:"
    echo " echo \"127.0.0.1 ${ZITI_CONTROLLER_HOSTNAME} ${ZITI_EDGE_CONTROLLER_HOSTNAME} ${ZITI_EDGE_ROUTER_HOSTNAME}\" | sudo tee -a /etc/hosts"
    echo " "
    echo "add these entries to your hosts file, and rerun the script when ready"
    return 1
  fi
}
# Query GitHub for the latest openziti/ziti release, exporting
# ZITI_BINARIES_FILE (asset name for this OS/arch) and ZITI_BINARIES_VERSION.
function getLatestZitiVersion {
  setupZitiHome
  if ! setOs; then
    return 1
  fi
  # Architecture detection: default amd64, switch to arm when uname says so.
  ZITI_ARCH="amd64"
  if [[ "$(uname -a)" == *"arm"* ]]; then
    ZITI_ARCH="arm"
  fi
  # NOTE(review): this unconditional unset makes the "already set?" check
  # below ALWAYS true, so a caller-provided ZITI_BINARIES_VERSION is
  # discarded and the latest release is always fetched — confirm whether
  # the caching branch is still intended.
  unset ZITI_BINARIES_VERSION
  if [[ "${ZITI_BINARIES_VERSION-}" == "" ]]; then
    zitilatest=$(curl -s https://api.github.com/repos/openziti/ziti/releases/latest)
    # shellcheck disable=SC2155
    export ZITI_BINARIES_FILE=$(echo "${zitilatest}" | tr '\r\n' ' ' | jq -r '.assets[] | select(.name | startswith("'"ziti-${ZITI_OSTYPE}-${ZITI_ARCH}"'")) | .name')
    # shellcheck disable=SC2155
    export ZITI_BINARIES_VERSION=$(echo "${zitilatest}" | tr '\r\n' ' ' | jq -r '.tag_name')
  fi
  echo "ZITI_BINARIES_VERSION: ${ZITI_BINARIES_VERSION}"
}
# Download (if needed) and unpack the latest ziti release into a versioned
# directory under ZITI_HOME/ziti-bin; optionally ($1 == "yes") appends the
# binary directory to PATH.
function getLatestZiti {
  setupZitiHome
  if [[ "${ZITI_HOME-}" == "" ]]; then
    echo "ERROR: ZITI_HOME is not set!"
    return 1
  fi
  ziti_bin_root="${ZITI_BIN_ROOT-}"
  if [[ "${ziti_bin_root}" == "" ]]; then
    ziti_bin_root="${ZITI_HOME-}/ziti-bin"
  fi
  # NOTE(review): this appends a second "ziti-bin" segment to a path that
  # normally already ends in ziti-bin (see default above); everything below
  # uses ${ziti_bin_root}, not this export — confirm the intended layout.
  export ZITI_BIN_ROOT="${ziti_bin_root}/ziti-bin"
  mkdir -p "${ziti_bin_root}"
  if ! getLatestZitiVersion; then
    return 1
  fi
  ziti_bin_ver="${ZITI_BINARIES_VERSION-}"
  if [[ "${ziti_bin_ver}" == "" ]]; then
    echo "ERROR: ZITI_BINARIES_VERSION is not set!"
    return 1
  fi
  export ZITI_BIN_DIR="${ziti_bin_root}/ziti-${ziti_bin_ver}"
  # Reuse an already-downloaded archive when present.
  ZITI_BINARIES_FILE_ABSPATH="${ZITI_HOME-}/ziti-bin/${ZITI_BINARIES_FILE}"
  if ! test -f "${ZITI_BINARIES_FILE_ABSPATH}"; then
    zitidl="https://github.com/openziti/ziti/releases/download/${ZITI_BINARIES_VERSION-}/${ZITI_BINARIES_FILE}"
    echo -e 'Downloading '"$(BLUE "${zitidl}")"' to '"$(BLUE "${ZITI_BINARIES_FILE_ABSPATH}")"
    curl -Ls "${zitidl}" -o "${ZITI_BINARIES_FILE_ABSPATH}"
  else
    echo -e "$(YELLOW 'Already Downloaded ')""$(BLUE "${ZITI_BINARIES_FILE}")"' at: '"${ZITI_BINARIES_FILE_ABSPATH}"
  fi
  echo -e 'UNZIPPING '"$(BLUE "${ZITI_BINARIES_FILE_ABSPATH}")"' into: '"$(GREEN "${ZITI_BIN_DIR}")"
  # Remove any partial previous extraction before unpacking fresh.
  rm -rf "${ziti_bin_root}/ziti-${ZITI_BINARIES_VERSION-}"
  if [[ "${ZITI_OSTYPE}" == "windows" ]]; then
    # Windows releases are zips containing a "ziti/" folder: flatten it.
    unzip "${ZITI_BINARIES_FILE_ABSPATH}" -d "${ZITI_BIN_DIR}"
    mv "${ZITI_BIN_DIR}/ziti/"* "${ZITI_BIN_DIR}/"
    rm -rf "${ZITI_BIN_DIR}/ziti/"*
    rmdir "${ZITI_BIN_DIR}/ziti/"
    # NOTE(review): this chmod is repeated unconditionally just below.
    chmod +x "${ZITI_BIN_DIR}/"*
  else
    tar -xf "${ZITI_BINARIES_FILE_ABSPATH}" --directory "${ziti_bin_root}"
    mv "${ziti_bin_root}/ziti" "${ZITI_BIN_DIR}"
  fi
  echo -e 'Marking executables at '"$(GREEN "${ZITI_BIN_DIR}")"' executable'
  chmod +x "${ZITI_BIN_DIR}/"*
  # Optionally put the new binaries on PATH for this shell.
  if [[ "${1-}" == "yes" ]]; then
    echo "Adding ${ZITI_BIN_DIR} to the path if necessary:"
    if [[ "$(echo "$PATH"|grep -q "${ZITI_BIN_DIR}" && echo "yes")" == "yes" ]]; then
      echo -e "$(GREEN "${ZITI_BIN_DIR}") is already on the path"
    else
      echo -e "adding $(RED "${ZITI_BIN_DIR}") to the path"
      export PATH=$PATH:"${ZITI_BIN_DIR}"
    fi
  fi
}
# Verify that every external command this script relies on is on PATH;
# prints the missing ones and returns 1 so callers can abort.
function checkPrereqs {
  commands_to_test=(curl jq)
  missing_requirements=""
  # verify all the commands required in the automation exist before trying to run the full suite
  for cmd in "${commands_to_test[@]}"
  do
    # checking all commands are on the path before continuing...
    if ! [[ -x "$(command -v "${cmd}")" ]]; then
      missing_requirements="${missing_requirements} * ${cmd}\n"
    fi
  done
  # are requirements missing? if yes, stop here and help 'em out
  if ! [[ "" = "${missing_requirements}" ]]; then
    echo " "
    echo "You're missing one or more commands that are used in this script."
    echo "Please ensure the commands listed are on the path and then try again."
    # %b renders the embedded \n escapes. The original
    # `printf "%s\n", ...` carried a stray C-style comma into the format
    # string and left the \n sequences unexpanded.
    printf "%b" "${missing_requirements}"
    echo " "
    echo " "
    return 1
  fi
  # (typo fix: "stated" -> "started")
  echo "Let's get started creating your local development network!"
  echo ""
  echo ""
}
# Validate the network/controller name: hostname-like names pass (return 0);
# names containing punctuation from the class below are rejected (return 1).
function checkControllerName {
  if [[ "${ZITI_EDGE_CONTROLLER_HOSTNAME}" != *['!'@#\$%^\&*\(\)_+]* ]]; then
    return 0
  fi
  echo -e "$(RED "  - The provided Network name contains an invalid character: '!'@#\$%^\&*()_+")"
  return 1
}
# Unset every ZITI_* variable in the current shell: list them via `set`,
# strip the "=value" part to recover each name, then unset it (logging each).
function unsetZitiEnv {
  for zEnvVar in $(set -o posix ; set | grep -e "^ZITI_" | sort); do envvar="$(echo "${zEnvVar}" | cut -d '=' -f1)"; echo unsetting "[${envvar}]${zEnvVar}"; unset "${envvar}"; done
}
# Print the welcome banner and explain where quickstart state is written.
function issueGreeting {
  #echo "-------------------------------------------------------------"
  echo " "
  echo " _ _ _"
  echo " ____ (_) | |_ (_)"
  echo " |_ / | | | __| | |"
  echo " / / | | | |_ | |"
  echo " /___| |_| \__| |_|"
  echo "-------------------------------------------------------------"
  echo " "
  echo "This script will make it trivial to setup a very simple environment locally which will allow you to start"
  echo "learning ziti. This environment is suitable for development work only and is not a decent representation of"
  echo "a fully redundant production-caliber network."
  echo ""
  echo "Please note that this script will write files to your home directory into a directory named .ziti."
  echo -n "For you this location will be: "
  echo -e "$(BLUE "${ZITI_QUICKSTART_ENVROOT}")"
  echo " "
}
# Determine the network name (ZITI_NETWORK): use $1 when supplied, otherwise
# prompt interactively, defaulting to the machine's hostname. Returns 1 when
# the chosen name fails checkControllerName validation.
function setupZitiNetwork {
  if [[ "${1-}" == "" ]]; then
    echo " "
    echo "Creating a controller is effectively creating a network. The name of the network will be used when writing"
    echo "configuration files locally. Choose the name of your network now. The format of the network name should resemble"
    echo -n "what a hostname looks like. A good choice is to actually use your system's hostname: "
    echo -e "$(BLUE "$(hostname)")"
    echo " "
    read -rp "$(echo -ne "Network Name [$(BLUE "$(hostname)")]: ")" ZITI_NETWORK
    echo " "
    if checkControllerName; then
      : #clear to continue
      # Empty input falls back to the hostname default.
      if [[ "${ZITI_NETWORK-}" == "" ]]; then
        ZITI_NETWORK="$(hostname)"
      fi
      echo "name: ${ZITI_NETWORK-}"
    else
      echo " "
      echo "nah bro"
      return 1
    fi
    echo " "
  else
    ZITI_NETWORK="${1-}"
  fi
}
# Default ZITI_HOME to the per-network quickstart directory when unset;
# an already-set ZITI_HOME is left untouched.
function setupZitiHome {
  if [[ -z "${ZITI_HOME-}" ]]; then
    export ZITI_HOME="${HOME}/.ziti/quickstart/${ZITI_NETWORK-}"
    echo "using default ZITI_HOME: ${ZITI_HOME}"
  fi
}
# Derive every quickstart hostname/rawname from ZITI_NETWORK (respecting
# values the caller pre-set), then write and source the env file.
# Fix: each assignment previously read `export export VAR=...` — the doubled
# keyword also exported a stray variable literally named "export".
function generateEnvFile {
  echo -e "Generating new network with name: $(BLUE "${ZITI_NETWORK-}")"
  if [[ "${ZITI_CONTROLLER_RAWNAME-}" == "" ]]; then export ZITI_CONTROLLER_RAWNAME="${ZITI_NETWORK-}-controller"; fi
  if [[ "${ZITI_CONTROLLER_HOSTNAME-}" == "" ]]; then export ZITI_CONTROLLER_HOSTNAME="${ZITI_NETWORK-}"; fi
  if [[ "${ZITI_EDGE_CONTROLLER_RAWNAME-}" == "" ]]; then export ZITI_EDGE_CONTROLLER_RAWNAME="${ZITI_NETWORK-}-edge-controller"; fi
  if [[ "${ZITI_EDGE_CONTROLLER_HOSTNAME-}" == "" ]]; then export ZITI_EDGE_CONTROLLER_HOSTNAME="${ZITI_NETWORK-}"; fi
  if [[ "${ZITI_ZAC_RAWNAME-}" == "" ]]; then export ZITI_ZAC_RAWNAME="${ZITI_NETWORK-}"; fi
  if [[ "${ZITI_ZAC_HOSTNAME-}" == "" ]]; then export ZITI_ZAC_HOSTNAME="${ZITI_NETWORK-}"; fi
  if [[ "${ZITI_EDGE_ROUTER_RAWNAME-}" == "" ]]; then export ZITI_EDGE_ROUTER_RAWNAME="${ZITI_NETWORK-}-edge-router"; fi
  if [[ "${ZITI_EDGE_ROUTER_HOSTNAME-}" == "" ]]; then export ZITI_EDGE_ROUTER_HOSTNAME="${ZITI_NETWORK-}"; fi
  if [[ "${ZITI_EDGE_ROUTER_PORT-}" == "" ]]; then export ZITI_EDGE_ROUTER_PORT="3022"; fi
  if [[ "${ZITI_BIN_ROOT-}" == "" ]]; then
    export ZITI_BIN_ROOT="${ZITI_HOME-}/ziti-bin"
  fi
  if ! ziti_createEnvFile; then
    return 1
  fi
  export ENV_FILE="${ZITI_HOME-}/${ZITI_NETWORK-}.env"
  echo -e "environment file created and sourced from: $(BLUE "${ENV_FILE}")"
}
# One-shot "express" setup: resolve the network name ($1 or hostname),
# download binaries, generate configs/PKI, initialize + start the controller,
# create default router policies, then create/enroll/start one edge router.
function ziti_expressConfiguration {
  echo " "
  echo "___________ _______________________________________^__"
  echo " ___ ___ ||| ___ ___ ___ ___ ___ | __ ,----\ "
  echo "| | | |||| | | | | | | | | | | | | |_____\ "
  echo "|___| |___|||| |___| |___| |___| | O | O | | | | \ "
  echo " ||| ===== EXPRESS ==== |___|___| | |__| )"
  echo "___________|||______________________________|______________/"
  echo " ||| /--------"
  echo "-----------'''---------------------------------------'"
  echo ""
  # Network name: explicit argument wins, otherwise use the hostname.
  if [[ "${1-}" == "" ]]; then
    nw="$(hostname)"
  else
    nw="${1-}"
  fi
  setupZitiNetwork "${nw}"
  setupZitiHome
  # Fetch binaries and generate the environment file; abort on failure.
  if ! getLatestZiti "no"; then
    echo -e "$(RED "getLatestZiti failed")"
    return 1
  fi
  if ! generateEnvFile; then
    echo "Exiting as env file was not generated"
    return 1
  fi
  #checkHostsFile
  # Controller: PKI, config, systemd unit, first-time init, then start it.
  createPki
  createControllerConfig
  createControllerSystemdFile
  initializeController
  startZitiController
  echo "starting the ziti controller to enroll the edge router"
  waitForController
  zitiLogin
  # Default policies: delete-then-create makes reruns idempotent.
  echo -e "---------- Creating an edge router policy allowing all identities to connect to routers with a $(GREEN "#public") attribute"
  unused=$("${ZITI_BIN_DIR-}/ziti" edge delete edge-router-policy allEdgeRouters)
  unused=$("${ZITI_BIN_DIR-}/ziti" edge create edge-router-policy allEdgeRouters --edge-router-roles '#public' --identity-roles '#all' )
  echo -e "---------- Creating a service edge router policy allowing all services to use $(GREEN "#public") edge routers"
  unused=$("${ZITI_BIN_DIR-}/ziti" edge delete service-edge-router-policy allSvcPublicRouters)
  unused=$("${ZITI_BIN_DIR-}/ziti" edge create service-edge-router-policy allSvcPublicRouters --edge-router-roles '#public' --service-roles '#all')
  # Edge router: PKI, config, systemd unit, create + enroll + run.
  createRouterPki
  createEdgeRouterConfig "${ZITI_EDGE_ROUTER_RAWNAME}"
  createRouterSystemdFile "${ZITI_EDGE_ROUTER_RAWNAME}"
  echo "---------- Creating edge-router ${ZITI_EDGE_ROUTER_RAWNAME}...."
  unused=$("${ZITI_BIN_DIR-}/ziti" edge delete edge-router "${ZITI_EDGE_ROUTER_RAWNAME}")
  unused=$("${ZITI_BIN_DIR-}/ziti" edge create edge-router "${ZITI_EDGE_ROUTER_RAWNAME}" -o "${ZITI_HOME_OS_SPECIFIC}/${ZITI_EDGE_ROUTER_RAWNAME}.jwt" -t)
  sleep 1
  echo "---------- Enrolling edge-router ${ZITI_EDGE_ROUTER_RAWNAME}...."
  unused=$("${ZITI_BIN_DIR-}/ziti-router" enroll "${ZITI_HOME_OS_SPECIFIC}/${ZITI_EDGE_ROUTER_RAWNAME}.yaml" --jwt "${ZITI_HOME_OS_SPECIFIC}/${ZITI_EDGE_ROUTER_RAWNAME}.jwt" &> "${ZITI_HOME_OS_SPECIFIC}/${ZITI_EDGE_ROUTER_RAWNAME}.enrollment.log")
  echo ""
  sleep 1
  # Run the router in the background, logging to its own file.
  # shellcheck disable=SC2034
  unused=$("${ZITI_BIN_DIR-}/ziti-router" run "${ZITI_HOME_OS_SPECIFIC}/${ZITI_EDGE_ROUTER_RAWNAME}.yaml" > "${ZITI_HOME_OS_SPECIFIC}/${ZITI_EDGE_ROUTER_RAWNAME}.log" 2>&1 &)
  echo "Express setup complete!"
}
# Interactive yes/no prompt: accept the default ZITI_HOME location.
# Loops until a valid answer; returns 1 when the user declines.
function decideToUseDefaultZitiHome {
  yn=""
  while true
  do
    echo "ZITI_HOME has not been set. Do you want to use the default ZITI_HOME: ${DEFAULT_ZITI_HOME_LOCATION}"
    echo " "
    read -rp "Select an action: " yn
    case $yn in
      [yY]* )
        break;;
      [Nn]* )
        echo ""
        return 1;;
      * )
        echo " "
        echo "Answer $yn is not valid. Please answer yes or no. (y/n [yes/NO])";;
    esac
    # Reset so an invalid answer re-prompts.
    yn=
  done
  echo " "
}
# Main menu: $1 preselects an option (prompt is skipped when non-empty);
# $2 is forwarded to the express configuration as the network name.
function decideOperation {
  yn="${1-}"
  while true
  do
    if [[ "${yn}" == "" ]]; then
      echo "What sort of operation are you looking to perform?"
      echo " 1.) Express configuration - a simple overlay will be spawned containing one controller and two edge routers"
      echo " 2.) Create Controller configuration - answer a few questions and a controller config will be emitted"
      echo " 3.) Create Edge Router configuration - answer a few questions and an edge router config will be emitted"
      echo " 4.) Start a network with the provided name"
      echo " "
      read -rp "Select an action: " yn
    fi
    case $yn in
      [1]* )
        ziti_expressConfiguration "$2"
        break;;
      [2]* )
        generateEnvFile
        ;;
      # Options 3 and 4 are placeholders - not implemented yet.
      [3]* )
        echo "333 has been chosen"
        echo " "
        ;;
      [4]* )
        echo "4444 has been chosen"
        echo " "
        ;;
      [yYqQeE]* )
        break
        ;;
      [Nn]* )
        echo ""; echo "Ok - come back when you're ready."
        exit;;
      * ) echo "Please answer yes or no. (yes/NO)";;
    esac
    # Reset so non-terminal choices re-prompt.
    yn=
  done
  echo " "
}
# Entry point for the express install: greeting, prerequisite check, then
# the (preselected) express configuration flow.
function expressInstall {
  #greet the user with the banner and quick blurb about what to expect
  issueGreeting
  #make sure the user has all the necessary commands to be successful;
  # abort instead of limping on without curl/jq — the script runs without
  # `set -e`, so the failure was previously ignored.
  if ! checkPrereqs; then
    return 1
  fi
  #prompt the user for input and do what they want/need
  decideOperation 1 "${1-}"
}
# Create a server and a client certificate for $1 from CA $2, adding $3 as
# an IP SAN (default 127.0.0.1). Each cert is skipped if its key exists.
function pki_client_server {
  name_local=${1-}
  ZITI_CA_NAME_local=$2
  ip_local=$3
  if [[ "${ip_local}" == "" ]]; then
    ip_local="127.0.0.1"
  fi
  if ! test -f "${ZITI_PKI}/${ZITI_CA_NAME_local}/keys/${name_local}-server.key"; then
    echo "Creating server cert from ca: ${ZITI_CA_NAME_local} for ${name_local}"
    "${ZITI_BIN_DIR-}/ziti" pki create server --pki-root="${ZITI_PKI_OS_SPECIFIC}" --ca-name "${ZITI_CA_NAME_local}" \
      --server-file "${name_local}-server" \
      --dns "${name_local},localhost" --ip "${ip_local}" \
      --server-name "${name_local} server certificate"
  else
    # NOTE(review): this branch SKIPS creation, yet still prints
    # "Creating server cert..." before "key exists" — misleading output.
    echo "Creating server cert from ca: ${ZITI_CA_NAME_local} for ${name_local}"
    echo "key exists"
  fi
  if ! test -f "${ZITI_PKI}/${ZITI_CA_NAME_local}/keys/${name_local}-client.key"; then
    echo "Creating client cert from ca: ${ZITI_CA_NAME_local} for ${name_local}"
    # NOTE(review): --key-file points at the *server* key, so the client
    # cert reuses the server's key pair — confirm this is intentional.
    "${ZITI_BIN_DIR-}/ziti" pki create client --pki-root="${ZITI_PKI_OS_SPECIFIC}" --ca-name "${ZITI_CA_NAME_local}" \
      --client-file "${name_local}-client" \
      --key-file "${name_local}-server" \
      --client-name "${name_local}"
  else
    echo "Creating client cert from ca: ${ZITI_CA_NAME_local} for ${name_local}"
    echo "key exists"
  fi
  echo " "
}
# Create a self-signed root CA named $1 under ${ZITI_PKI}; creation is
# skipped when the CA's private key is already on disk.
function pki_create_ca {
  cert=$1
  echo "Creating CA: ${cert}"
  # never clobber an existing PKI - only generate when the key is absent
  if test -f "${ZITI_PKI}/${cert}/keys/${cert}.key"; then
    echo "key exists"
  else
    "${ZITI_BIN_DIR}/ziti" pki create ca --pki-root="${ZITI_PKI_OS_SPECIFIC}" --ca-file="${cert}" --ca-name="${cert} Root CA"
  fi
  echo " "
}
# Create an intermediate CA.
#   $1 - name of the CA that signs the new intermediate
#   $2 - name (and file name) of the new intermediate
#   $3 - max path length for the new intermediate
function pki_create_intermediate {
  echo "Creating intermediate: ${1} ${2} ${3}"
  # skip generation when the intermediate's key already exists on disk
  if test -f "${ZITI_PKI}/${2}/keys/${2}.key"; then
    echo "key exists"
  else
    "${ZITI_BIN_DIR}/ziti" pki create intermediate --pki-root "${ZITI_PKI_OS_SPECIFIC}" --ca-name "${1}" \
          --intermediate-name "${2}" \
          --intermediate-file "${2}" --max-path-len "${3}"
  fi
  echo " "
}
# Print the usage line for the cert-verification helpers; $1 is the
# calling helper's name.
function printUsage()
{
    printf 'Usage: %s [cert to test] [ca pool to use]\n' "${1-}"
}
# Verify that certificate $1 chains to the CA bundle $2 via openssl.
# Prints a SUCCESS/FAILED banner based on the openssl exit status and
# returns 1 when either argument is missing.
function verifyCertAgainstPool()
{
    # both the cert ($1) and the ca pool ($2) are required
    if [[ "" == "${1-}" || "" == "${2-}" ]]
    then
        printUsage "verifyCertAgainstPool"
        return 1
    fi
    echo "    Verifying that this certificate:"
    echo "        - ${1-}"
    echo "    is valid for this ca pool:"
    echo "        - $2"
    echo ""
    # test the openssl exit status directly rather than inspecting $?
    # afterwards (avoids the SC2181 anti-pattern the old code suppressed)
    if openssl verify -partial_chain -CAfile "$2" "${1-}"; then
        echo ""
        echo "============ SUCCESS! ============"
    else
        echo ""
        echo "============ FAILED TO VALIDATE ============"
    fi
}
# Print the Issuer and Subject lines of every certificate contained in
# the PEM bundle $1. The crl2pkcs7 round-trip lets openssl emit all
# certs in the bundle, not just the first one.
function showIssuerAndSubjectForPEM()
{
    echo "Displaying Issuer and Subject for cert pool:"
    echo "    ${1-}"
    openssl crl2pkcs7 -nocrl -certfile "${1-}" | openssl pkcs7 -print_certs -text -noout | grep -E "(Subject|Issuer)"
}
# Create the client/server cert pair for the edge router, signed by the
# controller intermediate CA (IP SAN override is optional).
function createRouterPki {
  pki_client_server "${ZITI_EDGE_ROUTER_RAWNAME}" "${ZITI_CONTROLLER_INTERMEDIATE_NAME}" "${ZITI_EDGE_ROUTER_IP_OVERRIDE-}"
}
# Emit a "private" edge-router config (link listeners and edge/transport
# listeners commented out - dial only). $1 is the router name; the file
# is written to ${ZITI_HOME}/<router_name>.yaml.
function createPrivateRouterConfig {
  router_name="${1-}"
  if [[ "${router_name}" == "" ]]; then
    echo -e " * ERROR: $(RED "createPrivateRouterConfig requires a parameter to be supplied") "
    return 1
  fi
  ziti_home="${ZITI_HOME-}"
  if [[ "${ziti_home}" == "" ]]; then
    echo -e " * ERROR: $(RED "ZITI_HOME is not set") "
    return 1
  fi
  # BUG FIX: write to the validated ${ziti_home}/${router_name}.yaml like
  # the other create*RouterConfig functions, instead of ignoring the
  # router_name parameter and using ${ZITI_EDGE_ROUTER_RAWNAME}
  cat > "${ziti_home}/${router_name}.yaml" <<HereDocForEdgeRouter
v: 3
identity:
  cert: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/certs/${router_name}-client.cert"
  server_cert: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/certs/${router_name}-server.cert"
  key: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/keys/${router_name}-server.key"
  ca: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/certs/${router_name}-cas.cert"
ctrl:
  endpoint: tls:${ZITI_CONTROLLER_HOSTNAME}:${ZITI_FAB_CTRL_PORT}
link:
#  listeners:
#    - binding: transport
#      bind: tls:0.0.0.0:10080
#      advertise: tls:${ZITI_EDGE_ROUTER_HOSTNAME}:10080
#      options:
#        outQueueSize: 16
  dialers:
    - binding: transport
listeners:
#  - binding: tunnel
#    options:
#      mode: host #tproxy|tun|host
#  - binding: transport
#    address: tls:0.0.0.0:${ZITI_EDGE_ROUTER_PORT}
#    options:
#      advertise: ${ZITI_EDGE_ROUTER_HOSTNAME}:${ZITI_EDGE_ROUTER_PORT}
#      connectTimeoutMs: 5000
#      getSessionTimeout: 60s
#edge:
  csr:
    country: US
    province: NC
    locality: Charlotte
    organization: NetFoundry
    organizationalUnit: Ziti
    sans:
      dns:
        - ${ZITI_EDGE_ROUTER_HOSTNAME}
        - localhost
      ip:
        - "127.0.0.1"
#transport:
#  ws:
#    writeTimeout: 10
#    readTimeout: 5
#    idleTimeout: 5
#    pongTimeout: 60
#    pingInterval: 54
#    handshakeTimeout: 10
#    readBufferSize: 4096
#    writeBufferSize: 4096
#    enableCompression: true
#    server_cert: ${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/certs/${ZITI_EDGE_WSS_ROUTER_HOSTNAME-}-router-server.cert
#    key: ${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/keys/${ZITI_EDGE_WSS_ROUTER_HOSTNAME-}-router-server.key
forwarder:
  latencyProbeInterval: 1000
  xgressDialQueueLength: 1000
  xgressDialWorkerCount: 128
  linkDialQueueLength: 1000
  linkDialWorkerCount: 10
HereDocForEdgeRouter
}
# Generate the full PKI for the network: three root CAs (controller,
# edge controller, signing), their intermediates (the signing chain gets
# an extra "spurious" intermediate for a longer chain), the ziti-fabric
# management client cert, and client/server cert pairs for both
# controllers. Existing keys are never overwritten (the pki_* helpers
# skip creation when the key file is present).
function createPki {
  echo "Generating PKI"
  pki_create_ca "${ZITI_CONTROLLER_ROOTCA_NAME}"
  pki_create_ca "${ZITI_EDGE_CONTROLLER_ROOTCA_NAME}"
  pki_create_ca "${ZITI_SIGNING_ROOTCA_NAME}"
  # extra hop in the signing chain: root -> spurious -> signing intermediate
  ZITI_SPURIOUS_INTERMEDIATE="${ZITI_SIGNING_INTERMEDIATE_NAME}_spurious_intermediate"
  pki_create_intermediate "${ZITI_CONTROLLER_ROOTCA_NAME}" "${ZITI_CONTROLLER_INTERMEDIATE_NAME}" 1
  pki_create_intermediate "${ZITI_EDGE_CONTROLLER_ROOTCA_NAME}" "${ZITI_EDGE_CONTROLLER_INTERMEDIATE_NAME}" 1
  pki_create_intermediate "${ZITI_SIGNING_ROOTCA_NAME}" "${ZITI_SPURIOUS_INTERMEDIATE}" 2
  pki_create_intermediate "${ZITI_SPURIOUS_INTERMEDIATE}" "${ZITI_SIGNING_INTERMEDIATE_NAME}" 1
  if ! test -f "${ZITI_PKI}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/keys/${ZITI_NETWORK-}-dotzeet.key"; then
    echo "Creating ziti-fabric client certificate for network: ${ZITI_NETWORK-}"
    "${ZITI_BIN_DIR-}/ziti" pki create client --pki-root="${ZITI_PKI_OS_SPECIFIC}" --ca-name="${ZITI_CONTROLLER_INTERMEDIATE_NAME}" \
          --client-file="${ZITI_NETWORK-}-dotzeet" \
          --client-name "${ZITI_NETWORK-} Management"
  else
    echo "Creating ziti-fabric client certificate for network: ${ZITI_NETWORK-}"
    echo "key exists"
  fi
  echo " "
  pki_client_server "${ZITI_CONTROLLER_HOSTNAME}" "${ZITI_CONTROLLER_INTERMEDIATE_NAME}" "${ZITI_CONTROLLER_IP_OVERRIDE-}"
  pki_client_server "${ZITI_EDGE_CONTROLLER_HOSTNAME}" "${ZITI_EDGE_CONTROLLER_INTERMEDIATE_NAME}" "${ZITI_EDGE_CONTROLLER_IP_OVERRIDE-}"
}
# Emit a fabric-only router config (link listener enabled, transport
# listener enabled, edge section commented out). $1 is the router name;
# the file is written to ${ZITI_HOME}/<router_name>.yaml.
function createFabricRouterConfig {
  router_name="${1-}"
  if [[ "${router_name}" == "" ]]; then
    echo -e " * ERROR: $(RED "createFabricRouterConfig requires a parameter to be supplied") "
    return 1
  fi
  ziti_home="${ZITI_HOME-}"
  if [[ "${ziti_home}" == "" ]]; then
    echo -e " * ERROR: $(RED "ZITI_HOME is not set") "
    return 1
  fi
  # BUG FIX: name the output file after the validated router_name instead
  # of always using ${ZITI_EDGE_ROUTER_RAWNAME} (which silently clobbered
  # the edge router's config and ignored the parameter)
  cat > "${ziti_home}/${router_name}.yaml" <<HereDocForEdgeRouter
v: 3
identity:
  cert: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/certs/${router_name}-client.cert"
  server_cert: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/certs/${router_name}-server.cert"
  key: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/keys/${router_name}-server.key"
  ca: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/certs/${router_name}-cas.cert"
ctrl:
  endpoint: tls:${ZITI_CONTROLLER_HOSTNAME}:${ZITI_FAB_CTRL_PORT}
link:
  listeners:
    - binding: transport
      bind: tls:0.0.0.0:10080
      advertise: tls:${ZITI_EDGE_ROUTER_HOSTNAME}:10080
      options:
        outQueueSize: 16
  dialers:
    - binding: transport
listeners:
#  - binding: tunnel
#    options:
#      mode: host #tproxy|tun|host
  - binding: transport
    address: tls:0.0.0.0:${ZITI_EDGE_ROUTER_PORT}
    options:
      advertise: ${ZITI_EDGE_ROUTER_HOSTNAME}:${ZITI_EDGE_ROUTER_PORT}
      connectTimeoutMs: 5000
      getSessionTimeout: 60s
#edge:
  csr:
    country: US
    province: NC
    locality: Charlotte
    organization: NetFoundry
    organizationalUnit: Ziti
    sans:
      dns:
        - ${ZITI_EDGE_ROUTER_HOSTNAME}
        - localhost
      ip:
        - "127.0.0.1"
#transport:
#  ws:
#    writeTimeout: 10
#    readTimeout: 5
#    idleTimeout: 5
#    pongTimeout: 60
#    pingInterval: 54
#    handshakeTimeout: 10
#    readBufferSize: 4096
#    writeBufferSize: 4096
#    enableCompression: true
#    server_cert: ${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/certs/${ZITI_EDGE_WSS_ROUTER_HOSTNAME-}-router-server.cert
#    key: ${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/keys/${ZITI_EDGE_WSS_ROUTER_HOSTNAME-}-router-server.key
forwarder:
  latencyProbeInterval: 1000
  xgressDialQueueLength: 1000
  xgressDialWorkerCount: 128
  linkDialQueueLength: 1000
  linkDialWorkerCount: 10
HereDocForEdgeRouter
}
# Emit an edge-router config that listens for edge SDK connections over
# websocket (ws:0.0.0.0:3023) with the ws transport section enabled.
# $1 is the router name; the file is written to
# ${ZITI_HOME}/<router_name>.yaml.
function createEdgeRouterWssConfig {
  router_name="${1-}"
  if [[ "${router_name}" == "" ]]; then
    echo -e " * ERROR: $(RED "createEdgeRouterWssConfig requires a parameter to be supplied") "
    return 1
  fi
  ziti_home="${ZITI_HOME-}"
  if [[ "${ziti_home}" == "" ]]; then
    echo -e " * ERROR: $(RED "ZITI_HOME is not set") "
    return 1
  fi
  # NOTE: heredoc content below is the emitted YAML - do not edit casually
  cat > "${ZITI_HOME-}/${router_name}.yaml" <<HereDocForEdgeRouter
v: 3
identity:
  cert: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/certs/${router_name}-client.cert"
  server_cert: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/certs/${router_name}-server.cert"
  key: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/keys/${router_name}-server.key"
  ca: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/certs/${router_name}-cas.cert"
ctrl:
  endpoint: tls:${ZITI_CONTROLLER_HOSTNAME}:${ZITI_FAB_CTRL_PORT}
link:
  listeners:
    - binding: transport
      bind: tls:0.0.0.0:10080
      advertise: tls:${ZITI_EDGE_ROUTER_HOSTNAME}:10080
      options:
        outQueueSize: 16
  dialers:
    - binding: transport
listeners:
  - binding: tunnel
    options:
      mode: host #tproxy|tun|host
  - binding: edge
    address: ws:0.0.0.0:3023
    options:
      advertise: ${ZITI_EDGE_ROUTER_HOSTNAME}:3023
      connectTimeoutMs: 5000
      getSessionTimeout: 60s
edge:
  csr:
    country: US
    province: NC
    locality: Charlotte
    organization: NetFoundry
    organizationalUnit: Ziti
    sans:
      dns:
        - ${ZITI_EDGE_ROUTER_HOSTNAME}
        - localhost
      ip:
        - "127.0.0.1"
transport:
  ws:
    writeTimeout: 10
    readTimeout: 5
    idleTimeout: 5
    pongTimeout: 60
    pingInterval: 54
    handshakeTimeout: 10
    readBufferSize: 4096
    writeBufferSize: 4096
    enableCompression: true
    server_cert: ${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/certs/${ZITI_EDGE_ROUTER_HOSTNAME}-router-server.cert
    key: ${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/keys/${ZITI_EDGE_ROUTER_HOSTNAME}-router-server.key
forwarder:
  latencyProbeInterval: 1000
  xgressDialQueueLength: 1000
  xgressDialWorkerCount: 128
  linkDialQueueLength: 1000
  linkDialWorkerCount: 10
HereDocForEdgeRouter
}
# shellcheck disable=SC2120
# Emit the standard edge-router config: tls edge listener on
# ${ZITI_EDGE_ROUTER_PORT}, tunnel binding in host mode, link listener
# on 10080, ws transport commented out. $1 is the router name; the file
# is written to ${ZITI_HOME}/<router_name>.yaml.
function createEdgeRouterConfig {
  router_name="${1-}"
  if [[ "${router_name}" == "" ]]; then
    echo -e " * ERROR: $(RED "createEdgeRouterConfig requires a parameter to be supplied") "
    return 1
  fi
  ziti_home="${ZITI_HOME-}"
  if [[ "${ziti_home}" == "" ]]; then
    echo -e " * ERROR: $(RED "ZITI_HOME is not set") "
    return 1
  fi
  # NOTE: heredoc content below is the emitted YAML - do not edit casually
  cat > "${ziti_home}/${router_name}.yaml" <<HereDocForEdgeRouter
v: 3
identity:
  cert: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/certs/${router_name}-client.cert"
  server_cert: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/certs/${router_name}-server.cert"
  key: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/keys/${router_name}-server.key"
  ca: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/certs/${router_name}-cas.cert"
ctrl:
  endpoint: tls:${ZITI_CONTROLLER_HOSTNAME}:${ZITI_FAB_CTRL_PORT}
link:
  listeners:
    - binding: transport
      bind: tls:0.0.0.0:10080
      advertise: tls:${ZITI_EDGE_ROUTER_HOSTNAME}:10080
      options:
        outQueueSize: 16
  dialers:
    - binding: transport
listeners:
  - binding: tunnel
    options:
      mode: host #tproxy|tun|host
  - binding: edge
    address: tls:0.0.0.0:${ZITI_EDGE_ROUTER_PORT}
    options:
      advertise: ${ZITI_EDGE_ROUTER_HOSTNAME}:${ZITI_EDGE_ROUTER_PORT}
      connectTimeoutMs: 5000
      getSessionTimeout: 60s
edge:
  csr:
    country: US
    province: NC
    locality: Charlotte
    organization: NetFoundry
    organizationalUnit: Ziti
    sans:
      dns:
        - ${ZITI_EDGE_ROUTER_HOSTNAME}
        - localhost
      ip:
        - "127.0.0.1"
#transport:
#  ws:
#    writeTimeout: 10
#    readTimeout: 5
#    idleTimeout: 5
#    pongTimeout: 60
#    pingInterval: 54
#    handshakeTimeout: 10
#    readBufferSize: 4096
#    writeBufferSize: 4096
#    enableCompression: true
#    server_cert: ${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/certs/${ZITI_EDGE_WSS_ROUTER_HOSTNAME-}-router-server.cert
#    key: ${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/keys/${ZITI_EDGE_WSS_ROUTER_HOSTNAME-}-router-server.key
forwarder:
  latencyProbeInterval: 1000
  xgressDialQueueLength: 1000
  xgressDialWorkerCount: 128
  linkDialQueueLength: 1000
  linkDialWorkerCount: 10
HereDocForEdgeRouter
}
# Write the ziti-fabric CLI identity file (identities.yml) pointing at
# the management client cert/key created by createPki and the fabric
# management endpoint.
function createFabricIdentity {
  cat > "${ZITI_HOME}/identities.yml" <<IdentitiesJsonHereDoc
---
default:
  caCert: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/certs/${ZITI_CONTROLLER_HOSTNAME}-server.chain.pem"
  cert: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/certs/${ZITI_NETWORK-}-dotzeet.cert"
  key: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/keys/${ZITI_NETWORK-}-dotzeet.key"
  endpoint: tls:${ZITI_CONTROLLER_HOSTNAME}:${ZITI_FAB_MGMT_PORT}
IdentitiesJsonHereDoc
}
# Write the ziti controller configuration (controller.yaml) into
# ${ZITI_HOME}. The emitted file wires up the fabric ctrl/mgmt
# listeners, the edge API + enrollment settings, and a single
# webListener serving both the edge-management and edge-client APIs.
# Requires ZITI_HOME and the ZITI_* PKI/hostname variables to be set
# (see ziti_createEnvFile).
function createControllerConfig {
  ziti_home="${ZITI_HOME-}"
  if [[ "${ziti_home}" == "" ]]; then
    echo -e " * ERROR: $(RED "ZITI_HOME is not set") "
    return 1
  fi
  # NOTE: heredoc content below is the emitted YAML - its comments are
  # part of the generated file, not of this script
  cat > "${ziti_home}/controller.yaml" <<HereDocForEdgeConfiguration
v: 3
#trace:
#  path: "${ZITI_CONTROLLER_RAWNAME}.trace"
#profile:
#  memory:
#    path: ctrl.memprof
db: "${ZITI_HOME_OS_SPECIFIC}/db/ctrl.db"
identity:
  cert: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/certs/${ZITI_CONTROLLER_HOSTNAME}-client.cert"
  server_cert: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/certs/${ZITI_CONTROLLER_HOSTNAME}-server.chain.pem"
  key: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/keys/${ZITI_CONTROLLER_HOSTNAME}-server.key"
  ca: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_CONTROLLER_INTERMEDIATE_NAME}/certs/${ZITI_CONTROLLER_INTERMEDIATE_NAME}.cert"
ctrl:
  listener: tls:0.0.0.0:${ZITI_FAB_CTRL_PORT}
mgmt:
  listener: tls:${ZITI_CONTROLLER_HOSTNAME}:${ZITI_FAB_MGMT_PORT}
#metrics:
#  influxdb:
#    url: http://localhost:8086
#    database: ziti
# xctrl_example
#
#example:
#  enabled: false
#  delay: 5s
# By having an 'edge' section defined, the ziti-controller will attempt to parse the edge configuration. Removing this
# section, commenting out, or altering the name of the section will cause the edge to not run.
edge:
  # This section represents the configuration of the Edge API that is served over HTTPS
  api:
    #(optional, default 90s) Alters how frequently heartbeat and last activity values are persisted
    # activityUpdateInterval: 90s
    #(optional, default 250) The number of API Sessions updated for last activity per transaction
    # activityUpdateBatchSize: 250
    # sessionTimeout - optional, default 10m
    # The number of minutes before an Edge API session will timeout. Timeouts are reset by
    # API requests and connections that are maintained to Edge Routers
    sessionTimeout: 30m
    # address - required
    # The default address (host:port) to use for enrollment for the Client API. This value must match one of the addresses
    # defined in this webListener's bindPoints.
    address: ${ZITI_EDGE_CONTROLLER_API}
  # This section is used to define option that are used during enrollment of Edge Routers, Ziti Edge Identities.
  enrollment:
    # signingCert - required
    # A Ziti Identity configuration section that specifically makes use of the cert and key fields to define
    # a signing certificate from the PKI that the Ziti environment is using to sign certificates. The signingCert.cert
    # will be added to the /.well-known CA store that is used to bootstrap trust with the Ziti Controller.
    signingCert:
      cert: ${ZITI_PKI_OS_SPECIFIC}/${ZITI_SIGNING_INTERMEDIATE_NAME}/certs/${ZITI_SIGNING_INTERMEDIATE_NAME}.cert
      key: ${ZITI_PKI_OS_SPECIFIC}/${ZITI_SIGNING_INTERMEDIATE_NAME}/keys/${ZITI_SIGNING_INTERMEDIATE_NAME}.key
    # edgeIdentity - optional
    # A section for identity enrollment specific settings
    edgeIdentity:
      # durationMinutes - optional, default 5m
      # The length of time that a Ziti Edge Identity enrollment should remain valid. After
      # this duration, the enrollment will expire and not longer be usable.
      duration: 14400m
    # edgeRouter - Optional
    # A section for edge router enrollment specific settings.
    edgeRouter:
      # durationMinutes - optional, default 5m
      # The length of time that a Ziti Edge Router enrollment should remain valid. After
      # this duration, the enrollment will expire and not longer be usable.
      duration: 14400m
# web
# Defines webListeners that will be hosted by the controller. Each webListener can host many APIs and be bound to many
# bind points.
web:
  # name - required
  # Provides a name for this listener, used for logging output. Not required to be unique, but is highly suggested.
  - name: client-management
    # bindPoints - required
    # One or more bind points are required. A bind point specifies an interface (interface:port string) that defines
    # where on the host machine the webListener will listen and the address (host:port) that should be used to
    # publicly address the webListener(i.e. mydomain.com, localhost, 127.0.0.1). This public address may be used for
    # incoming address resolution as well as used in responses in the API.
    bindPoints:
      #interface - required
      # A host:port string on which network interface to listen on. 0.0.0.0 will listen on all interfaces
      - interface: 0.0.0.0:${ZITI_EDGE_CONTROLLER_PORT}
        # address - required
        # The public address that external incoming requests will be able to resolve. Used in request processing and
        # response content that requires full host:port/path addresses.
        address: ${ZITI_EDGE_CONTROLLER_API}
    # identity - optional
    # Allows the webListener to have a specific identity instead of defaulting to the root 'identity' section.
    identity:
      ca: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_EDGE_CONTROLLER_INTERMEDIATE_NAME}/certs/${ZITI_EDGE_CONTROLLER_INTERMEDIATE_NAME}.cert"
      key: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_EDGE_CONTROLLER_INTERMEDIATE_NAME}/keys/${ZITI_EDGE_CONTROLLER_HOSTNAME}-server.key"
      server_cert: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_EDGE_CONTROLLER_INTERMEDIATE_NAME}/certs/${ZITI_EDGE_CONTROLLER_HOSTNAME}-server.chain.pem"
      cert: "${ZITI_PKI_OS_SPECIFIC}/${ZITI_EDGE_CONTROLLER_INTERMEDIATE_NAME}/certs/${ZITI_EDGE_CONTROLLER_HOSTNAME}-client.cert"
    # options - optional
    # Allows the specification of webListener level options - mainly dealing with HTTP/TLS settings. These options are
    # used for all http servers started by the current webListener.
    options:
      # idleTimeoutMs - optional, default 5000ms
      # The maximum amount of idle time in milliseconds allowed for pipelined HTTP requests. Setting this too high
      # can cause resources on the host to be consumed as clients remain connected and idle. Lowering this value
      # will cause clients to reconnect on subsequent HTTPs requests.
      idleTimeout: 5000ms  #http timeouts, new
      # readTimeoutMs - optional, default 5000ms
      # The maximum amount of time in milliseconds http servers will wait to read the first incoming requests. A higher
      # value risks consuming resources on the host with clients that are acting bad faith or suffering from high latency
      # or packet loss. A lower value can risk losing connections to high latency/packet loss clients.
      readTimeout: 5000ms
      # writeTimeoutMs - optional, default 10000ms
      # The total maximum time in milliseconds that the http server will wait for a single requests to be received and
      # responded too. A higher value can allow long running requests to consume resources on the host. A lower value
      # can risk ending requests before the server has a chance to respond.
      writeTimeout: 100000ms
      # minTLSVersion - optional, default TSL1.2
      # The minimum version of TSL to support
      minTLSVersion: TLS1.2
      # maxTLSVersion - optional, default TSL1.3
      # The maximum version of TSL to support
      maxTLSVersion: TLS1.3
    # apis - required
    # Allows one or more APIs to be bound to this webListener
    apis:
      # binding - required
      # Specifies an API to bind to this webListener. Built-in APIs are
      #   - edge-management
      #   - edge-client
      #   - fabric-management
      - binding: edge-management
        # options - variable optional/required
        # This section is used to define values that are specified by the API they are associated with.
        # These settings are per API. The example below is for the 'edge-api' and contains both optional values and
        # required values.
        options: { }
      - binding: edge-client
        options: { }
HereDocForEdgeConfiguration
  echo "controller configuration file written to: ${ziti_home}/controller.yaml"
}
# shellcheck disable=SC2120
# Populate all ZITI_* environment variables (honoring any already-set
# overrides), create the ZITI_HOME directory layout, and write a
# sourceable env file (${ZITI_HOME}/${ZITI_NETWORK}.env) capturing them.
# $1 (optional): network name fallback when ZITI_NETWORK is unset.
function ziti_createEnvFile {
  ziti_home="${ZITI_HOME-}"
  if [[ "${ziti_home}" == "" ]]; then
    echo -e "$(RED "  ERROR: ZITI_HOME HAS NOT BEEN DECLARED!")"
    if decideToUseDefaultZitiHome; then
      # shellcheck disable=SC2155
      export ZITI_NETWORK="$(hostname)"
      ziti_home="${DEFAULT_ZITI_HOME_LOCATION}"
    else
      return 1
    fi
  fi
  export ZITI_HOME="${ziti_home}"
  if [[ "${ZITI_OSTYPE}" == "windows" ]]; then
    # FIX: quote the cygpath argument so paths containing spaces survive (SC2086)
    export ZITI_HOME_OS_SPECIFIC="$(cygpath -m "${ZITI_HOME}")"
  else
    export ZITI_HOME_OS_SPECIFIC="${ZITI_HOME}"
  fi
  export ENV_FILE="${ZITI_HOME}/${ZITI_NETWORK}.env"
  export ZITI_SHARED="${ZITI_HOME}"
  # NOTE(review): ENV_FILE was assigned just above, so this guard can never
  # trigger - kept for parity with the original flow; confirm before removing
  if [[ "${ENV_FILE}" == "" ]]; then
    echo -e "$(RED "  ERROR: ENV_FILE HAS NOT BEEN DECLARED!")"
    echo " "
    return 1
  fi
  if [[ "${network_name-}" != "" ]]; then
    export ZITI_NETWORK="${network_name}"
  fi
  # fall back to $1, then to the hostname, when ZITI_NETWORK is still unset
  if [[ "${ZITI_NETWORK-}" == "" ]]; then
    if [[ "${1-}" != "" ]]; then
      export ZITI_NETWORK="${1-}"
    fi
    if [[ "${ZITI_NETWORK-}" = "" ]]; then
      echo -e "$(YELLOW "  WARN: ZITI_NETWORK HAS NOT BEEN DECLARED! USING hostname: $(hostname)")"
      # shellcheck disable=SC2155
      export ZITI_NETWORK="$(hostname)"
    fi
  fi
  echo "ZITI_NETWORK set to: ${ZITI_NETWORK}"
  # defaults for everything the quickstart needs; pre-set values win
  if [[ "${ZITI_USER-}" == "" ]]; then export ZITI_USER="admin"; fi
  if [[ "${ZITI_PWD-}" == "" ]]; then export ZITI_PWD="admin"; fi
  if [[ "${ZITI_DOMAIN_SUFFIX-}" == "" ]]; then export ZITI_DOMAIN_SUFFIX=""; fi
  if [[ "${ZITI_ID-}" == "" ]]; then export ZITI_ID="${ZITI_HOME}/identities.yml"; fi
  if [[ "${ZITI_FAB_MGMT_PORT-}" == "" ]]; then export ZITI_FAB_MGMT_PORT="10000"; fi
  if [[ "${ZITI_FAB_CTRL_PORT-}" == "" ]]; then export ZITI_FAB_CTRL_PORT="6262"; fi
  if [[ "${ZITI_CONTROLLER_RAWNAME-}" == "" ]]; then export ZITI_CONTROLLER_RAWNAME="${ZITI_NETWORK}-controller"; fi
  if [[ "${ZITI_EDGE_CONTROLLER_RAWNAME-}" == "" ]]; then export ZITI_EDGE_CONTROLLER_RAWNAME="${ZITI_NETWORK}-edge-controller"; fi
  export ZITI_PKI="${ZITI_SHARED}/pki"
  if [[ "${ZITI_OSTYPE}" == "windows" ]]; then
    # FIX: quote the cygpath argument so paths containing spaces survive (SC2086)
    export ZITI_PKI_OS_SPECIFIC="$(cygpath -m "${ZITI_PKI}")"
  else
    export ZITI_PKI_OS_SPECIFIC="${ZITI_PKI}"
  fi
  if [[ "${ZITI_EDGE_CONTROLLER_PORT-}" == "" ]]; then export ZITI_EDGE_CONTROLLER_PORT="1280"; fi
  if [[ "${ZITI_CONTROLLER_HOSTNAME-}" == "" ]]; then export ZITI_CONTROLLER_HOSTNAME="${ZITI_CONTROLLER_RAWNAME}${ZITI_DOMAIN_SUFFIX}"; fi
  if [[ "${ZITI_EDGE_CONTROLLER_HOSTNAME-}" == "" ]]; then export ZITI_EDGE_CONTROLLER_HOSTNAME="${ZITI_EDGE_CONTROLLER_RAWNAME}${ZITI_DOMAIN_SUFFIX}"; fi
  if [[ "${ZITI_ZAC_HOSTNAME-}" == "" ]]; then export ZITI_ZAC_HOSTNAME="${ZITI_ZAC_RAWNAME}${ZITI_DOMAIN_SUFFIX}"; fi
  if [[ "${ZITI_EDGE_CONTROLLER_API-}" == "" ]]; then export ZITI_EDGE_CONTROLLER_API="${ZITI_EDGE_CONTROLLER_HOSTNAME}:${ZITI_EDGE_CONTROLLER_PORT}"; fi
  export ZITI_SIGNING_CERT_NAME="${ZITI_NETWORK}-signing"
  export ZITI_CONTROLLER_ROOTCA_NAME="${ZITI_CONTROLLER_HOSTNAME}-root-ca"
  export ZITI_CONTROLLER_INTERMEDIATE_NAME="${ZITI_CONTROLLER_HOSTNAME}-intermediate"
  export ZITI_EDGE_CONTROLLER_ROOTCA_NAME="${ZITI_EDGE_CONTROLLER_HOSTNAME}-root-ca"
  export ZITI_EDGE_CONTROLLER_INTERMEDIATE_NAME="${ZITI_EDGE_CONTROLLER_HOSTNAME}-intermediate"
  export ZITI_SIGNING_ROOTCA_NAME="${ZITI_SIGNING_CERT_NAME}-root-ca"
  export ZITI_SIGNING_INTERMEDIATE_NAME="${ZITI_SIGNING_CERT_NAME}-intermediate"
  export ZITI_BIN_ROOT="${ZITI_HOME}/ziti-bin"
  mkdir -p "${ZITI_BIN_ROOT}"
  mkdir -p "${ZITI_HOME}/db"
  mkdir -p "${ZITI_PKI}"
  # snapshot every ZITI_* variable into the env file as export statements
  echo "" > "${ENV_FILE}"
  for zEnvVar in $(set -o posix ; set | grep ZITI_ | sort); do envvar="$(echo "${zEnvVar}" | cut -d '=' -f1)"; envval="$(echo "${zEnvVar}" | cut -d '=' -f2-100)"; echo "export ${envvar}=\"${envval}\"" >> "${ENV_FILE}"; done
  export PFXLOG_NO_JSON=true
  # shellcheck disable=SC2129
  echo "export PFXLOG_NO_JSON=true" >> "${ENV_FILE}"
  echo "alias zec='ziti edge'" >> "${ENV_FILE}"
  echo "alias zlogin='ziti edge login \"\${ZITI_EDGE_CONTROLLER_API}\" -u \"\${ZITI_USER-}\" -p \"\${ZITI_PWD}\" -c \"\${ZITI_PKI}/\${ZITI_EDGE_CONTROLLER_INTERMEDIATE_NAME}/certs/\${ZITI_EDGE_CONTROLLER_INTERMEDIATE_NAME}.cert\"'" >> "${ENV_FILE}"
  echo "alias psz='ps -ef | grep ziti'" >> "${ENV_FILE}"
  #when sourcing the emitted file add the bin folder to the path
  # (heredoc is quoted - nothing expands now; it runs when the env file is sourced)
  tee -a "${ENV_FILE}" > /dev/null <<'heredoc'
echo " "
if [[ ! "$(echo "$PATH"|grep -q "${ZITI_BIN_DIR}" && echo "yes")" == "yes" ]]; then
  echo "adding ${ZITI_BIN_DIR} to the path"
  export PATH=$PATH:"${ZITI_BIN_DIR}"
else
  echo "  ziti binaries are located at: ${ZITI_BIN_DIR}"
  echo -e 'add this to your path if you want by executing: export PATH=$PATH:'"${ZITI_BIN_DIR}"
  echo " "
fi
heredoc
}
# Block until the edge controller's /version endpoint answers HTTP 200.
# Each probe is capped at 1 second (-m 1) and retries every 3 seconds.
function waitForController {
  until [[ "$(curl -w "%{http_code}" -m 1 -s -k -o /dev/null "https://${ZITI_EDGE_CONTROLLER_API}/version")" == "200" ]]; do
    echo "waiting for https://${ZITI_EDGE_CONTROLLER_API}"
    sleep 3
  done
}
# Write a systemd unit file for the ziti controller into
# ${ZITI_HOME}/ziti-controller.service. Requires ZITI_HOME, ZITI_BIN_DIR
# and ZITI_CONTROLLER_RAWNAME to be set; returns 1 when any is missing.
function createControllerSystemdFile {
  ziti_home="${ZITI_HOME-}"
  if [[ "${ziti_home}" == "" ]]; then
    echo -e " * ERROR: $(RED "ZITI_HOME is not set") "
    return 1
  fi
  ziti_bin_dir="${ZITI_BIN_DIR-}"
  if [[ "${ziti_bin_dir}" == "" ]]; then
    echo -e " * ERROR: $(RED "ZITI_BIN_DIR is not set") "
    return 1
  fi
  ziti_ctrl_name="${ZITI_CONTROLLER_RAWNAME-}"
  if [[ "${ziti_ctrl_name}" == "" ]]; then
    echo -e " * ERROR: $(RED "ZITI_CONTROLLER_RAWNAME is not set") "
    return 1
  fi
  systemd_file="${ziti_home}/ziti-controller.service"
  cat > "${systemd_file}" <<HeredocForSystemd
[Unit]
Description=Ziti-Controller
After=network.target

[Service]
User=root
WorkingDirectory=${ziti_home}
ExecStart="${ziti_bin_dir}/ziti-controller" run "${ziti_home}/controller.yaml"
Restart=always
RestartSec=2
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target
HeredocForSystemd
  echo "Controller systemd file written to: ${systemd_file}"
}
# Write a systemd unit file for a ziti router into
# ${ZITI_HOME}/ziti-router-<name>.service. $1 is the router name (its
# config must exist at ${ZITI_HOME}/<name>.yaml). Requires ZITI_HOME and
# ZITI_BIN_DIR; returns 1 when any required value is missing.
function createRouterSystemdFile {
  router_name="${1-}"
  if [[ "${router_name}" == "" ]]; then
    echo -e " * ERROR: $(RED "createRouterSystemdFile requires a parameter to be supplied") "
    return 1
  fi
  ziti_home="${ZITI_HOME-}"
  if [[ "${ziti_home}" == "" ]]; then
    echo -e " * ERROR: $(RED "ZITI_HOME is not set") "
    return 1
  fi
  ziti_bin_dir="${ZITI_BIN_DIR-}"
  if [[ "${ziti_bin_dir}" == "" ]]; then
    echo -e " * ERROR: $(RED "ZITI_BIN_DIR is not set") "
    return 1
  fi
  systemd_file="${ziti_home}/ziti-router-${router_name}.service"
  cat > "${systemd_file}" <<HeredocForSystemd
[Unit]
Description=Ziti-Router for ${router_name}
After=network.target

[Service]
User=root
WorkingDirectory=${ziti_home}
ExecStart="${ziti_bin_dir}/ziti-router" run "${ziti_home}/${router_name}.yaml"
Restart=always
RestartSec=2
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
HeredocForSystemd
  echo "Router systemd file written to: ${systemd_file}"
}
# Write a systemd unit file for the Ziti Admin Console (node server) into
# ${ZITI_HOME}/ziti-console.service. Requires ZITI_HOME and ZITI_BIN_DIR;
# returns 1 when either is missing.
function createZacSystemdFile {
  ziti_home="${ZITI_HOME-}"
  if [[ "${ziti_home}" == "" ]]; then
    echo -e " * ERROR: $(RED "ZITI_HOME is not set") "
    return 1
  fi
  # NOTE(review): ziti_bin_dir is validated but not used in the unit file
  # below (ZAC is launched via node) - kept for parity with the siblings
  ziti_bin_dir="${ZITI_BIN_DIR-}"
  if [[ "${ziti_bin_dir}" == "" ]]; then
    echo -e " * ERROR: $(RED "ZITI_BIN_DIR is not set") "
    return 1
  fi
  systemd_file="${ziti_home}/ziti-console.service"
  cat > "${systemd_file}" <<HeredocForSystemd
[Unit]
Description=Ziti-Console
After=network.target

[Service]
User=root
WorkingDirectory=${ziti_home}/ziti-console
ExecStart=node "${ziti_home}/ziti-console/server.js"
Restart=always
RestartSec=2
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
HeredocForSystemd
  echo "ziti-console systemd file written to: ${systemd_file}"
}
# Map $OSTYPE onto the ZITI_OSTYPE value the rest of the script expects
# (linux / darwin / windows). Prints an error and returns 1 for freebsd
# and for unknown/unset values.
function setOs {
  case "$OSTYPE" in
    linux-gnu*)
      export ZITI_OSTYPE="linux"
      ;;
    darwin*)
      export ZITI_OSTYPE="darwin"
      ;;
    cygwin|msys|win32)
      # all three windows-ish environments are treated identically
      export ZITI_OSTYPE="windows"
      ;;
    freebsd*)
      echo -e " * ERROR: $(RED "\$OSTYPE [$OSTYPE] is not supported at this time") "
      return 1
      ;;
    *)
      echo -e " * ERROR: $(RED "\$OSTYPE is not set or is unknown: [$OSTYPE]. Cannot continue") "
      return 1
      ;;
  esac
  return 0
}
# If the script was invoked (or sourced) with arguments, evaluate them as
# a command - this lets callers run a single function from this library,
# e.g. `./script createPki`.
# FIX (SC2124): assign the argument list to a plain string with "$*"
# instead of "$@" (identical result here, but "$@" in an assignment is
# misleading and flagged by shellcheck).
param1="$*"
if [[ "${param1}" != "" ]]; then
  eval "$@"
fi
# relax strict mode so an interactive shell that sourced us keeps working
set +uo pipefail
|
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
// Spring Boot entry point: bootstraps the application context (component
// scan + auto-configuration via @SpringBootApplication) and starts the
// embedded server.
@SpringBootApplication
public class Application {
    public static void main(String[] args) {
        SpringApplication.run(Application.class, args);
    }
}
// Model class for one row of the "users" table (id, name, age) with
// standard JavaBean getters/setters.
public class User {
    // primary key of the users table
    private int id;
    private String name;
    private int age;

    public int getId() {
        return id;
    }

    public void setId(int id) {
        this.id = id;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public int getAge() {
        return age;
    }

    public void setAge(int age) {
        this.age = age;
    }
}
-- Create the users table backing the User model (id is auto-generated).
-- FIX: the original was missing the comma after "age INT NOT NULL",
-- which made the statement a syntax error; also use a SQL comment
-- marker instead of "//".
CREATE TABLE IF NOT EXISTS users (
    id INT NOT NULL AUTO_INCREMENT,
    name VARCHAR(255) NOT NULL,
    age INT NOT NULL,
    PRIMARY KEY (id)
);
// Controller exposing the users table over HTTP.
// NOTE(review): the class has @GetMapping but no @RestController (or
// @Controller) annotation, so Spring will not register this mapping
// unless the annotation is declared elsewhere - confirm and add it.
public class TableController {

    // repository is injected by Spring; assumes a UserRepository bean exists
    @Autowired
    private UserRepository userRepository;

    // GET /users - return every row of the users table as JSON
    @GetMapping("/users")
    public List<User> getUsers() {
        List<User> users = (List<User>) userRepository.findAll();
        return users;
    }
}
// Create view
<%-- JSP view rendering the "users" collection as an HTML table;
     expects a List<User> named "users" to be in scope. --%>
<html>
<head>
    <title>Users Table</title>
</head>
<body>
    <h1>Users Table</h1>
    <table>
        <tr>
            <th>ID</th>
            <th>Name</th>
            <th>Age</th>
        </tr>
        <% for (User user : users) { %>
        <tr>
            <td><%= user.getId() %></td>
            <td><%= user.getName() %></td>
            <td><%= user.getAge() %></td>
        </tr>
        <% } %>
    </table>
</body>
</html>
|
<gh_stars>10-100
//#####################################################################
// Copyright 2005, <NAME>, <NAME>, <NAME>.
// This file is part of PhysBAM whose distribution is governed by the license contained in the accompanying file PHYSBAM_COPYRIGHT.txt.
//#####################################################################
#include <PhysBAM_Tools/Grids_Uniform/GRID.h>
#include <PhysBAM_Tools/Grids_Uniform/UNIFORM_GRID_ITERATOR_CELL.h>
#include <PhysBAM_Tools/Math_Tools/exchange.h>
#include <PhysBAM_Tools/Random_Numbers/NOISE.h>
#include <PhysBAM_Geometry/Read_Write/Grids_Uniform_Level_Sets/READ_WRITE_LEVELSET_1D.h>
#include <PhysBAM_Geometry/Read_Write/Grids_Uniform_Level_Sets/READ_WRITE_LEVELSET_2D.h>
#include <PhysBAM_Geometry/Read_Write/Grids_Uniform_Level_Sets/READ_WRITE_LEVELSET_3D.h>
#include <PhysBAM_Fluids/PhysBAM_Incompressible/Incompressible_Flows/INCOMPRESSIBLE_UNIFORM.h>
#include <PhysBAM_Dynamics/Boundaries/BOUNDARY_PHI_WATER.h>
#include <PhysBAM_Dynamics/Solids_And_Fluids/FLUIDS_PARAMETERS_UNIFORM.h>
#include <PhysBAM_Dynamics/Solids_And_Fluids/SOLIDS_FLUIDS_EXAMPLE_UNIFORM.h>
#include <PhysBAM_Dynamics/Standard_Tests/WATER_STANDARD_TESTS_2D.h>
#include <PhysBAM_Dynamics/Standard_Tests/WATER_STANDARD_TESTS_3D.h>
#include <PhysBAM_Dynamics/Standard_Tests/WATER_STANDARD_TESTS_MULTIPHASE.h>
using namespace PhysBAM;
//#####################################################################
// Constructor
//#####################################################################
// Forwards to the single-phase T_WATER_STANDARD_TESTS base and keeps
// references to the uniform fluids parameters and the incompressible
// fluid container used by the multiphase tests.
template<class T_GRID,class T_WATER_STANDARD_TESTS> WATER_STANDARD_TESTS_MULTIPHASE<T_GRID,T_WATER_STANDARD_TESTS>::
WATER_STANDARD_TESTS_MULTIPHASE(SOLIDS_FLUIDS_EXAMPLE_UNIFORM<T_GRID>& example,FLUIDS_PARAMETERS_UNIFORM<T_GRID>& fluids_parameters_input,INCOMPRESSIBLE_FLUID_CONTAINER<T_GRID>& incompressible_fluid_container_input,
    RIGID_BODY_COLLECTION<TV>& rigid_body_collection_input)
    :T_WATER_STANDARD_TESTS(example,fluids_parameters_input,rigid_body_collection_input),fluids_parameters_uniform(fluids_parameters_input),
    incompressible_fluid_container(incompressible_fluid_container_input)
{
}
//#####################################################################
// Destructor
//#####################################################################
// Nothing to release: all members are references owned elsewhere.
template<class T_GRID,class T_WATER_STANDARD_TESTS> WATER_STANDARD_TESTS_MULTIPHASE<T_GRID,T_WATER_STANDARD_TESTS>::
~WATER_STANDARD_TESTS_MULTIPHASE()
{
}
//#####################################################################
// Function Initialize
//#####################################################################
// Configures per-region densities, viscosities, surface tensions and solver
// settings for the selected multiphase test. For restartable tests (16) the
// configuration additionally depends on the restart frame. Aborts on an
// unrecognized test number.
template<class T_GRID,class T_WATER_STANDARD_TESTS> void WATER_STANDARD_TESTS_MULTIPHASE<T_GRID,T_WATER_STANDARD_TESTS>::
Initialize(const int test_number_input,const int resolution,const int restart_frame)
{
    BASE::Initialize(Non_Multiphase_Test_Number(test_number_input),resolution);
    test_number=test_number_input; // fixed: this assignment was duplicated a few lines below
    fluids_parameters.use_removed_positive_particles=false;fluids_parameters.use_removed_negative_particles=false;
    fluids_parameters.write_removed_positive_particles=false;fluids_parameters.write_removed_negative_particles=false;
    fluids_parameters.incompressible_iterations=100;
    use_open_wall=false;air_region=-1;
    FLUIDS_PARAMETERS_UNIFORM<T_GRID>& fluids_parameters_input=static_cast<FLUIDS_PARAMETERS_UNIFORM<T_GRID>&>(fluids_parameters);
    // Tests below 10 reuse the single-phase setups: region 1 water, region 2 air.
    if(test_number<10){
        fluids_parameters_input.densities(1)=1000;fluids_parameters_input.densities(2)=1;
        source_region.Resize(sources.m);ARRAYS_COMPUTATIONS::Fill(source_region,1);
        fluids_parameters_input.pseudo_dirichlet_regions(2)=true;
        fluids_parameters_input.second_order_cut_cell_method=false;}
    // 11: two drops with pairwise surface tension between all three regions.
    else if(test_number==11){
        fluids_parameters_input.densities(1)=1000;fluids_parameters_input.densities(2)=500;fluids_parameters_input.densities(3)=2000;
        fluids_parameters_input.surface_tensions(1,2)=fluids_parameters_input.surface_tensions(2,1)=(T).1;
        fluids_parameters_input.surface_tensions(1,3)=fluids_parameters_input.surface_tensions(3,1)=(T).1;
        fluids_parameters_input.surface_tensions(2,3)=fluids_parameters_input.surface_tensions(3,2)=(T).1;}
    // 12: splash; region 1 is open air (Dirichlet boundary).
    else if(test_number==12){
        fluids_parameters_input.dirichlet_regions(1)=true;use_open_wall=true;air_region=1;
        fluids_parameters_input.densities(1)=1;fluids_parameters_input.densities(2)=800;fluids_parameters_input.densities(3)=1000;fluids_parameters_input.densities(4)=3000;}
    // 13: Rayleigh-Taylor stack of four regions.
    else if(test_number==13){
        fluids_parameters_input.densities(1)=1400;fluids_parameters_input.densities(2)=500;fluids_parameters_input.densities(3)=1000;fluids_parameters_input.densities(4)=1;}
    // 14: rising bubble with air/water material constants and viscosity.
    else if(test_number==14){
        fluids_parameters_input.densities(1)=(T)1.226;fluids_parameters_input.densities(2)=1000;
        fluids_parameters_input.surface_tensions(1,2)=fluids_parameters_input.surface_tensions(2,1)=(T).0728;
        fluids_parameters_input.viscosities(1)=(T).0000178;
        fluids_parameters_input.viscosities(2)=(T).001137;
        fluids_parameters_input.implicit_viscosity=false;
        fluids_parameters_input.incompressible_iterations=200;
        fluids_parameters_input.implicit_viscosity_iterations=200;}
    // 15: inclined plane with five regions of varying viscosity; region 5 is air.
    else if(test_number==15){
        fluids_parameters_input.densities(1)=1000;
        fluids_parameters_input.viscosities(1)=(T)500;
        //fluids_parameters_input.viscosities(1)=(T)50;
        //fluids_parameters_input.use_multiphase_strain(1)=true;
        //fluids_parameters_input.elastic_moduli(1)=20000;
        //fluids_parameters_input.plasticity_alphas(1)=0;
        fluids_parameters_input.densities(2)=1000;
        fluids_parameters_input.viscosities(2)=(T)60;
        fluids_parameters_input.densities(3)=1000;
        fluids_parameters_input.viscosities(3)=10;
        // SOURCE SET IN DERIVED EXAMPLE
        fluids_parameters_input.densities(4)=1000;
        fluids_parameters_input.viscosities(4)=0;
        // SOURCE SET IN DERIVED EXAMPLE
        fluids_parameters_input.densities(5)=1;
        fluids_parameters_input.dirichlet_regions(5)=true;//use_open_wall=true;air_region=6;
        //fluids_parameters_input.cfl/=8;
        fluids_parameters_input.implicit_viscosity_iterations=50;
        fluids_parameters_input.implicit_viscosity=true;
        //fluids_parameters_input.cfl=4;
    }
    // 16: viscoelastic armadillo; material model varies with the restart frame.
    else if(test_number==16){
        if(restart_frame>=500){
            fluids_parameters_input.densities(1)=500;
            fluids_parameters_input.use_multiphase_strain(1)=true;
            fluids_parameters_input.elastic_moduli(1)=15000;
            fluids_parameters_input.plasticity_alphas(1)=0;
            fluids_parameters_input.implicit_viscosity_iterations=50;
            fluids_parameters_input.implicit_viscosity=true;
            fluids_parameters_input.viscosities(1)=(T)200;
            fluids_parameters_input.densities(4)=1000;
            fluids_parameters_input.use_multiphase_strain(4)=true;
            fluids_parameters_input.elastic_moduli(4)=15000;
            fluids_parameters_input.plasticity_alphas(4)=0;
            fluids_parameters_input.viscosities(4)=(T)200;}
        else if(restart_frame>=296) fluids_parameters_input.densities(1)=500;
        else if(restart_frame>=68){
            fluids_parameters_input.densities(1)=1500;
            fluids_parameters_input.implicit_viscosity_iterations=50;
            fluids_parameters_input.implicit_viscosity=false;
            fluids_parameters_input.viscosities(1)=0;
        }
        else{
            fluids_parameters_input.densities(1)=500;
            fluids_parameters_input.use_multiphase_strain(1)=true;
            fluids_parameters_input.elastic_moduli(1)=15000;
            fluids_parameters_input.plasticity_alphas(1)=0;
            fluids_parameters_input.implicit_viscosity_iterations=50;
            fluids_parameters_input.implicit_viscosity=true;
            fluids_parameters_input.viscosities(1)=(T)200;
        }
        fluids_parameters_input.densities(2)=1000;
        fluids_parameters_input.densities(3)=1;
        fluids_parameters_input.dirichlet_regions(3)=true;
        fluids_parameters_input.reseeding_frame_rate=10;
        fluids_parameters_input.cfl/=2;
    }
    // 17: milk crown (drop impact on a shallow pool).
    else if(test_number==17){
        fluids_parameters_input.densities(1)=1000;
        fluids_parameters_input.densities(2)=(T)1.226;
        //fluids_parameters_input.surface_tensions(1,2)=fluids_parameters_input.surface_tensions(2,1)=(T)2;
        //fluids_parameters_input.surface_tensions(1,2)=fluids_parameters_input.surface_tensions(2,1)=(T)2;
        fluids_parameters_input.cfl/=2;
        fluids_parameters_input.reseeding_frame_rate=1;
    }
    else{
        std::stringstream ss;ss<<"Unrecognized example: "<<test_number<<std::endl;LOG::filecout(ss.str());
        PHYSBAM_FATAL_ERROR();}
    // One-way solid->fluid coupling for the inclined-plane test.
    if(test_number==15){fluids_parameters_input.solid_affects_fluid=true;fluids_parameters_input.fluid_affects_solid=false;}
}
//#####################################################################
// Function Initialize_Advection
//#####################################################################
// Selects fluid-coupling advection defaults (needed by tests with objects or
// when forced by the caller) and, for open-wall tests, installs a
// BOUNDARY_PHI_WATER per region with the sign flipped for the air region.
// NOTE(review): the BOUNDARY_PHI_WATER objects allocated here appear to be
// owned by fluids_parameters afterwards -- confirm ownership/cleanup.
template<class T_GRID,class T_WATER_STANDARD_TESTS> void WATER_STANDARD_TESTS_MULTIPHASE<T_GRID,T_WATER_STANDARD_TESTS>::
Initialize_Advection(const bool always_use_objects)
{
    if(always_use_objects||test_number==4||test_number==5||test_number==6||test_number==15) fluids_parameters.Use_Fluid_Coupling_Defaults();
    else fluids_parameters.Use_No_Fluid_Coupling_Defaults();
    if(use_open_wall)
        for(int i=1;i<=Number_Of_Regions(test_number);i++){
            BOUNDARY_PHI_WATER<T_GRID>* boundary=new BOUNDARY_PHI_WATER<T_GRID>();
            boundary->Set_Velocity_Pointer(incompressible_fluid_container.face_velocities);
            if(i==air_region)boundary->sign=-1;
            fluids_parameters.phi_boundary_multiphase(i)=boundary;}
}
//#####################################################################
// Function Initialize_Bodies
//#####################################################################
// For test 16, loads the armadillo level set from disk and resamples it onto
// the simulation grid. In 3D the stored phi is sampled directly (with an axis
// swap and scale of 145); in 2D the minimum over a sweep of x-slices is taken
// so the 2D slice sees the silhouette of the 3D model.
template<class T_GRID,class T_WATER_STANDARD_TESTS> void WATER_STANDARD_TESTS_MULTIPHASE<T_GRID,T_WATER_STANDARD_TESTS>::
Initialize_Bodies()
{
    T_WATER_STANDARD_TESTS::Initialize_Bodies();
    if(test_number==16){
        GRID<VECTOR<T,3> > armadillo_temp_grid;
        ARRAY<T,VECTOR<int,3> > armadillo_temp_phi;
        LEVELSET_3D<GRID<VECTOR<T,3> > > armadillo_temp(armadillo_temp_grid,armadillo_temp_phi);
        FILE_UTILITIES::Read_From_File<float>(example.data_directory+"/Rigid_Bodies/armadillo_high_res.phi",armadillo_temp);
        // NOTE(review): the grid and phi array allocated here are referenced by
        // *armadillo and never freed in this file -- confirm intended lifetime.
        armadillo=new T_LEVELSET(*(new T_GRID(*fluids_parameters.grid)),*(new T_ARRAYS_SCALAR(fluids_parameters.grid->Domain_Indices(1))));
        for(CELL_ITERATOR iterator(*fluids_parameters.grid,1);iterator.Valid();iterator.Next()){TV_INT cell=iterator.Cell_Index();
            // Swap x/z so the model faces the expected direction in this setup.
            VECTOR<T,3> vec=VECTOR<T,3>(iterator.Location());exchange(vec.x,vec.z);
            if(T_GRID::dimension==3){armadillo->phi(cell)=armadillo_temp.Extended_Phi((vec-VECTOR<T,3>((T).5,(T).35,(T).5+(T).07*vec.y))*(T)145)/(T)145;}
            else{
                armadillo->phi(cell)=1;
                for(T i=0;i<=1;i+=(T).01)armadillo->phi(cell)=min(armadillo->phi(cell),armadillo_temp.Extended_Phi((VECTOR<T,3>(i,vec.y,vec.z)-VECTOR<T,3>((T).5,(T).35,(T).5+(T).07*vec.y))*(T)145)/(T)145);}}
    }
}
//#####################################################################
// Function Number_Of_Regions
//#####################################################################
//#####################################################################
// Function Number_Of_Regions
//#####################################################################
// Number of distinct fluid regions simulated by each multiphase test.
// Aborts (after logging) for an unrecognized test number.
template<class T_GRID,class T_WATER_STANDARD_TESTS> int WATER_STANDARD_TESTS_MULTIPHASE<T_GRID,T_WATER_STANDARD_TESTS>::
Number_Of_Regions(int test_number)
{
    if(test_number<=5) return 2; // single-phase base tests: water + air
    switch(test_number){
        case 11: return 3;
        case 12: case 13: case 16: return 4;
        case 14: case 17: return 2;
        case 15: return 5;}
    std::stringstream ss;ss<<"Unrecognized example: "<<test_number<<std::endl;LOG::filecout(ss.str());
    PHYSBAM_FATAL_ERROR();
}
//#####################################################################
// Function Non_Multiphase_Test_Number
//#####################################################################
//#####################################################################
// Function Non_Multiphase_Test_Number
//#####################################################################
// Maps a multiphase test number onto the single-phase base test whose
// geometry/boundary setup it reuses. Tests below 10 are already base tests.
template<class T_GRID,class T_WATER_STANDARD_TESTS> int WATER_STANDARD_TESTS_MULTIPHASE<T_GRID,T_WATER_STANDARD_TESTS>::
Non_Multiphase_Test_Number(int test_number)
{
    if(test_number<10) return test_number;
    switch(test_number){
        case 12: return 4; // splash reuses base test 4
        case 11: case 13: case 14: case 15: case 16: case 17: return 1;}
    std::stringstream ss;ss<<"Unrecognized example: "<<test_number<<std::endl;LOG::filecout(ss.str());
    PHYSBAM_FATAL_ERROR();
}
//#####################################################################
// Function Initial_Phi
//#####################################################################
// Signed distance of point X to the interface of the given region at t=0.
// For tests below 10 it delegates to the single-phase Initial_Phi (region 2
// being its complement). Otherwise each test fills a scratch array of phis
// (oversized at 50 entries) and returns the requested region's value.
// Convention throughout: negative inside the region.
template<class T_GRID,class T_WATER_STANDARD_TESTS> typename T_GRID::VECTOR_T::SCALAR WATER_STANDARD_TESTS_MULTIPHASE<T_GRID,T_WATER_STANDARD_TESTS>::
Initial_Phi(const int region,const TV& X) const
{
    if(test_number<10){
        if(region==1)return Initial_Phi(X);
        else if(region==2)return -Initial_Phi(X);}
    ARRAY<T> phis(50);
    // two drops test
    if(test_number==11){
        TV center1=TV(VECTOR<T,2>((T).055,(T).03)),center2=TV(VECTOR<T,2>((T).05,(T).07));
        if(T_GRID::dimension==3){center1[3]=(T).055;center2[3]=(T).05;}
        T radius=(T).015;
        phis(2)=(X-center1).Magnitude()-radius;
        phis(3)=(X-center2).Magnitude()-radius;
        phis(1)=-min(phis(2),phis(3)); // region 1 fills everything outside the drops
    }
    // splash
    if(test_number==12){
        // Horizontal slabs; region 1 (air) is the complement of the others.
        phis(2)=abs(X.y-(T).30)-(T).10;
        phis(3)=abs(X.y-(T).10)-(T).10;
        phis(4)=abs(X.y+(T).25)-(T).25;
        phis(1)=-min(phis(2),phis(3),phis(4));}
    // RT
    if(test_number==13){
        // Rayleigh-Taylor: slabs perturbed by a small noise offset in y.
        VECTOR<double,3> vec=VECTOR<double,3>(VECTOR<T,3>(X*(T)10));
        if(T_GRID::dimension==3) vec[3]=X[3]*10;
        T y_pos=X.y-(T)NOISE<double>::Noise1(vec,5,(T).5)*(T)0.002;
        phis(1)=abs(y_pos-(T).25)-(T).25;
        phis(2)=abs(y_pos-(T).75)-(T).25;
        phis(3)=abs(y_pos-(T)1.25)-(T).25;
        phis(4)=abs(y_pos-(T)1.75)-(T).25;}
    // rising bubble
    if(test_number==14){
        T radius=(T)1/(T)300;
        phis(2)=radius-X.Magnitude(); // center is at 0,0,0
        phis(1)=-phis(2);}
    // incline plane
    if(test_number==15){
        // Two spheres; regions 3 and 4 start empty (phi=1 everywhere) and are
        // filled later by sources. Region 5 (air) is the complement.
        SPHERE<VECTOR<T,3> > sphere1((VECTOR<T,3>((T).4,(T).35,(T).35)),(T).1);if(T_GRID::dimension==2)sphere1.center.z=0;
        SPHERE<VECTOR<T,3> > sphere2((VECTOR<T,3>((T).4,(T).35,(T).65)),(T).1);if(T_GRID::dimension==2){sphere2.center.z=0;sphere2.center.x=(T).15;}
        phis(1)=sphere1.Signed_Distance(VECTOR<T,3>(X));
        phis(2)=sphere2.Signed_Distance(VECTOR<T,3>(X));
        phis(3)=1;
        phis(4)=1;
        phis(5)=-min(phis(1),phis(2),phis(3),phis(4));}
    // viscoelastic armadillo
    if(test_number==16){
        // Region 1 is the armadillo (level set loaded in Initialize_Bodies),
        // region 2 a pool below y=.3 excluding the armadillo, region 3 air.
        phis(1)=armadillo->Extended_Phi(X);
        phis(2)=max(X.y-(T).3,-phis(1));
        phis(3)=-min(phis(1),phis(2));
        phis(4)=1;}
    // milk crown
    if(test_number==17){
        // A drop above a shallow pool (y<.01); region 2 is the complement.
        TV center1=TV(VECTOR<T,2>((T).05,(T).03));if(T_GRID::dimension==3)center1[3]=(T).05;
        T radius=(T).01;
        phis(1)=min((X-center1).Magnitude()-radius,X.y-(T).01);
        phis(2)=-phis(1);}
    return phis(region);
}
//#####################################################################
// Function Initial_Velocity
//#####################################################################
//#####################################################################
// Function Initial_Velocity
//#####################################################################
// Initial fluid velocity at X. Only the milk-crown test (17) starts with a
// nonzero field: the falling drop moves straight down with unit speed.
template<class T_GRID,class T_WATER_STANDARD_TESTS> typename T_GRID::VECTOR_T WATER_STANDARD_TESTS_MULTIPHASE<T_GRID,T_WATER_STANDARD_TESTS>::
Initial_Velocity(const TV& X) const
{
    if(test_number!=17) return TV();
    TV drop_center=TV(VECTOR<T,2>((T).05,(T).03));
    if(T_GRID::dimension==3) drop_center[3]=(T).05;
    const T drop_radius=(T).011; // slightly larger than the drop's phi radius
    if((X-drop_center).Magnitude()<=drop_radius) return -TV::Axis_Vector(2);
    return TV();
}
//#####################################################################
// Function Update_Sources
//#####################################################################
//#####################################################################
// Function Update_Sources
//#####################################################################
// Time-dependent source control. Only test 15 retargets its first source:
// it emits into region 5 after t=1.15 and into region 4 after t=1.5.
template<class T_GRID,class T_WATER_STANDARD_TESTS> void WATER_STANDARD_TESTS_MULTIPHASE<T_GRID,T_WATER_STANDARD_TESTS>::
Update_Sources(const T time)
{
    if(test_number!=15) return;
    // if(time>(T)1.15) source_velocity(2)=TV(VECTOR<T,2>(0,(T)-.6));
    if(time>(T)1.5) source_region(1)=4;
    else if(time>(T)1.15) source_region(1)=5;
}
template class WATER_STANDARD_TESTS_MULTIPHASE<GRID<VECTOR<float,2> >,WATER_STANDARD_TESTS_2D<GRID<VECTOR<float,2> > > >;
template class WATER_STANDARD_TESTS_MULTIPHASE<GRID<VECTOR<float,3> >,WATER_STANDARD_TESTS_3D<GRID<VECTOR<float,3> > > >;
#ifndef COMPILE_WITHOUT_DOUBLE_SUPPORT
template class WATER_STANDARD_TESTS_MULTIPHASE<GRID<VECTOR<double,2> >,WATER_STANDARD_TESTS_2D<GRID<VECTOR<double,2> > > >;
template class WATER_STANDARD_TESTS_MULTIPHASE<GRID<VECTOR<double,3> >,WATER_STANDARD_TESTS_3D<GRID<VECTOR<double,3> > > >;
#endif
|
<filename>db/models.py
from django.db import models
# Single-letter consequence codes accepted elsewhere in the app.
# NOTE(review): meanings are not defined in this file (presumably magnitude
# suffixes m/k/b) -- confirm against the code that consumes this list.
consequence_choices = ["m","k","b"]
class User(models.Model):
    # Slack user id; used as the primary key.
    u_id = models.CharField(max_length=255,verbose_name="User id, slack",primary_key=True)
    # Comma/serialized list of personal categories (format defined by callers).
    categories = models.CharField(max_length=1024,verbose_name="personal categories",default=None,null=True)
    # Serialized personal filter list (format defined by callers).
    filter_list = models.CharField(max_length=1024,verbose_name="personal filter list",default=None,null=True)

    def __repr__(self):
        # Fixed: previously interpolated self.s_id, which is not a field on
        # this model and raised AttributeError whenever repr() was taken.
        return f"ID:{self.u_id}"

    def __str__(self):
        return self.__repr__()
|
package demo;
import static org.mockito.Mockito.*;
import static org.mockito.Mockito.mock;
import demo.impl.UserModel;
import demo.impl.UserService;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.noear.solon.test.SolonJUnit4ClassRunner;
import java.util.List;
/**
* @author noear 2021/4/14 created
*/
public class MockTest {
    /** Interaction verification on a mocked {@link List}. */
    @Test
    public void test(){
        //create mock
        List mockedList = mock(List.class);
        //use mock object
        mockedList.add("one");
        mockedList.clear();
        // Verify add("one") and clear() were each invoked exactly once.
        verify(mockedList).add("one");
        verify(mockedList).clear();
        // Fixed: verify(mockedList).add("two") never returns false for a
        // missing invocation -- it throws WantedButNotInvoked, so the old
        // `assert false == verify(...)` could not work. never() is the
        // idiomatic way to assert add("two") was not called.
        verify(mockedList, never()).add("two");
    }

    /** Stubbing: userService.get(12) is stubbed to return a mocked UserModel. */
    @Test
    public void test2(){
        UserService userService = mock(UserService.class);
        when(userService.get(12)).thenReturn(mock(UserModel.class));
        UserModel tmp = userService.get(12);
        System.out.println(tmp.toString());
        // NOTE(review): tmp is a Mockito mock; a primitive uid field would
        // normally default to 0, making this assertion fail -- confirm how
        // UserModel initializes uid.
        assert tmp.uid != 0;
    }
}
|
/**
* Copyright(c) u-next.
*/
package org.docksidestage.app.web.lido.sea;
import org.docksidestage.dbflute.allcommon.CDef;
import org.lastaflute.web.validation.Required;
/**
* @author x-zeng
*/
public class LidoSeaBody {
    /** Product identifier (optional: no validation annotation). */
    public Integer productId;
    /** Product display name (optional). */
    public String productName;
    /** Payment method classification; required on this request body. */
    @Required
    public CDef.PaymentMethod pay;
}
|
/*
* Gets the schedules of the flights
*/
require('date-utils');
var getValidDateLimits = require('../lib/UtilityFunctions/getValidDateLimits');
var getAirportCities = require('../lib/getAirportCities');
var async = require('async');
// For every flight segment found in the rome2rio route data, query the
// Flight_Schedule table for services between the segment's airports, mark
// which flights/segments/routes are feasible inside the shifted date windows,
// attach airport objects, then resolve airport cities and invoke
// callback(null, rome2RioData) with the annotated data.
// NOTE(review): budget, dates, ratingRatio and numPeople are accepted but
// never used here -- confirm whether callers rely only on the signature.
function getFlightData(conn, rome2RioData, dateSet,budget, dates, times, ratingRatio,numPeople, callback) {
    console.log('FLIGHT times:'+times);
    var queryString = ''; // NOTE(review): unused -- fullQueryString below is the real query
    var numOfTravels = rome2RioData.length;
    var flightDateSetObjectArray=[];
    var lengthOfRoutesArray=[];
    //Duration of origin city to airport has to be added in the dateSet for that flight
    //maintain index of drive,if any, for the routes
    var indexOfDrive=[];
    var sourceAirportCodeArray = [];
    var destinationAirportCodeArray = [];
    // Pass 1: collect the airport-code pairs of every flight segment, plus a
    // date window shifted by the minutes of travel preceding the flight.
    for(var i = 0; i < numOfTravels; i++)
    {
        var allRoutes = rome2RioData[i].routes;
        //Needed by getDefaultModeOfTravel.js
        lengthOfRoutesArray[i]=allRoutes.length;
        indexOfDrive[i]=-1;
        console.log("Places:"+rome2RioData[i].places[0].name+":"+rome2RioData[i].places[1].name);
        console.log(dateSet.dateStart[i]+":"+dateSet.dateEnd[i]);
        for(var j = 0; j < allRoutes.length; j++)
        {
            console.log("Route Name:",allRoutes[j].name);
            if(allRoutes[j].name=="Drive")
            {
                indexOfDrive[i]=j;
            }
            var allSegments = allRoutes[j].segments;
            var durBeforeFlight=0; // minutes of travel before this segment within the route
            for(var k = 0; k < allSegments.length; k++)
            {
                //console.log("allSegments[k].vehicle:"+allSegments[k].vehicle);
                if(allSegments[k].kind != undefined && allSegments[k].kind == "flight")//A part of this route is a flight
                {
                    // Shift window by durBeforeFlight minutes (Date works in ms).
                    var dateSetAccToFlight={
                        dateStart:new Date(dateSet.dateStart[i].getTime() + durBeforeFlight*60000),
                        dateEnd:new Date(dateSet.dateEnd[i].getTime() + durBeforeFlight*60000)
                    };
                    var sourceAirportCode = allSegments[k].sCode;
                    var destinationAirportCode = allSegments[k].tCode;
                    console.log("Source Dest:"+sourceAirportCode+":"+destinationAirportCode);
                    flightDateSetObjectArray.push(getFlightDateSetObject(sourceAirportCode,destinationAirportCode,dateSetAccToFlight));
                    sourceAirportCodeArray.push(sourceAirportCode);
                    destinationAirportCodeArray.push(destinationAirportCode);
                }
                durBeforeFlight+=allSegments[k].duration;
            }
        }
    }
    if(sourceAirportCodeArray.length > 0){
        var connection=conn.conn();
        connection.connect();
        // Join Flight_Schedule with Airport_In_City twice (origin and
        // destination), restricted to the collected airport codes; values are
        // escaped through the driver's connection.escape().
        var fullQueryString = 'select OriginCityID,DestinationCityID,Operator,FlightNumber,DaysOfTravel,DepartureTime as OriginDepartureTime ,ArrivalTime as DestArrivalTime, OriginDay, DestDay,Hops,CarrierType, OriginAirportCode, OriginAirportName, DestinationAirportCode, DestinationAirportName from '
            +' (select OriginCityID,DestinationCityID,Operator,FlightNumber,DaysOfTravel,DepartureTime,ArrivalTime,OriginDay, DestDay,Hops,CarrierType, OriginAirportCode, OriginAirportName from '
            +' (select * from Flight_Schedule) c '
            +' JOIN '
            +' (select CityID,AirportCode as OriginAirportCode, AirportName as OriginAirportName from Airport_In_City where AirportCode IN ( '+connection.escape(sourceAirportCodeArray)+' )) a '
            +' ON (c.OriginCityID = a.CityID)) d '
            +' JOIN '
            +' (select CityID,AirportCode as DestinationAirportCode ,AirportName as DestinationAirportName from Airport_In_City where AirportCode IN ( '+connection.escape(destinationAirportCodeArray)+' )) b '
            +' ON (d.DestinationCityID = b.CityID);';
        console.log('QUERY for flights:'+fullQueryString);
        connection.query(fullQueryString, function(err, rows, fields) {
            var airportList = [];
            if (err)
            {
                // NOTE(review): throwing inside an async callback cannot be
                // caught by the caller -- confirm intended error handling.
                throw err;
            }
            else{
                for (var i in rows) {
                    //console.log(rows[i]);
                }
                // Index into flightDateSetObjectArray built in pass 1.
                // NOTE(review): pass 1 pushes for EVERY flight segment, but this
                // counter only advances for segments with isMajor == 1 (and the
                // `break` below also skips the increment) -- the two passes can
                // drift out of sync; confirm segment data guarantees alignment.
                var countOfVehicleFlight=0;
                //Iterating the array of rome2rio objects
                for(var i = 0; i < numOfTravels; i++)
                {
                    console.log("i:"+i);
                    var allRoutes = rome2RioData[i].routes;
                    for(var j = 0; j < allRoutes.length; j++)
                    {
                        var allSegments = allRoutes[j].segments;
                        var isRecommendedRoute = 1;
                        for(var k = 0; k < allSegments.length; k++)
                        {
                            if(allSegments[k].isMajor == 1 && allSegments[k].kind != undefined && allSegments[k].kind == "flight")//A part of this route is a flight
                            {
                                var sourceAirportCode = allSegments[k].sCode;
                                var destinationAirportCode = allSegments[k].tCode;
                                var startDate=flightDateSetObjectArray[countOfVehicleFlight].dateSet.dateStart;
                                var endDate=flightDateSetObjectArray[countOfVehicleFlight].dateSet.dateEnd;
                                // HH24/MI formats come from the date-utils package loaded at module level.
                                var startTime=startDate.toFormat("HH24")+":"+startDate.toFormat("MI")+":00";
                                var endTime=endDate.toFormat("HH24")+":"+endDate.toFormat("MI")+":00";
                                var atLeastAFlight=0;
                                var flightData=[];
                                // Attach the full airport objects (with split lat/long)
                                // for this segment's endpoints.
                                for(var airportIndex in rome2RioData[i].airports) {
                                    var airport = rome2RioData[i].airports[airportIndex];
                                    if(airport.code == allSegments[k].sCode) {
                                        allSegments[k].sAirport = airport;
                                        airportList.push(airport);
                                        airport.Latitude = airport.pos.split(',')[0];
                                        airport.Longitude = airport.pos.split(',')[1];
                                    }
                                    else if(airport.code == allSegments[k].tCode) {
                                        allSegments[k].tAirport = airport;
                                        airportList.push(airport);
                                        airport.Latitude = airport.pos.split(',')[0];
                                        airport.Longitude = airport.pos.split(',')[1];
                                    }
                                }
                                //Iterate the flight rows from the database to check whether there are flights on the possible days:times
                                for (var t in rows) {
                                    if((sourceAirportCode==rows[t].OriginAirportCode)&&(destinationAirportCode==rows[t].DestinationAirportCode))
                                    {
                                        var daysOfTravelArray=rows[t].DaysOfTravel.split("");
                                        var OriginDepartureTime=rows[t].OriginDepartureTime;
                                        var flightDateLimits = getValidDateLimits.getValidDateLimits(startDate,endDate,startTime,endTime,daysOfTravelArray,OriginDepartureTime);
                                        if(flightDateLimits.length > 0)
                                        {
                                            atLeastAFlight=1;
                                            console.log("Flight found:"+rows[t].FlightNumber+":"+rows[t].Operator);
                                            rows[t].isRecommended=1;
                                        }
                                        else
                                        {
                                            rows[t].isRecommended=0;
                                        }
                                        rows[t].dateLimits = flightDateLimits;
                                        rows[t].flightIndex = t;
                                        flightData.push(rows[t]);
                                    }
                                }
                                // A route is only recommended if every major flight
                                // segment has at least one feasible flight.
                                if(atLeastAFlight==1)
                                {
                                    allSegments[k].isRecommendedSegment=1;
                                    isRecommendedRoute = 1;
                                }
                                else
                                {
                                    allSegments[k].isRecommendedSegment=0;
                                    isRecommendedRoute = 0;
                                    break;
                                }
                                allSegments[k].flightData=flightData;
                                countOfVehicleFlight++;
                            }
                        }
                        if(isRecommendedRoute==1)
                        {
                            allRoutes[j].isRecommendedRouteFlight=1;
                        }
                        else
                        {
                            allRoutes[j].isRecommendedRouteFlight=0;
                        }
                    }
                }
            }
            // Enrich the collected airports with city data, then hand the
            // annotated rome2rio data back to the caller.
            getAirportCities.getAirportCities(conn, airportList, function onGetAirportCities(){
                callback(null, rome2RioData);
            });
        });
        // Safe here: the mysql driver queues end() until pending queries finish.
        connection.end();
    }
    else {
        // No flight segments at all: return the data unchanged.
        callback(null, rome2RioData);
    }
}
module.exports.getFlightData = getFlightData;
// Bundle a flight leg's endpoint airport codes with the date window in which
// that leg may depart.
function getFlightDateSetObject(originAirportCode,destinationAirportCode,dateSet)
{
    var flightWindow = {};
    flightWindow.originAirportCode = originAirportCode;
    flightWindow.destinationAirportCode = destinationAirportCode;
    flightWindow.dateSet = dateSet;
    return flightWindow;
}
|
//#####################################################################
// Copyright 2009, <NAME>.
// This file is part of PhysBAM whose distribution is governed by the license contained in the accompanying file PHYSBAM_COPYRIGHT.txt.
//#####################################################################
#include <PhysBAM_Tools/Ordinary_Differential_Equations/EXAMPLE.h>
#include <PhysBAM_Tools/Parallel_Computation/MPI_WORLD.h>
#include <PhysBAM_Tools/Parsing/PARSE_ARGS.h>
#include <PhysBAM_Tools/Read_Write/Utilities/FILE_UTILITIES.h>
#include <PhysBAM_Tools/Read_Write/Grids_Uniform/READ_WRITE_GRID.h>
#include <PhysBAM_Tools/Read_Write/Grids_Uniform_Arrays/READ_WRITE_FACE_ARRAYS.h>
#include <PhysBAM_Tools/Utilities/PROCESS_UTILITIES.h>
using namespace PhysBAM;
//#####################################################################
// EXAMPLE
//#####################################################################
template<class TV> EXAMPLE<TV>::
EXAMPLE(const STREAM_TYPE stream_type_input)
    // Defaults: 120 frames at 24 fps, logging/output flags on, no restart.
    :stream_type(stream_type_input),initial_time(0),first_frame(0),last_frame(120),frame_rate(24),frame_title(""),write_substeps_level(-1),write_first_frame(true),write_last_frame(true),write_time(true),
    output_directory("output"),data_directory("../../Public_Data/Archives"),auto_restart(false),restart(false),restart_frame(0),write_output_files(true),write_frame_title(true),
    abort_when_dt_below(0),parse_args(0),mpi_world(0),want_mpi_world(false),need_finish_logging(false),test_number(0),fixed_dt((T)0),substeps_delay_frame(-1),substeps_delay_level(-1)
{
    // If the working directory lies under an "Archives" tree, the data
    // directory is one level deeper than usual; compensate with an extra "../".
    ARRAY<std::string> directory_tokens;
    STRING_UTILITIES::Split(FILE_UTILITIES::Get_Working_Directory(),"/",directory_tokens);
    bool is_archived_project=false;
    for(int i=1;i<=directory_tokens.Size();++i) if(STRING_UTILITIES::Ends_With(directory_tokens(i), "Archives")) is_archived_project=true;
    if(is_archived_project) data_directory = "../" + data_directory;
}
//#####################################################################
// ~EXAMPLE
//#####################################################################
// Releases the MPI world and argument parser owned by this example, and
// finishes logging if Parse_Options initialized it.
template<class TV> EXAMPLE<TV>::
~EXAMPLE()
{
    delete mpi_world;
    if(need_finish_logging) LOG::Finish_Logging();
    delete parse_args;
}
// Simulation time corresponding to a frame index: the offset from the first
// frame converted to seconds through the frame rate.
template<class TV> typename TV::SCALAR EXAMPLE<TV>::
Time_At_Frame(const int frame) const
{
    const int frames_elapsed=frame-first_frame;
    return initial_time+frames_elapsed/frame_rate;
}
// Adjusts dt so the integration lands exactly on target_time: enforces the
// minimum step (flagging *min_dt_failed if raised), finishes in one step when
// possible (setting done), and otherwise avoids leaving a tiny final step by
// taking slightly more than half the remaining interval when within two steps.
// NOTE: comparisons intentionally use time+dt form; do not refactor to
// dt >= target_time-time, which can differ under floating-point rounding.
template<class TV> void EXAMPLE<TV>::
Clamp_Time_Step_With_Target_Time(const T time,const T target_time,T& dt,bool& done,const T min_dt,bool* min_dt_failed)
{
    if(dt<min_dt){dt=min_dt;if(min_dt_failed) *min_dt_failed=true;}
    if(time+dt>=target_time){dt=target_time-time;done=true;}
    else if(time+2*dt>=target_time) dt=min(dt,(T).51*(target_time-time));
}
// Stores the substep-output verbosity level and propagates it to the global
// substep debugging facility.
template<class TV> void EXAMPLE<TV>::
Set_Write_Substeps_Level(const int level)
{
    write_substeps_level=level;
    DEBUG_SUBSTEPS::Set_Write_Substeps_Level(level);
}
// Writes the current frame_title to <output_directory>/<frame>/frame_title,
// if frame-title output is enabled.
template<class TV> void EXAMPLE<TV>::
Write_Frame_Title(const int frame) const
{
    if(write_frame_title) FILE_UTILITIES::Write_To_Text_File(STRING_UTILITIES::string_sprintf("%s/%d/frame_title",output_directory.c_str(),frame),frame_title);
}
// Hook for derived examples to impose additional dt limits; the base
// implementation only warns that it was not overridden.
template<class TV> void EXAMPLE<TV>::
Limit_Dt(T& dt,const T time)
{
    PHYSBAM_WARN_IF_NOT_OVERRIDDEN();
}
//#####################################################################
// Function Log_Parameters
//#####################################################################
//#####################################################################
// Function Log_Parameters
//#####################################################################
// Dumps all example-level parameters to the log inside an "EXAMPLE
// parameters" scope, one key=value per line.
template<class TV> void EXAMPLE<TV>::
Log_Parameters() const
{
    LOG::SCOPE scope("EXAMPLE parameters");
    std::stringstream ss;
    ss<<"initial_time="<<initial_time<<std::endl;
    ss<<"first_frame="<<first_frame<<std::endl;
    ss<<"last_frame="<<last_frame<<std::endl;
    ss<<"frame_rate="<<frame_rate<<std::endl;
    ss<<"auto_restart="<<auto_restart<<std::endl;
    ss<<"restart="<<restart<<std::endl;
    ss<<"restart_frame="<<restart_frame<<std::endl;
    ss<<"write_output_files="<<write_output_files<<std::endl;
    ss<<"write_first_frame="<<write_first_frame<<std::endl;
    ss<<"write_last_frame="<<write_last_frame<<std::endl;
    ss<<"write_time="<<write_time<<std::endl;
    ss<<"write_frame_title="<<write_frame_title<<std::endl;
    ss<<"write_substeps_level="<<write_substeps_level<<std::endl;
    ss<<"output_directory="<<output_directory<<std::endl;
    ss<<"data_directory="<<data_directory<<std::endl;
    ss<<"frame_title="<<frame_title<<std::endl;
    ss<<"abort_when_dt_below="<<abort_when_dt_below<<std::endl;
    LOG::filecout(ss.str());
}
//#####################################################################
// Function Register_Options
//#####################################################################
//#####################################################################
// Function Register_Options
//#####################################################################
// Declares the command-line options shared by all examples. Derived classes
// extend this; no-op when no parser has been created yet.
template<class TV> void EXAMPLE<TV>::
Register_Options()
{
    if(!parse_args) return;
    // Positional arguments: the example (test) number.
    parse_args->Set_Extra_Arguments(-1,"<example number>");
    parse_args->Add_String_Argument("-o","","output directory");
    parse_args->Add_String_Argument("-d","","data directory");
    parse_args->Add_Integer_Argument("-restart",0,"frame","restart frame");
    parse_args->Add_Option_Argument("-auto_restart","restart from last_frame");
    parse_args->Add_Integer_Argument("-substeps",-1,"level","substep output level");
    parse_args->Add_Integer_Argument("-delay_substeps",-1,"frame","delay substeps until later frame");
    parse_args->Add_Integer_Argument("-first_frame",0,"frame","first frame");
    parse_args->Add_Integer_Argument("-last_frame",0,"frame","last frame");
    parse_args->Add_Double_Argument("-framerate",24,"frame rate");
    parse_args->Add_Option_Argument("-query_output","print the output directory and exit");
    parse_args->Add_Integer_Argument("-v",1<<30,"level","verbosity level");
    parse_args->Add_Option_Argument("-nolog","disable log.txt");
    parse_args->Add_Double_Argument("-dt",0,"fix the time step size to this value.");
    // Only meaningful when running under MPI.
    if(mpi_world) parse_args->Add_Option_Argument("-all_verbose","all mpi processes write to stdout (not just the first)");
}
//#####################################################################
// Function Parse_Options
//#####################################################################
//#####################################################################
// Function Parse_Options
//#####################################################################
// Consumes the options needed before logging exists: test number, data
// directory (flag or PHYSBAM_DATA_DIRECTORY env var), and verbosity, then
// initializes logging. Non-root MPI ranks are silenced unless -all_verbose.
template<class TV> void EXAMPLE<TV>::
Parse_Options()
{
    if(!parse_args) return;
    test_number=Subexample(1);
    if(parse_args->Is_Value_Set("-d")) data_directory=parse_args->Get_String_Value("-d");
    else if(const char* d=getenv("PHYSBAM_DATA_DIRECTORY")) data_directory=d;
    int verbosity=parse_args->Get_Integer_Value("-v");
    if(mpi_world && !parse_args->Is_Value_Set("-all_verbose") && mpi_world->initialized && mpi_world->rank) verbosity=0;
    need_finish_logging=true; // destructor must call LOG::Finish_Logging
    LOG::Initialize_Logging(verbosity<10,false,verbosity,!parse_args->Is_Value_Set("-nolog"));
}
//#####################################################################
// Function Override_Options
//#####################################################################
//#####################################################################
// Function Override_Options
//#####################################################################
// Applies the remaining command-line overrides: output directory, restart
// settings, substep output (optionally delayed), frame range, frame rate,
// fixed dt, and log-to-file setup. -query_output prints the directory and
// exits immediately.
template<class TV> void EXAMPLE<TV>::
Override_Options()
{
    if(parse_args->Is_Value_Set("-o")) output_directory=parse_args->Get_String_Value("-o");
    if(parse_args->Is_Value_Set("-restart")){restart=true;restart_frame=parse_args->Get_Integer_Value("-restart");}
    if(parse_args->Is_Value_Set("-auto_restart")) auto_restart=true;
    // -delay_substeps postpones substep output until the given frame; the
    // level is then taken from -substeps rather than applied immediately.
    if(parse_args->Is_Value_Set("-delay_substeps")){
        substeps_delay_frame=parse_args->Get_Integer_Value("-delay_substeps");
        substeps_delay_level=parse_args->Get_Integer_Value("-substeps");}
    else if(parse_args->Is_Value_Set("-substeps")) Set_Write_Substeps_Level(parse_args->Get_Integer_Value("-substeps"));
    if(parse_args->Is_Value_Set("-first_frame")) first_frame=parse_args->Get_Integer_Value("-first_frame");
    if(parse_args->Is_Value_Set("-framerate")) frame_rate=(T)parse_args->Get_Double_Value("-framerate");
    if(parse_args->Is_Value_Set("-query_output")){LOG::filecout(output_directory);exit(0);}
    if(parse_args->Is_Value_Set("-dt")) fixed_dt=(T)parse_args->Get_Double_Value("-dt");
    if(!parse_args->Is_Value_Set("-nolog")){
        // On restart the existing output directory is reused; the log is
        // appended rather than truncated.
        if(!restart && !auto_restart) FILE_UTILITIES::Create_Directory(output_directory);
        FILE_UTILITIES::Create_Directory(output_directory+"/common");
        LOG::Instance()->Copy_Log_To_File(output_directory+"/common/log.txt",restart);}
}
//#####################################################################
// Function Parse_Late_Options
//#####################################################################
//#####################################################################
// Function Parse_Late_Options
//#####################################################################
// Applies options that must win over values a derived example may have set
// during its own initialization (currently only -last_frame).
template<class TV> void EXAMPLE<TV>::
Parse_Late_Options()
{
    if(!parse_args) return;
    if(parse_args->Is_Value_Set("-last_frame")) last_frame=parse_args->Get_Integer_Value("-last_frame");
}
//#####################################################################
// Function Parse
//#####################################################################
//#####################################################################
// Function Parse
//#####################################################################
// Full startup sequence: enable FP exceptions and backtraces, optionally
// initialize MPI, build the parser, register and parse options, then apply
// them. The argument echo is printed only after logging is initialized in
// Parse_Options, so it lands in the log.
template<class TV> void EXAMPLE<TV>::
Parse(int argc,char* argv[])
{
    PROCESS_UTILITIES::Set_Floating_Point_Exception_Handling(true);
    PROCESS_UTILITIES::Set_Backtrace(true);
    if(want_mpi_world) mpi_world=new MPI_WORLD(argc,argv);
    parse_args=new PARSE_ARGS;
    Register_Options();
    parse_args->Parse(argc,argv);
    std::string print_args=parse_args->Print_Arguments(argc,argv);
    Parse_Options();
    LOG::filecout(print_args);
    Override_Options();
}
//#####################################################################
// Function Subexample
//#####################################################################
//#####################################################################
// Function Subexample
//#####################################################################
// Returns the first positional argument parsed as an integer, the supplied
// default when no positional argument was given, and throws VALUE_ERROR when
// the argument is present but not an integer.
template<class TV> int EXAMPLE<TV>::
Subexample(const int default_example) const
{
    if(parse_args->Num_Extra_Args()<1) return default_example;
    int parsed_value;
    if(STRING_UTILITIES::String_To_Value(parse_args->Extra_Arg(1),parsed_value)) return parsed_value;
    throw VALUE_ERROR("The argument is not an integer.");
}
//#####################################################################
template class EXAMPLE<VECTOR<float,1> >;
template class EXAMPLE<VECTOR<float,2> >;
template class EXAMPLE<VECTOR<float,3> >;
#ifndef COMPILE_WITHOUT_DOUBLE_SUPPORT
template class EXAMPLE<VECTOR<double,1> >;
template class EXAMPLE<VECTOR<double,2> >;
template class EXAMPLE<VECTOR<double,3> >;
#endif
|
<filename>src/components/Sobre/index.tsx<gh_stars>0
import React from 'react'
import * as S from './styled'
// About-me section: a short Portuguese bio rendered with the styled-components
// primitives from './styled'. All JSX text below is user-facing page content
// (left untranslated on purpose -- editing it changes the rendered page).
const Sobre = () => (
    <S.SectionWrapper>
        <S.Title>
            Fala ai Dev Blz??
        </S.Title>
        <S.Text>
            Meu nome Filipe e trabalho com Desenvolvimento a um longo,longo tempo..
            Amo Projetos Open Source apesar de ainda não ter contribuído efetivamente, talvez por falta de tempo.
            Também sou Pai do Arthur que chegou neste ano em que escrevo (2020).
        </S.Text>
        <S.Text>
            Adoro programação e desde pequeno eu ja sabia exatamente com o que gostaria de fazer para o resto
            da minha vida , acho que isso e uma dádiva ! algumas das minhas habilidades são : Dot.net , C# , PL/SQL , NodeJS , ReactJS.
        </S.Text>
    </S.SectionWrapper>
)
|
#!/bin/sh
# Build-and-install script for the zcbe cross environment: regenerate the
# autotools files, configure against ${ZCPREF}/${ZCHOST}, then build/install.
# BUG FIX: without `set -e` a failed autogen/configure/make still reached
# `exit 0`, reporting success on a broken build.
set -e
./autogen.sh
# DBL_EPSILON is supplied via the compiler builtin for this cross toolchain.
CFLAGS=-DDBL_EPSILON=__DBL_EPSILON__ ./configure --enable-maintainer-mode --prefix="${ZCPREF}" --host="${ZCHOST}" --build="$(${ZCTOP}/zcbe/config.guess)" --with-libgpg-error-prefix="${ZCPREF}" --with-libassuan-prefix="${ZCPREF}"
make
make install
make distclean
exit 0
|
/*
* Copyright (c) 2021 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mem/pool_manager.h"
#include "utils/arena_containers.h"
#include "utils/small_vector.h"
#include <gtest/gtest.h>
namespace panda::test {
// Test fixture: brings up the pool manager and a compiler-space arena
// allocator so tests can exercise SmallVector with ArenaAllocator storage.
class SmallVectorTest : public ::testing::Test {
public:
    SmallVectorTest()
    {
        panda::mem::MemConfig::Initialize(0, 64_MB, 256_MB, 32_MB);
        PoolManager::Initialize();
        allocator_ = new ArenaAllocator(SpaceType::SPACE_TYPE_COMPILER);
    }
    virtual ~SmallVectorTest()
    {
        // Tear down in reverse construction order.
        delete allocator_;
        PoolManager::Finalize();
        panda::mem::MemConfig::Finalize();
    }
    // Accessor used by tests that need arena-backed vectors.
    ArenaAllocator *GetAllocator() const
    {
        return allocator_;
    }
private:
    ArenaAllocator *allocator_ {nullptr};
};
// Pushes 10 values into a vector with static capacity 4 and checks the
// static->dynamic storage transition: the first four elements live in static
// storage, the fifth push reallocates to dynamic storage, and all elements
// survive the move.
template <typename Vector>
void TestVectorGrow(Vector &vector)
{
    std::array values = {10, 20, 30, 40, 50, 60, 70, 80, 90, 100};
    ASSERT_EQ(vector.size(), 0);
    ASSERT_EQ(vector.capacity(), 4U);
    vector.push_back(values[0]);
    ASSERT_EQ(vector.size(), 1);
    ASSERT_EQ(vector.capacity(), 4U);
    ASSERT_TRUE(vector.IsStatic());
    // Fill the remaining static slots; capacity must not change yet.
    vector.push_back(values[1]);
    vector.push_back(values[2U]);
    vector.push_back(values[3U]);
    ASSERT_EQ(vector.size(), 4U);
    ASSERT_EQ(vector.capacity(), 4U);
    ASSERT_TRUE(vector.IsStatic());
    // Fifth push overflows static capacity and switches to dynamic storage.
    vector.push_back(values[4U]);
    ASSERT_EQ(vector.size(), 5U);
    ASSERT_GE(vector.capacity(), 5U);
    ASSERT_FALSE(vector.IsStatic());
    ASSERT_TRUE(std::equal(values.begin(), values.begin() + 5U, vector.begin()));
    std::copy(values.begin() + 5U, values.end(), std::back_inserter(vector));
    ASSERT_EQ(vector.size(), 10U);
    ASSERT_FALSE(vector.IsStatic());
    for (size_t i = 0; i < values.size(); i++) {
        ASSERT_EQ(vector[i], values[i]);
    }
}
// Runs the growth scenario for both the default-allocator and the
// arena-allocator specializations of SmallVector.
TEST_F(SmallVectorTest, Growing)
{
    {
        SmallVector<int, 4> vector;
        TestVectorGrow(vector);
    }
    {
        SmallVector<int, 4, ArenaAllocator> vector(GetAllocator());
        TestVectorGrow(vector);
    }
}
// Exercises forward, reverse and const iteration plus random-access iterator
// arithmetic, both while the vector is in static storage (4 elements) and
// after it has grown into dynamic storage (10 elements).
template <typename Vector>
void TestVectorIteration(Vector &vector)
{
    std::array values = {10, 20, 30, 40, 50, 60, 70, 80, 90, 100};
    ASSERT_EQ(vector.size(), 0);
    std::copy(values.begin(), values.begin() + 4U, std::back_inserter(vector));
    ASSERT_TRUE(vector.IsStatic());
    ASSERT_EQ(vector.size(), 4U);
    ASSERT_TRUE(std::equal(vector.begin(), vector.end(), values.begin()));
    // Forward find while static: hit and miss.
    {
        auto it = std::find(vector.begin(), vector.end(), 30U);
        ASSERT_NE(it, vector.end());
        ASSERT_EQ(*it, 30U);
        ASSERT_EQ(std::distance(vector.begin(), it), 2U);
        it = std::find(vector.begin(), vector.end(), 50U);
        ASSERT_EQ(it, vector.end());
    }
    // Reverse find while static.
    {
        auto it = std::find(vector.rbegin(), vector.rend(), 30U);
        ASSERT_NE(it, vector.rend());
        ASSERT_EQ(*it, 30U);
        ASSERT_EQ(std::distance(vector.rbegin(), it), 1);
        it = std::find(vector.rbegin(), vector.rend(), 50U);
        ASSERT_EQ(it, vector.rend());
    }
    // Const iteration over a copy.
    {
        const auto const_vector = vector;
        ASSERT_TRUE(std::equal(const_vector.begin(), const_vector.end(), values.begin()));
    }
    // Grow past static capacity, then repeat checks in dynamic storage.
    std::copy(values.begin() + 4U, values.end(), std::back_inserter(vector));
    ASSERT_EQ(vector.size(), 10U);
    ASSERT_FALSE(vector.IsStatic());
    ASSERT_TRUE(std::equal(vector.begin(), vector.end(), values.begin()));
    {
        auto it = std::find(vector.crbegin(), vector.crend(), 30U);
        ASSERT_NE(it, vector.crend());
        ASSERT_EQ(*it, 30U);
        ASSERT_EQ(std::distance(vector.crbegin(), it), 7U);
        it = std::find(vector.crbegin(), vector.crend(), 190U);
        ASSERT_EQ(it, vector.crend());
    }
    // Random-access iterator arithmetic: +, advance, -=, -, ++/--.
    {
        auto it = vector.begin();
        ASSERT_EQ(*(it + 3U), vector[3U]);
        std::advance(it, 8U);
        ASSERT_EQ(*it, vector[8U]);
        it -= 3U;
        ASSERT_EQ(*it, vector[5U]);
        ASSERT_EQ(*(it - 3U), vector[2U]);
        it++;
        ASSERT_EQ(*(it - 3U), vector[3U]);
        --it;
        ASSERT_EQ(*(it - 3U), vector[2U]);
        it--;
        ASSERT_EQ(*(it - 3U), vector[1]);
    }
}
// Runs the iteration scenario for both allocator specializations.
TEST_F(SmallVectorTest, Iteration)
{
    {
        SmallVector<int, 4> vector;
        TestVectorIteration(vector);
    }
    {
        SmallVector<int, 4, ArenaAllocator> vector(GetAllocator());
        TestVectorIteration(vector);
    }
}
// Element type instrumented with static construction/destruction counters so
// the resize tests can assert exactly how many Items a vector operation
// creates or destroys. Call Reset() before each counted operation.
struct Item {
    Item()
    {
        constructed++;
    }
    Item(int aa, double bb) : a(aa), b(bb)
    {
        constructed++;
    }
    virtual ~Item()
    {
        destroyed++;
    }
    DEFAULT_COPY_SEMANTIC(Item);
    DEFAULT_MOVE_SEMANTIC(Item);
    // Equality compares payload only; counters are ignored.
    bool operator==(const Item &rhs) const
    {
        return a == rhs.a && b == rhs.b;
    }
    // Zero both counters.
    static void Reset()
    {
        constructed = 0;
        destroyed = 0;
    }
    int a {101};
    double b {202};
    static inline size_t constructed = 0;
    static inline size_t destroyed = 0;
};
// emplace_back constructs in place; with static capacity 1, the second
// element forces the switch to dynamic storage, and push_back keeps working
// afterwards.
TEST_F(SmallVectorTest, Emplace)
{
    SmallVector<Item, 1> vector;
    vector.emplace_back(1, 1.1);
    ASSERT_EQ(vector.size(), 1);
    ASSERT_EQ(vector[0], Item(1, 1.1));
    ASSERT_TRUE(vector.IsStatic());
    vector.emplace_back(2, 2.2);
    ASSERT_FALSE(vector.IsStatic());
    ASSERT_EQ(vector[1], Item(2, 2.2));
    vector.push_back(Item(3, 3.3));
    ASSERT_EQ(vector[2], Item(3, 3.3));
}
// resize() within the static capacity (4): growing default-constructs the
// new elements, shrinking destroys the surplus, and storage stays static.
TEST_F(SmallVectorTest, ResizeStatic)
{
    SmallVector<Item, 4> vector;
    vector.push_back(Item(1, 1.2));
    ASSERT_EQ(vector[0], Item(1, 1.2));
    Item::Reset();
    vector.resize(3);
    // Growing 1 -> 3 default-constructs exactly two Items.
    ASSERT_EQ(Item::constructed, 2);
    ASSERT_EQ(vector.size(), 3);
    ASSERT_TRUE(vector.IsStatic());
    ASSERT_EQ(vector[0], Item(1, 1.2));
    ASSERT_EQ(vector[1], Item());
    ASSERT_EQ(vector[2], Item());
    Item::Reset();
    vector.resize(1);
    ASSERT_EQ(vector.size(), 1);
    ASSERT_EQ(Item::destroyed, 2);
    Item::Reset();
    vector.clear();
    ASSERT_EQ(Item::destroyed, 1);
    ASSERT_EQ(vector.size(), 0);
}
// resize() past the static capacity (2): growing default-constructs every
// element and moves the vector to dynamic storage; shrinking and clearing
// destroy elements but never switch back to static.
// CLEANUP: removed the unused local `values` array the original declared
// (dead code that also skewed Item construction counts before the Reset).
TEST_F(SmallVectorTest, ResizeDynamic)
{
    SmallVector<Item, 2> vector;
    Item::Reset();
    vector.resize(6);
    ASSERT_EQ(Item::constructed, 6);
    ASSERT_FALSE(vector.IsStatic());
    ASSERT_EQ(vector.size(), 6);
    ASSERT_TRUE(std::all_of(vector.begin(), vector.end(), [](const auto &v) { return v == Item(); }));
    Item::Reset();
    vector.resize(3);
    ASSERT_EQ(vector.size(), 3);
    ASSERT_EQ(Item::destroyed, 3);
    ASSERT_FALSE(vector.IsStatic());
    Item::Reset();
    vector.clear();
    ASSERT_EQ(Item::destroyed, 3);
    ASSERT_EQ(vector.size(), 0);
    ASSERT_FALSE(vector.IsStatic());
}
// resize(n, value) within the static capacity: growing copies the fill value
// into each new slot; shrinking destroys the surplus; storage stays static.
TEST_F(SmallVectorTest, ResizeStaticWithValue)
{
    SmallVector<Item, 4> vector;
    vector.push_back(Item(1, 1.2));
    ASSERT_EQ(vector[0], Item(1, 1.2));
    Item::Reset();
    vector.resize(3, Item(3, 3.3));
    ASSERT_EQ(vector.size(), 3);
    ASSERT_TRUE(vector.IsStatic());
    ASSERT_EQ(vector[0], Item(1, 1.2));
    ASSERT_EQ(vector[1], Item(3, 3.3));
    ASSERT_EQ(vector[2], Item(3, 3.3));
    Item item(3, 3.3);
    Item::Reset();
    vector.resize(1, item);
    ASSERT_EQ(vector.size(), 1);
    ASSERT_EQ(Item::destroyed, 2);
    Item::Reset();
    vector.clear();
    ASSERT_EQ(Item::destroyed, 1);
    ASSERT_EQ(vector.size(), 0);
}
// resize(n, value) past the static capacity (2): growing copies the fill
// value into every new slot and moves to dynamic storage; shrinking and
// clearing destroy elements without returning to static storage.
// CLEANUP: removed the unused local `values` array the original declared.
TEST_F(SmallVectorTest, ResizeDynamicWithValue)
{
    SmallVector<Item, 2> vector;
    Item::Reset();
    vector.resize(6, Item(3, 3.3));
    ASSERT_FALSE(vector.IsStatic());
    ASSERT_EQ(vector.size(), 6);
    ASSERT_TRUE(std::all_of(vector.begin(), vector.end(), [](const auto &v) { return v == Item(3, 3.3); }));
    Item item(3, 3.3);
    Item::Reset();
    vector.resize(3, item);
    ASSERT_EQ(vector.size(), 3);
    ASSERT_EQ(Item::destroyed, 3);
    ASSERT_FALSE(vector.IsStatic());
    Item::Reset();
    vector.clear();
    ASSERT_EQ(Item::destroyed, 3);
    ASSERT_EQ(vector.size(), 0);
    ASSERT_FALSE(vector.IsStatic());
}
// Copy/move assignment and construction across the static/dynamic storage
// boundary: the target's storage mode follows the source's element count,
// and a moved-from vector is left empty in static mode.
TEST_F(SmallVectorTest, Constructing)
{
    std::array values = {0, 1, 2, 3, 4, 5, 6, 7};
    // Assign from static vector to dynamic
    {
        SmallVector<int, 2> vector1;
        SmallVector<int, 2> vector2;
        std::copy(values.begin(), values.end(), std::back_inserter(vector1));
        vector2.push_back(values[0]);
        vector2.push_back(values[1]);
        vector1 = vector2;
        // Target shrinks back into static storage to match the 2-element source.
        ASSERT_EQ(vector1.size(), 2);
        ASSERT_TRUE(vector1.IsStatic());
        ASSERT_TRUE(std::equal(vector1.begin(), vector1.end(), vector2.begin()));
        vector1.push_back(values[2]);
        ASSERT_FALSE(vector1.IsStatic());
    }
    // Assign from dynamic vector to static
    {
        SmallVector<int, 2> vector1;
        SmallVector<int, 2> vector2;
        std::copy(values.begin(), values.end(), std::back_inserter(vector2));
        vector1.push_back(values[0]);
        vector1.push_back(values[1]);
        vector1 = vector2;
        ASSERT_EQ(vector1.size(), values.size());
        ASSERT_FALSE(vector1.IsStatic());
        ASSERT_TRUE(std::equal(vector1.begin(), vector1.end(), vector2.begin()));
    }
    // Move assign from static vector to dynamic
    {
        SmallVector<int, 2> vector1;
        SmallVector<int, 2> vector2;
        std::copy(values.begin(), values.end(), std::back_inserter(vector1));
        vector2.push_back(values[0]);
        vector2.push_back(values[1]);
        vector1 = std::move(vector2);
        // Moved-from vector is empty and back in static mode.
        ASSERT_EQ(vector1.size(), 2);
        ASSERT_EQ(vector2.size(), 0);
        ASSERT_TRUE(vector2.IsStatic());
        ASSERT_TRUE(vector1.IsStatic());
        ASSERT_TRUE(std::equal(vector1.begin(), vector1.end(), values.begin()));
    }
    // Move assign from dynamic vector to static
    {
        SmallVector<int, 2> vector1;
        SmallVector<int, 2> vector2;
        std::copy(values.begin(), values.end(), std::back_inserter(vector2));
        vector1.push_back(values[0]);
        vector1.push_back(values[1]);
        vector1 = std::move(vector2);
        ASSERT_EQ(vector1.size(), values.size());
        ASSERT_EQ(vector2.size(), 0);
        ASSERT_TRUE(vector2.IsStatic());
        ASSERT_FALSE(vector1.IsStatic());
        ASSERT_TRUE(std::equal(vector1.begin(), vector1.end(), values.begin()));
    }
    // Copy constructor from dynamic
    {
        SmallVector<int, 2> vector1;
        std::copy(values.begin(), values.end(), std::back_inserter(vector1));
        ASSERT_FALSE(vector1.IsStatic());
        ASSERT_EQ(vector1.size(), values.size());
        SmallVector<int, 2> vector2(vector1);
        ASSERT_EQ(vector1.size(), values.size());
        ASSERT_EQ(vector2.size(), values.size());
        ASSERT_TRUE(std::equal(vector2.begin(), vector2.end(), vector1.begin()));
    }
    // Copy constructor from static
    {
        SmallVector<int, 2> vector1;
        std::copy(values.begin(), values.begin() + 2, std::back_inserter(vector1));
        ASSERT_TRUE(vector1.IsStatic());
        SmallVector<int, 2> vector2(vector1);
        ASSERT_EQ(vector1.size(), 2);
        ASSERT_EQ(vector2.size(), 2);
        ASSERT_TRUE(std::equal(vector2.begin(), vector2.end(), vector1.begin()));
    }
    // Move constructor from dynamic
    {
        SmallVector<int, 2> vector1;
        std::copy(values.begin(), values.end(), std::back_inserter(vector1));
        ASSERT_FALSE(vector1.IsStatic());
        ASSERT_EQ(vector1.size(), values.size());
        SmallVector<int, 2> vector2(std::move(vector1));
        ASSERT_EQ(vector1.size(), 0);
        ASSERT_EQ(vector2.size(), values.size());
        ASSERT_TRUE(std::equal(vector2.begin(), vector2.end(), values.begin()));
    }
    // Move constructor from static
    {
        SmallVector<int, 2> vector1;
        std::copy(values.begin(), values.begin() + 2, std::back_inserter(vector1));
        ASSERT_TRUE(vector1.IsStatic());
        SmallVector<int, 2> vector2(std::move(vector1));
        ASSERT_EQ(vector1.size(), 0);
        ASSERT_EQ(vector2.size(), 2);
        ASSERT_TRUE(std::equal(vector2.begin(), vector2.end(), values.begin()));
    }
}
} // namespace panda::test
|
# Python program to find the sum of
# all prime numbers in a given interval.
# BUG FIX: the original used math.sqrt without importing math (NameError).
import math


def prime_sum_in_range(start, end):
    """Return the sum of all primes in the inclusive range [start, end]."""
    return sum(num for num in range(start, end + 1) if is_prime(num))


def is_prime(n):
    """Return True if n is prime, False otherwise.

    Uses trial division up to sqrt(n); values <= 1 are never prime.
    """
    if n <= 1:
        return False
    for divisor in range(2, int(math.sqrt(n)) + 1):
        if n % divisor == 0:
            return False
    return True


start = 100
end = 200
print("Sum of prime numbers in given range is", prime_sum_in_range(start, end))
|
# Specifying API routes: each entry maps an HTTP method and path template to
# the name of the handler function that serves it.
Routes = [
    {
        'path': '/login',
        'method': 'POST',
        'handler': 'login_handler'
    },
    {
        'path': '/upload',
        'method': 'POST',
        'handler': 'upload_handler'
    },
    {
        'path': '/users/{user_id}',
        'method': 'GET',
        'handler': 'get_user_handler'
    },
    {
        'path': '/users/{user_id}',
        'method': 'PUT',
        'handler': 'update_user_handler'
    }
]


# Specifying API handlers.
# BUG FIX: the original handlers had comment-only bodies, which is a
# SyntaxError in Python; each stub now has an explicit `pass` placeholder.
def login_handler(req):
    # code for handling a login request
    pass


def upload_handler(req):
    # code for handling an upload request
    pass


def get_user_handler(req):
    # code for handling a get user request
    pass


def update_user_handler(req):
    # code for handling an update user request
    pass
|
<reponame>hmrc/claim-tax-refund-frontend<filename>test/base/SpecBase.scala<gh_stars>1-10
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package base
import config.{AddressLookupConfig, FrontendAppConfig}
import org.scalatestplus.mockito.MockitoSugar
import org.scalatestplus.play.PlaySpec
import org.scalatestplus.play.guice._
import play.api.Application
import play.api.i18n.{Messages, MessagesApi}
import play.api.inject.Injector
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.libs.json.{JsValue, Json}
import play.api.mvc.MessagesControllerComponents
import play.api.test.FakeRequest
import uk.gov.hmrc.auth.core.AuthConnector
import uk.gov.hmrc.auth.core.retrieve.ItmpAddress
import uk.gov.hmrc.http.HeaderCarrier
import utils.SequenceUtil
import scala.concurrent.ExecutionContext
/**
 * Base trait for specs: boots a full Guice application and exposes the
 * commonly injected components plus canned test data used across specs.
 */
trait SpecBase extends PlaySpec with GuiceOneAppPerSuite with MockitoSugar {
  // Guice application supplied to GuiceOneAppPerSuite for every spec.
  override lazy val app: Application = {
    new GuiceApplicationBuilder().build()
  }
  def injector: Injector = app.injector
  def frontendAppConfig: FrontendAppConfig = injector.instanceOf[FrontendAppConfig]
  // NOTE(review): a Mockito-mocked ExecutionContext cannot actually run
  // tasks — confirm callers only pass it through and never execute on it.
  val ec: ExecutionContext = mock[ExecutionContext]
  def addressLookupConfig: AddressLookupConfig = injector.instanceOf[AddressLookupConfig]
  def messagesControllerComponents: MessagesControllerComponents = injector.instanceOf[MessagesControllerComponents]
  def authConnector: AuthConnector = injector.instanceOf[AuthConnector]
  def messagesApi: MessagesApi = messagesControllerComponents.messagesApi
  // Minimal request used to resolve preferred messages.
  def fakeRequest = FakeRequest(method = "", path = "")
  def messages: Messages = messagesApi.preferred(fakeRequest)
  def sequenceUtil[A]: SequenceUtil[A] = injector.instanceOf[SequenceUtil[A]]
  // Sample ITMP address shared by address-related specs.
  val itmpAddress = ItmpAddress(
    Some("Line1"),
    Some("Line2"),
    Some("Line3"),
    Some("Line4"),
    Some("Line5"),
    Some("AB1 2CD"),
    Some("United Kingdom"),
    Some("UK")
  )
  // Canned address-lookup JSON response (postcode intentionally differs from itmpAddress).
  val testResponseAddress: JsValue = {
    Json.parse(input = "{\n\"auditRef\":\"e9e2fb3f-268f-4c4c-b928-3dc0b17259f2\",\n\"address\":{\n\"lines\":[\n\"Line1\",\n\"Line2\",\n\"Line3\",\n\"Line4\"\n],\n \"postcode\":\"NE1 1LX\",\n\"country\":{\n\"code\":\"GB\",\n\"name\":\"United Kingdom\"\n}\n}\n}")
  }
  implicit val hc: HeaderCarrier = HeaderCarrier()
}
|
#!/bin/bash
# Deploy pipeline: prepare backend and frontend artifacts for the region
# given as $1, then deploy the CDK stack named "<appName>-<region>".
set -e
source ./lifecycle/common.sh
checkRegionProvided "$1"
./lifecycle/prepare/prepare-deploy-backend.sh "$1"
./lifecycle/prepare/prepare-deploy-frontend.sh "$1"
APP_NAME=$(getGlobalParam "appName")
# BUG FIX: quote the stack name so word splitting/globbing cannot break it.
cdk deploy "$APP_NAME-$1"
|
#!/bin/sh
# Entry point: compile and run the Go loader that executes the /setup files.
go run /setup/load_and_run_files.go
|
<reponame>seants/integrations-core<filename>vsphere/datadog_checks/vsphere/cache_config.py
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import threading
from collections import defaultdict
class CacheConfig:
    """
    Wraps configuration and status for the Morlist and Metadata caches.
    CacheConfig is threadsafe and can be used from different workers in the
    threading pool.
    """

    # Cache type identifiers used as keys into the internal config dict.
    Morlist = 0
    Metadata = 1

    def __init__(self):
        # Re-entrant lock: nested calls from the same thread cannot deadlock.
        self._lock = threading.RLock()
        self.clear()

    def _check_type(self, type_):
        """Basic sanity check to avoid KeyErrors."""
        if type_ not in (CacheConfig.Morlist, CacheConfig.Metadata):
            raise TypeError("Wrong cache type passed")

    def clear(self):
        """Reset the config object to its initial state."""
        with self._lock:
            self._config = {
                cache_type: {'last': defaultdict(float), 'intl': {}}
                for cache_type in (CacheConfig.Morlist, CacheConfig.Metadata)
            }

    def set_last(self, type_, key, ts):
        """Record the last-seen timestamp for `key` in the given cache."""
        self._check_type(type_)
        with self._lock:
            self._config[type_]['last'][key] = ts

    def get_last(self, type_, key):
        """
        Notice: this will return the defaultdict default value (0.0) also for
        keys that are not in the configuration, a tradeoff to keep the code simple.
        """
        self._check_type(type_)
        with self._lock:
            return self._config[type_]['last'][key]

    def set_interval(self, type_, key, ts):
        """Record the interval value for `key` in the given cache."""
        self._check_type(type_)
        with self._lock:
            self._config[type_]['intl'][key] = ts

    def get_interval(self, type_, key):
        """Return the interval for `key`, or None when it was never set."""
        self._check_type(type_)
        with self._lock:
            return self._config[type_]['intl'].get(key)
|
#!/bin/bash
# Delete every OpenStack user whose listing line matches the pattern in $1.
# To use: $ . this_script.sh mark_word
# For instance: . this_script.sh create  (deletes all users containing "create")
# BUG FIX: quote "$1" and "$id" so empty/space-containing values cannot cause
# word splitting or make grep read from stdin; standard awk quoting used.
for id in $(openstack user list | grep "$1" | awk '{print $2}')
do
    openstack user delete "$id"
done
|
/**
 * Sorts array[left..right] in place using quicksort with a middle-element pivot.
 *
 * @param array the array being sorted
 * @param left  inclusive lower bound of the range to sort
 * @param right inclusive upper bound of the range to sort
 */
public static void quickSort(int[] array, int left, int right) {
    int splitPoint = partition(array, left, right);
    // Recurse into each half only while it still holds two or more elements.
    if (left < splitPoint - 1) {
        quickSort(array, left, splitPoint - 1);
    }
    if (splitPoint < right) {
        quickSort(array, splitPoint, right);
    }
}

/**
 * Partitions array[left..right] around the value of the middle element so
 * that elements before the returned index are &lt;= pivot and elements from
 * the returned index onward are &gt;= pivot.
 *
 * @return the first index of the right-hand partition
 */
public static int partition(int[] array, int left, int right) {
    int pivotValue = array[(left + right) / 2];
    while (left <= right) {
        // Advance both pointers past elements already on the correct side.
        while (array[left] < pivotValue) {
            left++;
        }
        while (array[right] > pivotValue) {
            right--;
        }
        // Both pointers stopped on out-of-place elements: exchange them.
        if (left <= right) {
            int swap = array[left];
            array[left] = array[right];
            array[right] = swap;
            left++;
            right--;
        }
    }
    return left;
}
|
/**
* <p>Title: liteflow</p>
* <p>Description: 轻量级的组件式流程框架</p>
* @author Bryan.Zhang
* @email <EMAIL>
* @Date 2020/4/1
*/
package com.yomahub.liteflow.entity.data;
import java.text.MessageFormat;
/**
 * A single executed step of a component chain: the node id plus how that
 * node participated (its {@link CmpStepType}).
 */
public class CmpStep {

    private String nodeId;

    private CmpStepType stepType;

    public CmpStep(String nodeId, CmpStepType stepType) {
        this.nodeId = nodeId;
        this.stepType = stepType;
    }

    public String getNodeId() {
        return nodeId;
    }

    public void setNodeId(String nodeId) {
        this.nodeId = nodeId;
    }

    public CmpStepType getStepType() {
        return stepType;
    }

    public void setStepType(CmpStepType stepType) {
        this.stepType = stepType;
    }

    @Override
    public String toString() {
        // SINGLE steps print as the bare node id; otherwise the type is appended.
        if (stepType.equals(CmpStepType.SINGLE)) {
            return MessageFormat.format("{0}", nodeId);
        } else {
            return MessageFormat.format("{0}({1})", nodeId, stepType);
        }
    }

    /**
     * Two steps are equal when they are the same class and share the same
     * nodeId; stepType is deliberately ignored (original contract preserved).
     */
    @Override
    public boolean equals(Object obj) {
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        CmpStep other = (CmpStep) obj;
        // Null-safe comparison: the original threw NPE when nodeId was null.
        return nodeId == null ? other.nodeId == null : nodeId.equals(other.nodeId);
    }

    /**
     * BUG FIX: equals was overridden without hashCode, violating the
     * Object contract and breaking HashSet/HashMap lookups. hashCode is now
     * derived from the same field equals compares.
     */
    @Override
    public int hashCode() {
        return nodeId == null ? 0 : nodeId.hashCode();
    }
}
|
from findfiles import Window
from PySide import QtCore
#===================================================================================================
# test_basic_search
#===================================================================================================
def test_basic_search(qtbot, tmpdir):
    '''
    test to ensure basic find files functionality is working.
    '''
    # Fixture: two .avi matches plus .srt files the pattern must exclude.
    tmpdir.join('video1.avi').ensure()
    tmpdir.join('video1.srt').ensure()
    tmpdir.join('video2.avi').ensure()
    tmpdir.join('video2.srt').ensure()
    window = Window()
    window.show()
    qtbot.addWidget(window)
    # Type the glob and search directory into the combo boxes, then click Find.
    window.fileComboBox.clear()
    qtbot.keyClicks(window.fileComboBox, '*.avi')
    window.directoryComboBox.clear()
    qtbot.keyClicks(window.directoryComboBox, str(tmpdir))
    qtbot.mouseClick(window.findButton, QtCore.Qt.LeftButton)
    # Only the two .avi files should be listed; order observed here is by name.
    assert window.filesTable.rowCount() == 2
    assert window.filesTable.item(0, 0).text() == 'video1.avi'
    assert window.filesTable.item(1, 0).text() == 'video2.avi'
|
import requests
# api-endpoint
URL = "https://www.example.com/api/v1/data"
# location given here
# sending get request and saving the response as response object
# NOTE(review): no timeout and no status check — r.json() will raise on a
# non-JSON error body; consider a timeout plus r.raise_for_status(). Confirm
# intended behavior before changing.
r = requests.get(url = URL)
# extracting data in json format
data = r.json()
# extracting latitude, longitude and formatted address
# of the first matching location
# latitude = data['results'][0]['geometry']['location']['lat']
# longitude = data['results'][0]['geometry']['location']['lng']
# formatted_address = data['results'][0]['formatted_address']
# printing the output
# print("Latitude:%s\nLongitude:%s\nFormatted Address:%s"
# %(latitude, longitude,formatted_address))
|
import handleRequest from '../../src/handler'
import makeServiceWorkerEnv from 'service-worker-mock'
import docsData from '../../src/apps/alphafold/docs'

declare var global: any

// Install a fresh service-worker environment before each test so no state
// leaks between cases.
const setup = () => {
  Object.assign(global, makeServiceWorkerEnv())
  jest.resetModules()
}

// GET /alphafold returns the full API docs payload.
describe('/alphafold', () => {
  beforeEach(setup)
  test('handle GET /', async () => {
    const result = await handleRequest(new Request('/alphafold', { method: 'GET' }))
    expect(result.status).toEqual(200)
    const text = await result.text()
    expect(text).toEqual(JSON.stringify(docsData))
  })
})

// POST /alphafold/submit echoes the documented response for the documented
// example payload.
describe('/alphafold/submit', () => {
  beforeEach(setup)
  test('handle POST /submit', async () => {
    const result = await handleRequest(new Request('/alphafold/submit', { method: 'POST', body: JSON.stringify(docsData.endpoints['/alphafold/submit'].payload_example)}))
    expect(result.status).toEqual(200)
    const text = await result.text()
    expect(text).toEqual(JSON.stringify(docsData.endpoints['/alphafold/submit'].response_example))
  })
})

// GET /alphafold/status/:jobId returns the documented status example for any id.
describe('/alphafold/status/:jobId', () => {
  beforeEach(setup)
  test('handle GET /status/:jobId', async () => {
    const status = await handleRequest(new Request('/alphafold/status/12345', { method: 'GET' }))
    expect(status.status).toEqual(200)
    const text = await status.text()
    expect(text).toEqual(JSON.stringify(docsData.endpoints['/alphafold/status/:jobId'].response_example))
  })
})
|
import json


def json_compare(json1, json2):
    """Return True when two JSON object strings decode to equal mappings.

    Both inputs are parsed; the top-level key sets must match and every key
    must map to an equal value.
    """
    first = json.loads(json1)
    second = json.loads(json2)
    # Key sets must agree before any value comparison.
    if first.keys() != second.keys():
        return False
    # All shared keys must carry equal values.
    return all(first[key] == second[key] for key in first)
|
package com.aivinog1.cardpay;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;
import static org.junit.Assert.*;
@RunWith(SpringRunner.class)
@SpringBootTest
public class AppTest {
    // Injected by Spring; non-null only if the application context booted.
    @Autowired
    private App app;
    // Smoke test: verifies the Spring context starts and wires the App bean.
    @Test
    public void verifyThatRunTest() {
        assertNotNull(app);
    }
}
|
import nltk
import numpy as np
import random
import string
# NOTE(review): Sequential, Dense and Activation are used below but never
# imported — as written this script raises NameError. The Keras model/layer
# imports need to be added (confirm which backend the project targets).
# Read in the corpus
with open('chatbot.txt') as file:
    data = file.readlines()
# Tokenize the corpus: lowercase each line, strip newlines, split on spaces.
data = [line.lower().replace('\n', '').split(' ') for line in data]
# Build the word dictionary mapping each distinct token to a unique index.
word_dict = {}
for line in data:
    for word in line:
        if word not in word_dict:
            word_dict[word] = len(word_dict)
# Build the training data: X is a bag-of-words per line; y one-hot marks the
# token that follows each word in that line.
X_train = np.zeros((len(data), len(word_dict)))
y_train = np.zeros((len(data), len(word_dict)))
for i, line in enumerate(data):
    for j, word in enumerate(line[:-1]):
        X_train[i, word_dict[word]] = 1
        y_train[i, word_dict[line[j+1]]] = 1
# Create the model
model = Sequential([
    Dense(256, input_dim=X_train.shape[1]),
    Activation('sigmoid'),
    Dense(y_train.shape[1]),
    Activation('softmax')
])
# Compile and fit the model
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(X_train, y_train, epochs=30);
# Helper function to generate predictions
def predict_next_word(input_text):
    # Encode the prompt as a bag-of-words over the known vocabulary;
    # out-of-vocabulary words are silently ignored.
    input_text = input_text.lower().replace('\n', '').split(' ')
    preds = np.zeros(X_train.shape[1])
    for word in input_text:
        if word in word_dict:
            preds[word_dict[word]] = 1
    preds = model.predict(preds.reshape(1, X_train.shape[1]))
    # Return the three highest-probability next words.
    preds = np.argsort(preds.flatten())[::-1][:3]
    return [list(word_dict.keys())[pred] for pred in preds]
# Use the chatbot
while True:
    input_text = input()
    preds = predict_next_word(input_text)
    print('-> {}'.format(' '.join(preds)))
|
package training.linkedlist;
import org.junit.jupiter.api.Test;
import java.util.function.BiFunction;
import static training.linkedlist.ListNode.*;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* 合并两个排序的链表,并将其作为排序表返回。该列表应通过将前两个列表的节点拼接在一起制成。
* <p>
* 例 1:
* Input: l1 = [1,2,4], l2 = [1,3,4]
* Output: [1,1,2,3,4,4]
* <p>
* 例 2:
* Input: l1 = [], l2 = []
* Output: []
* <p>
* 例 3:
* Input: l1 = [], l2 = [0]
* Output: [0]
*
* 约束:
* - 链表的长度范围为 [0, 50]
* - -100 <= Node.val <= 100
* - 两个链表都是非递减链表
*/
public class E21_Easy_MergeTwoSortedLists {
    // Drives a merge implementation against the three documented examples.
    static void test(BiFunction<ListNode, ListNode, ListNode> method) {
        ListNode l1 = newList(-1, 1, 2, 4),
                l2 = newList(-1, 1, 3, 4);
        ListNode merged = method.apply(l1, l2);
        printList(merged);
        assertTrue(listEqual(merged, 1, 1, 2, 3, 4, 4));
        assertNull(method.apply(null, null));
        assertTrue(listEqual(method.apply(null, new ListNode(0)),
                0));
    }

    /**
     * Splices the nodes of two sorted lists into one sorted list.
     * A dummy head node simplifies appending; p always points at the tail of
     * the merged list.
     */
    public ListNode mergeTwoLists(ListNode l1, ListNode l2) {
        ListNode start = new ListNode(0), p = start;
        for (;l1 != null && l2 != null; p = p.next) {
            if (l1.val < l2.val) {
                p.next = l1;
                l1 = l1.next;
            } else if (l1.val > l2.val) {
                p.next = l2;
                l2 = l2.next;
            } else {
                // Equal values: append one node from each list in this pass.
                p.next = l1;
                l1 = l1.next;
                p = p.next;
                p.next = l2;
                l2 = l2.next;
            }
        }
        // At most one list still has nodes; attach the remainder as-is.
        if (l1 != null)
            p.next = l1;
        else if (l2 != null)
            p.next = l2;
        return start.next;
    }

    @Test
    public void testMergeTwoLists() {
        test(this::mergeTwoLists);
    }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.dtstack.flinkx.client.kubernetes;
import com.dtstack.flinkx.client.ClusterClientHelper;
import com.dtstack.flinkx.client.JobDeployer;
import com.dtstack.flinkx.client.util.JobGraphUtil;
import com.dtstack.flinkx.options.Options;
import org.apache.flink.api.common.JobID;
import org.apache.flink.client.program.ClusterClient;
import org.apache.flink.client.program.ClusterClientProvider;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.kubernetes.KubernetesClusterDescriptor;
import org.apache.flink.kubernetes.configuration.KubernetesConfigOptions;
import org.apache.flink.kubernetes.kubeclient.DefaultKubeClientFactory;
import org.apache.flink.kubernetes.kubeclient.FlinkKubeClient;
import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
/**
* @program: flinkx
* @author: xiuzhu
* @create: 2021/05/31
*/
public class KubernetesSessionClusterClientHelper implements ClusterClientHelper {

    private static final Logger LOG =
            LoggerFactory.getLogger(KubernetesSessionClusterClientHelper.class);

    /**
     * Retrieves an existing Kubernetes session cluster identified by
     * {@code kubernetes.cluster-id} and submits the job graph built from the
     * launcher options to it.
     *
     * @param jobDeployer carries launcher options, program args and the effective Flink configuration
     * @return the client connected to the session cluster the job was submitted to
     * @throws IllegalArgumentException when CLUSTER_ID is not configured
     */
    @Override
    public ClusterClient submit(JobDeployer jobDeployer) throws Exception {
        Options launcherOptions = jobDeployer.getLauncherOptions();
        List<String> programArgs = jobDeployer.getProgramArgs();
        Configuration configuration = jobDeployer.getEffectiveConfiguration();
        // Session mode attaches to a pre-existing cluster, so the id is mandatory.
        String clusterId = configuration.get(KubernetesConfigOptions.CLUSTER_ID);
        if (StringUtils.isBlank(clusterId)) {
            throw new IllegalArgumentException("Kubernetes Session Mode Must Set CLUSTER_ID!");
        }
        FlinkKubeClient flinkKubeClient =
                DefaultKubeClientFactory.getInstance().fromConfiguration(configuration);
        // try-with-resources closes the descriptor; the returned client stays open.
        try (KubernetesClusterDescriptor descriptor =
                new KubernetesClusterDescriptor(configuration, flinkKubeClient); ) {
            ClusterClientProvider<String> retrieve = descriptor.retrieve(clusterId);
            ClusterClient<String> clusterClient = retrieve.getClusterClient();
            JobGraph jobGraph =
                    JobGraphUtil.buildJobGraph(launcherOptions, programArgs.toArray(new String[0]));
            // Blocks until submission completes and a JobID is assigned.
            JobID jobID = (JobID) clusterClient.submitJob(jobGraph).get();
            LOG.info("submit job successfully, jobID = {}", jobID);
            return clusterClient;
        }
    }
}
|
# Print one key's value from the "env" file found in the directory given as $1.
# Usage: script <dir-with-env-file> <key>
set -euo pipefail
. "`cd $(dirname ${BASH_SOURCE[0]}) && pwd`/../../helper/helper.bash"
# Read the env file, then shift so $1 becomes the lookup key.
env=`cat "${1}/env"`
shift
key="${1}"
if [ -z "${key}" ]; then
  echo "[:(] arg 'key' is empty, exit" >&2
  exit 1
fi
# env_val is defined in helper.bash — NOTE(review): exact lookup semantics
# assumed from usage; confirm against the helper.
pp=`env_val "${env}" "${key}"`
echo "${key} = ${pp}"
|
"""
Normalize a given list of strings
"""
def normalize(lst):
output = []
for item in lst:
output.append(item.lower())
return output
if __name__ == '__main__':
input_list = ['Red', 'red', 'WhITE', 'white', 'bLUE', 'blue']
print(normalize(input_list))
|
<reponame>yakky/microservice-talk<filename>tests/test_api.py<gh_stars>0
from urllib.parse import urlencode
import httpx
import pytest
from book_search.main import app
@pytest.mark.asyncio
async def test_search_basic(load_books):
    """Text query returns exactly books 1, 17, 20, each with title and ISBN-13."""
    async with httpx.AsyncClient(app=app, base_url="http://testserver") as client:
        url = app.url_path_for("search")
        params = urlencode({"q": "<NAME>"})
        response = await client.get(f"{url}?{params}")
        assert response.status_code == 200
        data = response.json()
        assert data["results"]
        assert data["count"] == 3
        assert [row["book_id"] for row in data["results"]] == [1, 17, 20]
        # Every hit carries a non-empty title and isbn13.
        for row in data["results"]:
            assert row["title"]
            assert row["isbn13"]
@pytest.mark.asyncio
async def test_search_year(load_books):
    """Year filter (2008) returns exactly books 1, 56, 73, 88."""
    async with httpx.AsyncClient(app=app, base_url="http://testserver") as client:
        url = app.url_path_for("search")
        params = urlencode({"year": 2008})
        response = await client.get(f"{url}?{params}")
        assert response.status_code == 200
        data = response.json()
        assert data["results"]
        assert data["count"] == 4
        assert [row["book_id"] for row in data["results"]] == [1, 56, 73, 88]
        # Every hit carries a non-empty title and isbn13.
        for row in data["results"]:
            assert row["title"]
            assert row["isbn13"]
@pytest.mark.asyncio
async def test_search_tags(load_books):
    """Multi-tag filter returns exactly books 1, 2, 67, 90."""
    async with httpx.AsyncClient(app=app, base_url="http://testserver") as client:
        url = app.url_path_for("search")
        params = urlencode({"tags": ["between-film", "address-year"]})
        response = await client.get(f"{url}?{params}")
        assert response.status_code == 200
        data = response.json()
        assert data["results"]
        assert data["count"] == 4
        assert [row["book_id"] for row in data["results"]] == [1, 2, 67, 90]
        # Every hit carries a non-empty title and isbn13.
        for row in data["results"]:
            assert row["title"]
            assert row["isbn13"]
@pytest.mark.asyncio
async def test_ping():
    """Health endpoint responds 200 with the canned {"message": "Ping"} body."""
    async with httpx.AsyncClient(app=app, base_url="http://testserver") as client:
        url = app.url_path_for("ping")
        response = await client.get(url)
        assert response.status_code == 200
        data = response.json()
        assert data["message"] == "Ping"
|
<reponame>jab142/tasktimer
package com.ergdyne.tasktimer;
import android.content.ContentProvider;
import android.content.ContentValues;
import android.database.Cursor;
import android.database.SQLException;
import android.database.sqlite.SQLiteDatabase;
import android.database.sqlite.SQLiteQueryBuilder;
import android.net.Uri;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import com.ergdyne.lib.DBMap;
/**
* Created by j on 3/18/17.
*/
//I actually have very little idea what is going on with the Provider, but this is supposedly what makes the infinite scrolling work!
// Read-only ContentProvider over the event table joined with the task table;
// only query() is implemented — insert/update/delete are inert stubs.
public class DBProvider extends ContentProvider{
    private DBHelper dbHelper;
    protected SQLiteDatabase db;
    //TODO centralize these settings type details.
    public static final String AUTHORITY = "com.ergdyne.tasktimer.DBProvider";
    //DOESN'T HAVE TO BE TABLE_NAME
    public static final String TABLE_NAME = DBMap.EventTable.table;
    static final Uri CONTENT_URI = Uri.parse("content://" + AUTHORITY
            + "/" + TABLE_NAME);
    @Override
    public boolean onCreate() {
        open();
        return true;
    }
    // Opens a writable database handle kept for the provider's lifetime.
    private void open() throws SQLException{
        dbHelper = new DBHelper(getContext());
        db = dbHelper.getWritableDatabase(); //I don't think it has to be writable...
    }
    private void close(){dbHelper.close();}
    // Joins events to their tasks on the task id, delegating projection,
    // selection and sort order to the caller.
    @Override
    public Cursor query(@NonNull Uri uri, @Nullable String[] projection, @Nullable String selection, @Nullable String[] selectionArgs, @Nullable String sortOrder) {
        SQLiteQueryBuilder builder = new SQLiteQueryBuilder();
        builder.setTables(
                DBMap.EventTable.table + " INNER JOIN " + DBMap.TaskTable.table +
                        " ON (" + DBMap.EventTable.taskID
                        + " = " + DBMap.TaskTable.table + "." +DBMap._ID + ")");
        return builder.query(db,projection,selection,selectionArgs,null,null,sortOrder);
    }
    @Nullable
    @Override
    public String getType(@NonNull Uri uri) {
        return null;
    }
    // Write operations are intentionally unimplemented (read-only provider).
    @Nullable
    @Override
    public Uri insert(@NonNull Uri uri, @Nullable ContentValues values) {
        return null;
    }
    @Override
    public int delete(@NonNull Uri uri, @Nullable String selection, @Nullable String[] selectionArgs) {
        return 0;
    }
    @Override
    public int update(@NonNull Uri uri, @Nullable ContentValues values, @Nullable String selection, @Nullable String[] selectionArgs) {
        return 0;
    }
}
|
<gh_stars>0
#include <gtest/gtest.h>
#include "cc/motor.h"
#include "tests/arduino_simulator.h"
namespace markbot {
namespace {
// Pin assignments shared by every test in this file.
const unsigned int ENABLE_PIN = 40;
const unsigned int DIR_PIN = 41;
const unsigned int STEP_PIN = 42;

// Initializes |motor| against the fake Arduino using the fixed test pins.
void InitMotor(tensixty::FakeArduino &arduino, Motor *motor) {
  MotorInitProto init_proto;
  init_proto.enable_pin = ENABLE_PIN;
  init_proto.dir_pin = DIR_PIN;
  init_proto.step_pin = STEP_PIN;
  motor->Init(init_proto, &arduino);
}

// Applies the standard test configuration: zero the position and allow
// travel within [-1000, 1200] steps.
void Configure(Motor *motor) {
  MotorConfigProto config;
  config.zero = true;
  config.min_steps = -1000;
  config.max_steps = 1200;
  motor->Config(config);
}
// A freshly initialized (unconfigured) motor is stationary with the default
// direction.
TEST(MotorTest, Init) {
  tensixty::FakeArduino arduino;
  Motor motor;
  InitMotor(arduino, &motor);
  EXPECT_EQ(motor.Direction(), false);
  EXPECT_EQ(motor.StepsRemaining(), 0);
}
// A configured motor executes an absolute move of +100 steps, toggling the
// step pin exactly once per logical step and counting down StepsRemaining.
TEST(MotorTest, Step) {
  tensixty::FakeArduino arduino;
  Motor motor;
  InitMotor(arduino, &motor);
  Configure(&motor);
  motor.FastTick();
  EXPECT_EQ(motor.StepsRemaining(), 0);
  // All three control pins are configured as outputs after init/config.
  EXPECT_EQ(arduino.testIsPinOutput(ENABLE_PIN), true);
  EXPECT_EQ(arduino.testIsPinOutput(DIR_PIN), true);
  EXPECT_EQ(arduino.testIsPinOutput(STEP_PIN), true);
  // ENABLE reads high while idle and drops low once a move is pending
  // (see below) — apparently an active-low driver enable.
  EXPECT_EQ(arduino.testGetPinOutput(ENABLE_PIN), true);
  {
    MotorMoveProto move_proto;
    move_proto.max_speed = 1.0;
    move_proto.min_speed = 1.0;
    move_proto.acceleration = 0.001;
    move_proto.absolute_steps = 100;
    motor.Update(move_proto);
  }
  EXPECT_EQ(motor.StepsRemaining(), 100);
  EXPECT_EQ(motor.Direction(), true);
  EXPECT_EQ(arduino.testGetPinOutput(DIR_PIN), true);
  EXPECT_EQ(arduino.testGetPinOutput(ENABLE_PIN), false);
  int pin_steps = 0;
  for (int i = 0; i < 200; ++i) {
    bool step_before = arduino.testGetPinOutput(STEP_PIN);
    motor.FastTick();
    // Count step-pin toggles; each edge corresponds to one physical step.
    if (step_before != arduino.testGetPinOutput(STEP_PIN)) {
      ++pin_steps;
    }
    if (i < 100) {
      EXPECT_EQ(motor.StepsRemaining(), 99 - i);
    } else {
      // After the move completes, further ticks are no-ops.
      EXPECT_EQ(motor.StepsRemaining(), 0);
    }
  }
  EXPECT_EQ(arduino.testGetPinOutput(DIR_PIN), true);
  EXPECT_EQ(pin_steps, 100);
}
// Tare() redefines the current position; the pending absolute target stays
// put, so the remaining step count shifts by the tare offset.
TEST(MotorTest, Tare) {
  tensixty::FakeArduino arduino;
  Motor motor;
  InitMotor(arduino, &motor);
  Configure(&motor);
  motor.FastTick();
  EXPECT_EQ(motor.StepsRemaining(), 0);
  {
    MotorMoveProto move_proto;
    move_proto.max_speed = 1.0;
    move_proto.min_speed = 1.0;
    move_proto.acceleration = 0.001;
    move_proto.absolute_steps = 100;
    motor.Update(move_proto);
  }
  EXPECT_EQ(arduino.testGetPinOutput(DIR_PIN), true);
  EXPECT_EQ(motor.StepsRemaining(), 100);
  // Now at -21: distance to the absolute target of 100 becomes 121.
  motor.Tare(-21);
  EXPECT_EQ(arduino.testGetPinOutput(DIR_PIN), true);
  EXPECT_EQ(motor.StepsRemaining(), 121);
  // Now at 300: the motor is 200 steps past the 100-step target.
  motor.Tare(300);
  EXPECT_EQ(motor.StepsRemaining(), 200);
}
// Requested targets outside [min_steps, max_steps] are clamped to the
// configured travel limits (+1200 / -1000 from the zeroed origin).
TEST(MotorTest, ObserveMaxPositions) {
  tensixty::FakeArduino arduino;
  Motor motor;
  InitMotor(arduino, &motor);
  Configure(&motor);
  motor.FastTick();
  EXPECT_EQ(motor.StepsRemaining(), 0);
  {
    MotorMoveProto move_proto;
    move_proto.max_speed = 1.0;
    move_proto.min_speed = 1.0;
    move_proto.acceleration = 0.001;
    move_proto.absolute_steps = 2000;
    motor.Update(move_proto);
  }
  // Clamped to max_steps = 1200.
  EXPECT_EQ(motor.StepsRemaining(), 1200);
  {
    MotorMoveProto move_proto;
    move_proto.max_speed = 1.0;
    move_proto.min_speed = 1.0;
    move_proto.acceleration = 0.001;
    move_proto.absolute_steps = -2000;
    motor.Update(move_proto);
  }
  // Clamped to min_steps = -1000 (measured from the current position of 0).
  EXPECT_EQ(motor.StepsRemaining(), 1000);
}
// With min_speed = 0 the motor accelerates from rest: the tick interval
// between successive steps shrinks (44 ticks to the first step, then 18,
// then 14, then 12, ...) as speed ramps up.
TEST(MotorTest, SpeedUpAtStart) {
  tensixty::FakeArduino arduino;
  Motor motor;
  InitMotor(arduino, &motor);
  {
    // Wider travel range than Configure() so the 10000-step move fits.
    MotorConfigProto config;
    config.zero = true;
    config.min_steps = -1000;
    config.max_steps = 12000;
    motor.Config(config);
  }
  {
    MotorMoveProto move_proto;
    move_proto.max_speed = 1.0;
    move_proto.min_speed = 0.0;
    move_proto.acceleration = 0.001;
    move_proto.absolute_steps = 10000;
    motor.Update(move_proto);
  }
  motor.FastTick();
  EXPECT_EQ(motor.StepsRemaining(), 10000);
  motor.FastTick();
  EXPECT_EQ(motor.StepsRemaining(), 10000);
  for (int i = 0; i < 42; ++i) {
    motor.FastTick();
    EXPECT_EQ(motor.StepsRemaining(), 10000);
  }
  // First step completes on the 45th tick.
  motor.FastTick();
  EXPECT_EQ(motor.StepsRemaining(), 9999);
  for (int i = 0; i < 17; ++i) {
    motor.FastTick();
    EXPECT_EQ(motor.StepsRemaining(), 9999);
  }
  motor.FastTick();
  for (int i = 0; i < 13; ++i) {
    motor.FastTick();
    EXPECT_EQ(motor.StepsRemaining(), 9998);
  }
  motor.FastTick();
  EXPECT_EQ(motor.StepsRemaining(), 9997);
  for (int i = 0; i < 11; ++i) {
    motor.FastTick();
    EXPECT_EQ(motor.StepsRemaining(), 9997);
  }
  motor.FastTick();
  EXPECT_EQ(motor.StepsRemaining(), 9996);
}
// Over a 1000-step move the motor ramps up to max_speed, cruises, then
// decelerates; the accel and decel phases are symmetric around the move
// (first-full-speed and last-full-speed step counts mirror each other).
TEST(MotorTest, ReachMaxSpeedAndSlowBackDown) {
  tensixty::FakeArduino arduino;
  Motor motor;
  InitMotor(arduino, &motor);
  Configure(&motor);
  {
    MotorMoveProto move_proto;
    move_proto.max_speed = 1.0;
    move_proto.min_speed = 0.0;
    move_proto.acceleration = 0.01;
    move_proto.absolute_steps = 1000;
    motor.Update(move_proto);
  }
  bool reached_max_speed = false;
  int tick_of_first_full_speed = 0;
  int steps_of_first_full_speed = 0;
  int steps_of_last_full_speed = 0;
  for (int i = 0; i < 1200; ++i) {
    // Still accelerating during (at least) the first ~99 ticks.
    if (i < 99) {
      EXPECT_LT(motor.speed(), 1.0);
    }
    motor.FastTick();
    if (motor.StepsRemaining()) {
      EXPECT_GT(motor.speed(), 0.0);
    }
    if (!reached_max_speed) {
      reached_max_speed = motor.speed() == 1.0;
    }
    if (reached_max_speed && tick_of_first_full_speed == 0) {
      tick_of_first_full_speed = i;
      steps_of_first_full_speed = motor.StepsRemaining();
    }
    if (motor.speed() == 1.0) {
      steps_of_last_full_speed = motor.StepsRemaining();
    }
    EXPECT_EQ(arduino.testGetPinOutput(DIR_PIN), true);
  }
  EXPECT_TRUE(reached_max_speed);
  EXPECT_GT(steps_of_first_full_speed, 0);
  EXPECT_GT(steps_of_last_full_speed, 0);
  // Symmetric trapezoid profile: decel begins as far from the end as accel
  // ended from the start.
  EXPECT_EQ(steps_of_last_full_speed, 999 - steps_of_first_full_speed);
  EXPECT_EQ(motor.StepsRemaining(), 0);
  EXPECT_LT(motor.speed(), 0.1);
}

// Same trapezoidal profile as above, but moving in the negative direction
// from a tared (offset) starting position of +200.
TEST(MotorTest, ReachMaxSpeedAndSlowBackDownNegativeAndOffset) {
  tensixty::FakeArduino arduino;
  Motor motor;
  InitMotor(arduino, &motor);
  Configure(&motor);
  motor.Tare(200);
  {
    MotorMoveProto move_proto;
    move_proto.max_speed = 1.0;
    move_proto.min_speed = 0.0;
    move_proto.acceleration = 0.01;
    move_proto.absolute_steps = -800;
    motor.Update(move_proto);
  }
  bool reached_max_speed = false;
  int tick_of_first_full_speed = 0;
  int steps_of_first_full_speed = 0;
  int steps_of_last_full_speed = 0;
  for (int i = 0; i < 1200; ++i) {
    if (i < 99) {
      EXPECT_LT(motor.speed(), 1.0);
    }
    motor.FastTick();
    if (motor.StepsRemaining()) {
      EXPECT_GT(motor.speed(), 0.0);
    }
    if (!reached_max_speed) {
      reached_max_speed = motor.speed() == 1.0;
    }
    if (reached_max_speed && tick_of_first_full_speed == 0) {
      tick_of_first_full_speed = i;
      steps_of_first_full_speed = motor.StepsRemaining();
    }
    if (motor.speed() == 1.0) {
      steps_of_last_full_speed = motor.StepsRemaining();
    }
    // Direction pin stays low for the whole negative move.
    EXPECT_EQ(arduino.testGetPinOutput(DIR_PIN), false);
  }
  EXPECT_TRUE(reached_max_speed);
  EXPECT_GT(steps_of_first_full_speed, 0);
  EXPECT_GT(steps_of_last_full_speed, 0);
  EXPECT_EQ(steps_of_last_full_speed, 999 - steps_of_first_full_speed);
  EXPECT_EQ(motor.StepsRemaining(), 0);
  EXPECT_LT(motor.speed(), 0.1);
}
// A 90-step move is too short to reach max_speed: the profile is a triangle
// whose peak speed occurs at the midpoint (45 steps remaining).
TEST(MotorTest, TriangleRamp) {
  tensixty::FakeArduino arduino;
  Motor motor;
  InitMotor(arduino, &motor);
  Configure(&motor);
  {
    MotorMoveProto move_proto;
    move_proto.max_speed = 1.0;
    move_proto.min_speed = 0.0;
    move_proto.acceleration = 0.01;
    move_proto.absolute_steps = 90;
    motor.Update(move_proto);
  }
  float max_speed = 0.0;
  int steps_at_max_speed = 0;
  for (int i = 0; i < 1200; ++i) {
    // max_speed is never actually reached on a short move.
    EXPECT_LT(motor.speed(), 1.0);
    motor.FastTick();
    if (motor.StepsRemaining()) {
      EXPECT_GT(motor.speed(), 0.0);
    }
    if (motor.speed() > max_speed) {
      max_speed = motor.speed();
      steps_at_max_speed = motor.StepsRemaining();
    }
  }
  EXPECT_EQ(steps_at_max_speed, 45);
  EXPECT_EQ(motor.StepsRemaining(), 0);
  EXPECT_LT(motor.speed(), 0.1);
}

// Same triangular profile in the negative direction.
TEST(MotorTest, TriangleRampNegative) {
  tensixty::FakeArduino arduino;
  Motor motor;
  InitMotor(arduino, &motor);
  Configure(&motor);
  {
    MotorMoveProto move_proto;
    move_proto.max_speed = 1.0;
    move_proto.min_speed = 0.0;
    move_proto.acceleration = 0.01;
    move_proto.absolute_steps = -90;
    motor.Update(move_proto);
  }
  float max_speed = 0.0;
  int steps_at_max_speed = 0;
  for (int i = 0; i < 1200; ++i) {
    EXPECT_LT(motor.speed(), 1.0);
    motor.FastTick();
    if (motor.StepsRemaining()) {
      EXPECT_GT(motor.speed(), 0.0);
    }
    if (motor.speed() > max_speed) {
      max_speed = motor.speed();
      steps_at_max_speed = motor.StepsRemaining();
    }
  }
  EXPECT_EQ(steps_at_max_speed, 45);
  EXPECT_EQ(motor.StepsRemaining(), 0);
  EXPECT_LT(motor.speed(), 0.1);
}
// Taring mid-move extends the distance to the (unchanged) absolute target;
// the motor keeps its current speed, accelerates to full speed, and still
// decelerates cleanly at the end of the lengthened move.
TEST(MotorTest, TareWhileMoving) {
  tensixty::FakeArduino arduino;
  Motor motor;
  InitMotor(arduino, &motor);
  Configure(&motor);
  {
    MotorMoveProto move_proto;
    move_proto.max_speed = 1.0;
    move_proto.min_speed = 0.0;
    move_proto.acceleration = 0.01;
    move_proto.absolute_steps = 90;
    motor.Update(move_proto);
  }
  // Run partway through the move while still accelerating.
  while (motor.StepsRemaining() > 40) {
    motor.FastTick();
    EXPECT_LT(motor.speed(), 1.0);
    EXPECT_GT(motor.speed(), 0.0);
  }
  EXPECT_EQ(arduino.testGetPinOutput(DIR_PIN), true);
  motor.Tare(-200); // Still trying to get to 90 in absolute coords.
  EXPECT_EQ(motor.StepsRemaining(), 290);
  EXPECT_EQ(arduino.testGetPinOutput(DIR_PIN), true);
  // Speed carries over across the tare and keeps increasing.
  const float midpoint_speed = motor.speed();
  motor.FastTick();
  EXPECT_GT(motor.speed(), midpoint_speed);
  bool reached_max_speed = false;
  int slowdown_step = 1000;
  while (motor.StepsRemaining() > 0) {
    ASSERT_GT(motor.speed(), 0.0);
    motor.FastTick();
    if (motor.speed() == 1.0) {
      reached_max_speed = true;
    } else if (reached_max_speed && slowdown_step == 1000) {
      // Record how many steps remained when deceleration began.
      slowdown_step = motor.StepsRemaining();
    }
  }
  EXPECT_EQ(motor.StepsRemaining(), 0);
  EXPECT_EQ(slowdown_step, 49);
  EXPECT_LT(motor.speed(), 0.1);
  EXPECT_TRUE(reached_max_speed);
}
// Taring exactly onto the target cancels the remainder of the move: the
// motor stops immediately with zero speed.
TEST(MotorTest, TareWhileMovingStop) {
  tensixty::FakeArduino arduino;
  Motor motor;
  InitMotor(arduino, &motor);
  Configure(&motor);
  {
    MotorMoveProto move_proto;
    move_proto.max_speed = 1.0;
    move_proto.min_speed = 0.0;
    move_proto.acceleration = 0.01;
    move_proto.absolute_steps = 90;
    motor.Update(move_proto);
  }
  while (motor.StepsRemaining() > 40) {
    motor.FastTick();
    EXPECT_LT(motor.speed(), 1.0);
    EXPECT_GT(motor.speed(), 0.0);
  }
  motor.Tare(90); // Still trying to get to 90 in absolute coords.
  EXPECT_EQ(motor.StepsRemaining(), 0);
  motor.FastTick();
  EXPECT_EQ(motor.StepsRemaining(), 0);
  EXPECT_EQ(motor.speed(), 0.0);
}
// Taring past the target mid-move flips the direction: the motor reverses
// to cover the (now negative) distance back to the absolute target.
TEST(MotorTest, TareReverse) {
  tensixty::FakeArduino arduino;
  Motor motor;
  InitMotor(arduino, &motor);
  Configure(&motor);
  {
    MotorMoveProto move_proto;
    move_proto.max_speed = 1.0;
    move_proto.min_speed = 0.0;
    move_proto.acceleration = 0.01;
    move_proto.absolute_steps = 90;
    motor.Update(move_proto);
  }
  EXPECT_TRUE(motor.Direction());
  while (motor.StepsRemaining() > 40) {
    motor.FastTick();
    EXPECT_LT(motor.speed(), 1.0);
    EXPECT_GT(motor.speed(), 0.0);
  }
  EXPECT_EQ(arduino.testGetPinOutput(DIR_PIN), true);
  // Now at 100, i.e. 10 steps beyond the 90-step target: reverse.
  motor.Tare(100);
  EXPECT_EQ(arduino.testGetPinOutput(DIR_PIN), false);
  EXPECT_FALSE(motor.Direction());
  EXPECT_EQ(motor.StepsRemaining(), 10);
  while (motor.StepsRemaining() > 0) {
    motor.FastTick();
    EXPECT_LT(motor.speed(), 1.0);
    EXPECT_GT(motor.speed(), 0.0);
  }
  EXPECT_LT(motor.speed(), 0.2);
}
// With disable_after_moving set, ENABLE is held low (driver on) for the
// duration of the move and re-raised once the move completes.
TEST(MotorTest, DisableAfterMoving) {
  tensixty::FakeArduino arduino;
  Motor motor;
  InitMotor(arduino, &motor);
  Configure(&motor);
  EXPECT_EQ(arduino.testGetPinOutput(ENABLE_PIN), true);
  {
    MotorMoveProto move_proto;
    move_proto.max_speed = 1.0;
    move_proto.min_speed = 0.0;
    move_proto.acceleration = 0.01;
    move_proto.absolute_steps = 90;
    move_proto.disable_after_moving = true;
    motor.Update(move_proto);
  }
  while (motor.StepsRemaining() > 0) {
    EXPECT_EQ(arduino.testGetPinOutput(ENABLE_PIN), false);
    motor.FastTick();
    EXPECT_GT(motor.speed(), 0.0);
  }
  // One extra tick after completion turns the driver back off.
  motor.FastTick();
  EXPECT_EQ(arduino.testGetPinOutput(ENABLE_PIN), true);
}
} // namespace
} // namespace markbot
|
<?php
// Fetch current conditions and the 3-day forecast from the weather API and
// render them as simple HTML.
// NOTE(review): the URL has no scheme ('api.com/weather'), so
// file_get_contents would treat it as a local path — confirm the real endpoint.
$weatherData = file_get_contents('api.com/weather');
if ($weatherData === false) {
    // Network/endpoint failure: show a friendly message instead of warnings.
    echo '<p>Weather data is currently unavailable.</p>';
    return;
}
$data = json_decode($weatherData);
if ($data === null || !isset($data->current->temperature) || !isset($data->forecast)) {
    // Malformed or unexpected API payload.
    echo '<p>Weather data is currently unavailable.</p>';
    return;
}
// Current temperature and forecast list from the decoded payload.
$currentTemperature = $data->current->temperature;
$forecast = $data->forecast;
// Escape API-provided values before echoing to avoid HTML injection.
echo '<h1>' . htmlspecialchars($currentTemperature) . '°' . '</h1>';
echo '<h2>3 Day Forecast</h2>';
foreach ($forecast as $day) {
    echo '<h3>' . htmlspecialchars($day->date) . '</h3>';
    echo '<p>High: ' . htmlspecialchars($day->temperature->high) . '°' . '</p>';
    echo '<p>Low: ' . htmlspecialchars($day->temperature->low) . '°' . '</p>';
}
?>
|
<gh_stars>0
import { HttpClient, HttpHeaders } from '@angular/common/http';
import { Observable, throwError } from 'rxjs';
import { first, map } from 'rxjs/operators';
import { InjectorInstance } from '../hal-form-client.module';
import { ContentTypeEnum } from './content-type.enum';
import { HTTP_METHODS, HttpMethodEnum } from './http-method.enum';
import { ILink, Link } from './link';
import { Resource } from './resource';
/** A single HAL-FORMS template property: metadata describing one form field. */
export interface ITemplateProperty {
  name: string;
  prompt?: string;
  readOnly?: boolean;
  regex?: string;
  required?: boolean;
  templated?: boolean;
  value?: string;
  cols?: number;
  rows?: number;
  min?: number;
  max?: number;
  minLength?: number;
  maxLength?: number;
  placeholder?: string;
  step?: number;
  type?: string;
  options?: ITemplatePropertyOption[];
}

/** Option metadata for a property rendered as a select/choice input. */
export interface ITemplatePropertyOption {
  inline?: string[];
  link?: ILink;
  maxItems?: number;
  minItems?: number;
  promptField?: string;
  selectedValues?: string[];
  valueField?: string;
}

/** Raw shape of a HAL-FORMS `_templates` entry as received from the server. */
export interface ITemplate {
  method?: HttpMethodEnum;
  title?: string;
  contentType?: ContentTypeEnum;
  properties?: ITemplateProperty[];
  target?: string;
}
/**
 * A HAL-FORMS template: an affordance describing how to submit a request
 * (HTTP method, content type, form properties) to a target link.
 */
export class Template implements ITemplate {
  private httpClient: HttpClient = InjectorInstance.get(HttpClient);

  method: HttpMethodEnum;
  title?: string;
  contentType?: ContentTypeEnum;
  properties?: ITemplateProperty[];
  target?: string;
  targetLink?: Link;

  /**
   * @param raw  parsed template JSON; `method` defaults to GET
   * @param link fallback target link used when the template declares no `target`
   */
  constructor(raw: ITemplate, link?: Link) {
    this.method = raw.method || HttpMethodEnum.GET;
    this.properties = raw.properties;
    if (raw.contentType) {
      this.contentType = raw.contentType;
    }
    if (raw.title) {
      this.title = raw.title;
    }
    if (raw.target) {
      this.target = raw.target;
      // A target containing both '{' and '}' is treated as a URI template.
      this.targetLink = new Link({
        href: raw.target,
        templated: raw.target.includes('{') && raw.target.includes('}'),
      });
    } else if (link) {
      this.targetLink = link;
    }
  }

  /** Submits this template's request to its own target link. */
  submit<T = any>(payload?: any, params?: any, observe: 'body' | 'events' | 'response' = 'body'): Observable<T> {
    return this.submitToTarget(this.targetLink as any, payload, params, observe || 'body');
  }

  /**
   * Submits this template's method/body to an arbitrary link.
   * Fails the returned Observable when the link is missing, the method is
   * unsupported, or the templated URL cannot be expanded with `params`.
   */
  submitToTarget<T = any>(
    link: Link,
    body?: any,
    params?: any,
    observe: 'body' | 'events' | 'response' = 'body',
  ): Observable<T> {
    if (!link?.href?.length) {
      return throwError(() => new Error('Invalid link'));
    }
    if (!HTTP_METHODS.includes(this.method.toUpperCase())) {
      return throwError(() => new Error(`Method ${this.method} is not supported`));
    }
    let headers = new HttpHeaders({ Accept: ContentTypeEnum.APPLICATION_JSON_HAL_FORMS });
    // Multipart uploads must let the HTTP client set the boundary header itself.
    if (body && this.contentType !== ContentTypeEnum.MULTIPART_FILE) {
      headers = headers.append('Content-Type', this.contentType || ContentTypeEnum.APPLICATION_JSON);
    }
    const url = link.parseUrl(params || {});
    return url
      ? this.httpClient
          .request(this.method, url, {
            headers,
            body,
            observe: observe || 'body',
          })
          .pipe(
            first(),
            map((response) => new Resource(response) as any),
          )
      : throwError(() => new Error(`Un-parsable Url ${link?.href}, ${params}`));
  }

  /** Finds a template property by its `name`, or undefined if absent. */
  public getProperty(name: string): ITemplateProperty | undefined {
    return this.properties?.find((prop) => prop.name === name);
  }

  /**
   * Sets `key` to `value` on the property called `property`, creating the
   * property when it does not exist yet. Returns `this` for chaining.
   */
  public setProperty(property: string, key: string, value: any): this {
    if (!property || !key) {
      return this;
    }
    const find: any = this.getProperty(property);
    if (find) {
      find[key] = value;
    } else {
      if (!this.properties) {
        this.properties = [];
      }
      // Bug fix: include the property's `name` so the new entry is later
      // discoverable via getProperty(); previously only { [key]: value }
      // was pushed, producing an anonymous, unfindable property.
      this.properties.push({ name: property, [key]: value } as any);
    }
    return this;
  }
}
|
import { useState } from 'react';
import Head from 'next/head';
import { useFormik } from 'formik';
import axios from 'axios';
import Header from '../components/Header';
import { useCart } from '../contexts/CartContext';
import { formatPrice } from '../util/format';
function Cart() {
const [orderStatus, setOrderStatus] = useState('pre-order');
const [qrcode, setQrcode] = useState(null);
const { cart, removeFromCart, updateProductQuantity } = useCart();
const form = useFormik({
initialValues: {
cpf: '',
name: '',
phone: ''
},
onSubmit: async (values) => {
const items = Object.keys(cart).map((item) => {
const formattedItem = {
quantity: cart[item].quantity,
price: cart[item].data.price,
name: cart[item].data.name,
};
return formattedItem;
});
const order = { ...values, items: [...items] };
setOrderStatus('ordering');
const { data } = await axios.post('http://localhost:3001/create-order', order);
setQrcode(data.qrcode);
setOrderStatus('order-received');
}
});
function handleQuantityChange(productId, newQuantity) {
updateProductQuantity(productId, newQuantity);
}
const itemsCount = Object.keys(cart).reduce((previousValue, currentValue) => {
return previousValue + cart[currentValue].quantity;
}, 0);
const itemsTotal = Object.keys(cart).reduce((previousValue, currentValue) => {
return previousValue + cart[currentValue].quantity * cart[currentValue].data.price;
}, 0);
return (
<>
<Head>
<title>SocksShop | Cart</title>
</Head>
<Header />
<div className="flex justify-center my-6">
<div className="flex flex-col w-full p-8 text-gray-800 bg-white shadow-lg pin-r pin-y md:w-4/5 lg:w-4/5">
<div className="flex-1">
<table className="w-full text-sm lg:text-base" cellSpacing="0">
<thead>
<tr className="h-12 uppercase">
<th className="hidden md:table-cell"></th>
<th className="text-left">Product</th>
<th className="lg:text-right text-left pl-5 lg:pl-0">
<span className="lg:hidden" title="Quantity">Qtd</span>
<span className="hidden lg:inline">Quantity</span>
</th>
<th className="hidden text-right md:table-cell">Unit price</th>
<th className="text-right">Total price</th>
</tr>
</thead>
<tbody>
{Object.keys(cart).map(key => {
const product = cart[key];
return (
<tr key={key}>
<td className="hidden pb-4 md:table-cell">
<a href="#">
<img src={product.data.featured_image.url} alt={product.data.featured_image?.alt} className="w-20 rounded" />
</a>
</td>
<td>
<p className="mb-2 md:ml-4">{product.data.name}</p>
<button
type="button"
className="text-gray-700 md:ml-4"
onClick={() => removeFromCart(product.id)}
>
<small>(Remove item)</small>
</button>
</td>
<td className="justify-center md:justify-end md:flex mt-6">
<div className="w-20 h-10">
<div className="relative flex flex-row w-full h-8">
<input
type="number"
defaultValue={product.quantity}
onChange={event => handleQuantityChange(product.id, event.target.value)}
className="w-full font-semibold text-center text-gray-700 bg-gray-200 outline-none focus:outline-none hover:text-black focus:text-black"
/>
</div>
</div>
</td>
<td className="hidden text-right md:table-cell">
<span className="text-sm lg:text-base font-medium">
{formatPrice(product.data.price)}
</span>
</td>
<td className="text-right">
<span className="text-sm lg:text-base font-medium">
{formatPrice(product.data.price * product.quantity)}
</span>
</td>
</tr>
);
})}
</tbody>
</table>
<hr className="pb-6 mt-6" />
<div className="my-4 mt-6 -mx-2 lg:flex">
<div className="lg:px-2 lg:w-1/2">
<div className="p-4 bg-gray-100 rounded-full">
<h1 className="ml-2 font-bold uppercase">Personal Data</h1>
</div>
<div className="p-4">
<div className="justify-center">
{ orderStatus === 'pre-order' && (
<form onSubmit={form.handleSubmit}>
<p className="mb-4 italic">Please, enter your details below to proceed.</p>
<div className="flex items-center w-full h-13 pl-3">
<label className="w-1/4">Name</label>
<input
type="text"
name="name"
id="name"
placeholder="Your name"
value={form.values.name}
onChange={form.handleChange}
className="my-2 p-4 w-3/4 bg-gray-100 outline-none appearance-none focus:outline-none active:outline-none bg-white border rounded-full"
/>
</div>
<div className="flex items-center w-full h-13 pl-3">
<label className="w-1/4">CPF</label>
<input
type="text"
name="cpf"
id="cpf"
placeholder="Your CPF number"
value={form.values.cpf}
onChange={form.handleChange}
className="my-2 p-4 w-3/4 bg-gray-100 outline-none appearance-none focus:outline-none active:outline-none bg-white border rounded-full"
/>
</div>
<div className="flex items-center w-full h-13 pl-3">
<label className="w-1/4">Whatsapp</label>
<input
type="text"
name="phone"
id="phone"
placeholder="Your whatsapp"
value={form.values.phone}
onChange={form.handleChange}
className="my-2 p-4 w-3/4 bg-gray-100 outline-none appearance-none focus:outline-none active:outline-none bg-white border rounded-full"
/>
</div>
<button type="submit" className="flex justify-center w-full px-10 py-3 mt-6 font-medium text-white uppercase bg-gray-800 rounded-full shadow item-center hover:bg-gray-700 focus:shadow-outline focus:outline-none">
<svg aria-hidden="true" data-prefix="far" data-icon="credit-card" className="w-8" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 576 512"><path fill="currentColor" d="M527.9 32H48.1C21.5 32 0 53.5 0 80v352c0 26.5 21.5 48 48.1 48h479.8c26.6 0 48.1-21.5 48.1-48V80c0-26.5-21.5-48-48.1-48zM54.1 80h467.8c3.3 0 6 2.7 6 6v42H48.1V86c0-3.3 2.7-6 6-6zm467.8 352H54.1c-3.3 0-6-2.7-6-6V256h479.8v170c0 3.3-2.7 6-6 6zM192 332v40c0 6.6-5.4 12-12 12h-72c-6.6 0-12-5.4-12-12v-40c0-6.6 5.4-12 12-12h72c6.6 0 12 5.4 12 12zm192 0v40c0 6.6-5.4 12-12 12H236c-6.6 0-12-5.4-12-12v-40c0-6.6 5.4-12 12-12h136c6.6 0 12 5.4 12 12z"/></svg>
<span className="ml-2 mt-5px">Procceed to checkout</span>
</button>
</form>
)}
{orderStatus === 'ordering' && (
<p>Order being placed. Wait...</p>
)}
{orderStatus === 'order-received' && (
<>
<p>Make the payment with the QRCode below</p>
<img src={qrcode.imagemQrcode} />
</>
)}
</div>
</div>
</div>
<div className="lg:px-2 lg:w-1/2">
<div className="p-4 bg-gray-100 rounded-full">
<h1 className="ml-2 font-bold uppercase">Order Details</h1>
</div>
<div className="p-4">
<p className="mb-6 italic">Shipping and additionnal costs are calculated based on values you have entered</p>
<div className="flex justify-between border-b">
<div className="lg:px-4 lg:py-2 m-2 text-lg lg:text-xl font-bold text-center text-gray-800">
Socks Quantity
</div>
<div className="lg:px-4 lg:py-2 m-2 lg:text-lg font-bold text-center text-gray-900">
{itemsCount} un.
</div>
</div>
<div className="flex justify-between pt-4 border-b">
<div className="lg:px-4 lg:py-2 m-2 text-lg lg:text-xl font-bold text-center text-gray-800">
Total
</div>
<div className="lg:px-4 lg:py-2 m-2 lg:text-lg font-bold text-center text-gray-900">
{formatPrice(itemsTotal)}
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</>
)
}
export default Cart;
|
<filename>src/grapher.h
#pragma once
#ifndef _GRAPHER_h
#define _GRAPHER_h
#if defined(ARDUINO) && ARDUINO >= 100
#include "arduino.h"
#else
#include "WProgram.h"
#endif
#include "global.h"
#include "funktionBuffer.h"
#include "settings.h"
#include "graph.h"
#include "funktion.h"
#include "ILI9341_t3.h"
#include <math.h>
// Interactive on-device function grapher: holds up to five editable function
// strings, tracks cursor/selection state, and renders to the ILI9341 display.
// (Member names suggest roles below; behavior lives in the .cpp — confirm.)
class GrapherClass
{
 protected:
	__int8_t _pageAt = 0;                // presumably the active page/tab index
	funktion _funktions[5];              // parsed function objects, one per slot
	char _strings[5][512];               // raw input text per slot
	__int16_t _stringSize[5] = { 0 };    // current length of each input string
	__int16_t _displatStart[5] = { 0 };  // first visible char per slot (sic: "displat")
	__int16_t _pointAt = 0;              // cursor slot — TODO confirm semantics
	__int16_t _pointInString = 0;        // cursor position within the string
	__int16_t _scaleMode = 0;
	__int8_t _selected = 0;
	// View window: center (_x, _y) and per-axis scale; the *w variants appear
	// to be a working/pending copy — confirm against the implementation.
	float _x = 0;
	float _y = 0;
	float _scaleX = 2;
	float _scaleY = 2 * ASPECT_RATIO;    // Y scale corrected for screen aspect
	float _xw = 0;
	float _yw = 0;
	float _scaleXw = 2;
	float _scaleYw = 2 * ASPECT_RATIO;
	float _xSelect = 0;                  // coordinates of the selection cursor
	float _ySelect = 0;
	float _movement = 0.1;               // pan/step increment in graph units
	bool _select = false;                // whether selection mode is active
	bool _canidateForSeclect[5];         // per-slot selection candidates (sic)
	bool _graphChange = true;            // dirty flag: redraw graph on next display()
 public:
	void init();
	void display();
	// Cursor / viewport navigation.
	void moveUp();
	void moveDown();
	void moveRight();
	void moveLeft();
	// Text editing of the current function string.
	void insertChar(char c);
	void enter();
	void funktions();
	void delet();      // delete one char (sic: "delet")
	void deletAll();   // clear the current string
};

// Singleton instance defined in the corresponding .cpp.
extern GrapherClass Grapher;
#endif
|
(function (factory) {
    'use strict';
    /* global define:false */
    // UMD wrapper: register via AMD, CommonJS, or the global jQuery.
    if (typeof define !== 'undefined' && define.amd) {
        define(['jquery'], factory);
    } else if (typeof module === 'object' && module.exports) {
        module.exports = factory(require('jquery'));
    } else {
        factory(window.jQuery);
    }
})(function ($) {
    'use strict';
    // Auto-initialize reCAPTCHA placeholders inside palmtree forms on DOM ready.
    $(function () {
        $('.palmtree-form').each(function () {
            $(this).find('.g-recaptcha-autoload').palmtreeRecaptcha(this);
        });
    });
    var pluginName = 'palmtreeRecaptcha';
    /**
     * Renders a Google reCAPTCHA widget for a placeholder element and wires
     * its response token back into the owning form's hidden control.
     *
     * @param {HTMLElement} element reCAPTCHA placeholder (carries data-* config)
     * @param {HTMLElement} form    owning form element
     * @constructor
     */
    function Plugin(element, form) {
        this.$el = $(element);
        this.$form = $(form);
        var _this = this;
        // grecaptcha invokes this global (named by data-onload) once its
        // script — loaded below via $.getScript — is ready.
        window[this.$el.data('onload')] = function () {
            var widgetId = window.grecaptcha.render(_this.$el.attr('id'), {
                sitekey: _this.$el.data('site_key'),
                callback: function (response) {
                    // Store the response token in the hidden form control and
                    // clear any prior validation state on it.
                    var $formControl = $('#' + _this.$el.data('form_control'));
                    $formControl.val(response);
                    if (_this.$form.palmtreeForm('isInitialized')) {
                        _this.$form.palmtreeForm('clearState', $formControl);
                    }
                }
            });
            // Reset the widget after every submit attempt so a fresh token
            // can be obtained for the next one.
            _this.$form.on('error.palmtreeForm success.palmtreeForm', function () {
                window.grecaptcha.reset(widgetId);
            });
        };
        $.getScript(this.$el.data('script_url'));
    }
    // Standard jQuery plugin bridge; instantiates at most once per element.
    $.fn[pluginName] = function (form) {
        return this.each(function () {
            if (!$(this).data(pluginName)) {
                $(this).data(pluginName, new Plugin(this, form));
            }
        });
    };
    return $.fn[pluginName];
});
|
package com.ice.restring;
import android.content.res.Resources;
import android.content.res.XmlResourceParser;
import android.support.design.widget.BottomNavigationView;
import android.util.AttributeSet;
import android.util.Pair;
import android.util.Xml;
import android.view.View;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
/**
* A transformer which transforms BottomNavigationView: it transforms the texts coming from the menu.
*/
class BottomNavigationViewTransformer implements ViewTransformerManager.Transformer {
private static final String ATTRIBUTE_MENU = "menu";
private static final String ATTRIBUTE_APP_MENU = "app:menu";
private static final String ATTRIBUTE_ID = "id";
private static final String ATTRIBUTE_ANDROID_ID = "android:id";
private static final String ATTRIBUTE_TITLE = "title";
private static final String ATTRIBUTE_ANDROID_TITLE = "android:title";
private static final String ATTRIBUTE_TITLE_CONDENSED = "titleCondensed";
private static final String ATTRIBUTE_ANDROID_TITLE_CONDENSED = "android:titleCondensed";
private static final String XML_MENU = "menu";
private static final String XML_ITEM = "item";
@Override
public Class<? extends View> getViewType() {
return BottomNavigationView.class;
}
@Override
public View transform(View view, AttributeSet attrs) {
if (view == null || !getViewType().isInstance(view)) {
return view;
}
Resources resources = view.getContext().getResources();
BottomNavigationView bottomNavigationView = (BottomNavigationView) view;
for (int index = 0; index < attrs.getAttributeCount(); index++) {
String attributeName = attrs.getAttributeName(index);
switch (attributeName) {
case ATTRIBUTE_APP_MENU:
case ATTRIBUTE_MENU: {
String value = attrs.getAttributeValue(index);
if (value == null || !value.startsWith("@")) break;
int resId = attrs.getAttributeResourceValue(index, 0);
Map<Integer, MenuItemStrings> itemStrings = getMenuItemsStrings(resources, resId);
for (Map.Entry<Integer, MenuItemStrings> entry : itemStrings.entrySet()) {
if (entry.getValue().title != 0) {
bottomNavigationView.getMenu().findItem(entry.getKey()).setTitle(
resources.getString(entry.getValue().title)
);
}
if (entry.getValue().titleCondensed != 0) {
bottomNavigationView.getMenu().findItem(entry.getKey()).setTitleCondensed(
resources.getString(entry.getValue().titleCondensed)
);
}
}
break;
}
}
}
return view;
}
private Map<Integer, MenuItemStrings> getMenuItemsStrings(Resources resources, int resId) {
XmlResourceParser parser = resources.getLayout(resId);
AttributeSet attrs = Xml.asAttributeSet(parser);
try {
return parseMenu(parser, attrs);
} catch (XmlPullParserException | IOException e) {
e.printStackTrace();
return new HashMap<>();
}
}
private Map<Integer, MenuItemStrings> parseMenu(XmlPullParser parser, AttributeSet attrs)
throws XmlPullParserException, IOException {
Map<Integer, MenuItemStrings> menuItems = new HashMap<>();
int eventType = parser.getEventType();
String tagName;
// This loop will skip to the menu start tag
do {
if (eventType == XmlPullParser.START_TAG) {
tagName = parser.getName();
if (tagName.equals(XML_MENU)) {
eventType = parser.next();
break;
}
throw new RuntimeException("Expecting menu, got " + tagName);
}
eventType = parser.next();
} while (eventType != XmlPullParser.END_DOCUMENT);
boolean reachedEndOfMenu = false;
int menuLevel = 0;
while (!reachedEndOfMenu) {
switch (eventType) {
case XmlPullParser.START_TAG:
tagName = parser.getName();
if (tagName.equals(XML_ITEM)) {
Pair<Integer, MenuItemStrings> item = parseMenuItem(attrs);
if (item != null) {
menuItems.put(item.first, item.second);
}
} else if (tagName.equals(XML_MENU)) {
menuLevel++;
}
break;
case XmlPullParser.END_TAG:
tagName = parser.getName();
if (tagName.equals(XML_MENU)) {
menuLevel--;
if (menuLevel <= 0) {
reachedEndOfMenu = true;
}
}
break;
case XmlPullParser.END_DOCUMENT:
reachedEndOfMenu = true;
}
eventType = parser.next();
}
return menuItems;
}
private Pair<Integer, MenuItemStrings> parseMenuItem(AttributeSet attrs) {
int menuId = 0;
MenuItemStrings menuItemStrings = null;
int attributeCount = attrs.getAttributeCount();
for (int index = 0; index < attributeCount; index++) {
switch (attrs.getAttributeName(index)) {
case ATTRIBUTE_ANDROID_ID:
case ATTRIBUTE_ID: {
menuId = attrs.getAttributeResourceValue(index, 0);
break;
}
case ATTRIBUTE_ANDROID_TITLE:
case ATTRIBUTE_TITLE: {
String value = attrs.getAttributeValue(index);
if (value == null || !value.startsWith("@")) break;
if (menuItemStrings == null) {
menuItemStrings = new MenuItemStrings();
}
menuItemStrings.title = attrs.getAttributeResourceValue(index, 0);
break;
}
case ATTRIBUTE_ANDROID_TITLE_CONDENSED:
case ATTRIBUTE_TITLE_CONDENSED: {
String value = attrs.getAttributeValue(index);
if (value == null || !value.startsWith("@")) break;
if (menuItemStrings == null) {
menuItemStrings = new MenuItemStrings();
}
menuItemStrings.titleCondensed = attrs.getAttributeResourceValue(index, 0);
break;
}
}
}
return (menuId != 0 && menuItemStrings != null)
? new Pair<>(menuId, menuItemStrings)
: null;
}
private static class MenuItemStrings {
public int title;
public int titleCondensed;
}
}
|
<filename>src/components/ScrollWrapper.js<gh_stars>0
import React from 'react'
import styled from 'styled-components'
import Menu from './Menu'
// Full-viewport scroll container: vertical scrolling only, momentum scrolling
// on iOS, 36px padding (15px under 600px wide). The 36px padding matches the
// scroll threshold used by ScrollWrapper's handler.
export const ScrollArea = styled.main`
  height: 100vh;
  max-width: 2000px;
  margin: 0 auto;
  width: 100vw;
  overflow-x: hidden;
  overflow-y: scroll;
  -webkit-overflow-scrolling: touch;
  position: relative;
  padding: 36px;
  @media (max-width: 600px) {
    padding: 15px;
  }
`
class ScrollWrapper extends React.Component {
constructor(props) {
super(props)
this.state = {
pagetop: true
}
}
handleScroll = e => {
const top = e.target.scrollTop > 36
if (top) {
this.setState({
pagetop: false,
})
} else {
this.setState({
pagetop: true,
})
}
}
render() {
const { children } = this.props
const anger = this.props.homePage
return (
<ScrollArea onScroll={this.handleScroll}>
<Menu id="navBar" pagetop={this.state.pagetop} darknav={this.props.darknav} homePage={anger}/>
{children}
</ScrollArea>
)
}
}
export default ScrollWrapper
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.