text stringlengths 11 4.05M |
|---|
package main
import (
"fmt"
)
// 349. Intersection of Two Arrays
// Given two arrays, compute the distinct values common to both.
// https://leetcode-cn.com/problems/intersection-of-two-arrays/

// main runs the sample case from the problem statement.
func main() {
	a := []int{1, 2, 2, 1}
	b := []int{2, 2}
	fmt.Println(intersection(a, b))
}
// Approach 1: brute force, O(m*n) where m, n are the two lengths.
// Approach 2 (used here): record nums1 in a set, then scan nums2 — O(m+n).
//
// intersection returns the distinct values present in both nums1 and
// nums2, ordered by first occurrence in nums2.
func intersection(nums1 []int, nums2 []int) (result []int) {
	// A set suffices; the original counted occurrences with map[int]int
	// but never used the counts.
	seen := make(map[int]struct{}, len(nums1))
	for _, num := range nums1 {
		seen[num] = struct{}{}
	}
	for _, num := range nums2 {
		if _, ok := seen[num]; ok {
			result = append(result, num)
			// Delete so duplicates in nums2 are emitted only once.
			delete(seen, num)
		}
	}
	return result
}
|
package mapreduce
import (
"bufio"
"encoding/json"
"io"
"os"
"sort"
)
// doReduce manages one reduce task: it reads the intermediate files
// written for this task by every map task, sorts the key/value pairs by
// key, calls reduceF once per distinct key with all of that key's
// values, and writes the results to outFile as JSON-encoded KeyValue
// objects (the format the final merger expects).
func doReduce(
	jobName string, // the name of the whole MapReduce job
	reduceTask int, // which reduce task this is
	outFile string, // write the output here
	nMap int, // the number of map tasks that were run ("M" in the paper)
	reduceF func(key string, values []string) string,
) {
	// readTaskFile decodes every KeyValue from one intermediate file.
	// A json.Decoder (instead of line-oriented ReadLine) is robust
	// against lines longer than the buffer (the original ignored the
	// isPrefix result) and does not drop a trailing record that lacks a
	// final newline (the original broke on io.EOF before appending).
	readTaskFile := func(name string) []KeyValue {
		file, err := os.Open(name)
		if err != nil {
			panic(err)
		}
		// Close promptly; a defer in the caller's loop would hold every
		// intermediate file open until the whole task finished.
		defer file.Close()

		var kvs []KeyValue
		dec := json.NewDecoder(bufio.NewReader(file))
		for {
			var kv KeyValue
			if err := dec.Decode(&kv); err != nil {
				if err == io.EOF {
					break
				}
				// The original silently ignored decode errors.
				panic(err)
			}
			kvs = append(kvs, kv)
		}
		return kvs
	}

	var keyValues []KeyValue
	for m := 0; m < nMap; m++ {
		keyValues = append(keyValues, readTaskFile(reduceName(jobName, m, reduceTask))...)
	}

	// O_TRUNC added: without it, rerunning over a longer stale output
	// file would leave trailing garbage.
	oFile, err := os.OpenFile(outFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
	if err != nil {
		panic(err)
	}
	defer oFile.Close()
	enc := json.NewEncoder(oFile)

	// No input: leave outFile empty. The original indexed keyValues[0]
	// unconditionally and panicked here.
	if len(keyValues) == 0 {
		return
	}

	sort.Slice(keyValues, func(i, j int) bool {
		return keyValues[i].Key < keyValues[j].Key
	})

	// Walk the sorted pairs, flushing each run of equal keys through
	// reduceF as soon as the key changes.
	curKey := keyValues[0].Key
	var values []string
	for _, kv := range keyValues {
		if kv.Key == curKey {
			values = append(values, kv.Value)
		} else {
			enc.Encode(KeyValue{curKey, reduceF(curKey, values)})
			curKey = kv.Key
			values = []string{kv.Value}
		}
	}
	// Flush the final group.
	enc.Encode(KeyValue{curKey, reduceF(curKey, values)})
}
|
package mqtt
import (
"io"
"fmt"
"github.com/eclipse/paho.mqtt.golang"
"mqtt-adapter/src/config"
)
// qos is the MQTT quality-of-service level for this adapter: 0 means
// "at most once" (fire-and-forget) delivery.
const qos = 0
// Subscriber is an interface that describes behavior of a subscriber to MQTT.
type Subscriber interface {
	// Subscribe delivers messages arriving on topic to writer.
	Subscribe(topic string, writer io.Writer)
	// SubscribeBridge delivers messages arriving on topic to msgChan.
	SubscribeBridge(topic string, msgChan chan<- string)
	// Disconnect tears down the underlying MQTT connection.
	Disconnect()
}
// Publisher is an interface that describes behavior of a publisher to MQTT.
type Publisher interface {
	// Publish sends msg, returning a non-nil error on failure.
	Publish(msg string) error
	// Disconnect tears down the underlying MQTT connection.
	Disconnect()
}
// Message represents the model of an MQTT message.
type Message struct {
	Topic string `json:"topic"`
}
// newClient builds an MQTT client for the given broker URL and client ID,
// applying credentials when either field is non-empty, and returns it
// only after a successful connect.
func newClient(broker, clientID string, credo config.Credentials) (mqtt.Client, error) {
	opts := mqtt.NewClientOptions().
		AddBroker(broker).
		SetCleanSession(true).
		SetClientID(clientID)
	if credo.UserName != "" || credo.Password != "" {
		opts.SetUsername(credo.UserName)
		opts.SetPassword(credo.Password)
	}
	client := mqtt.NewClient(opts)
	token := client.Connect()
	if token.Wait() && token.Error() != nil {
		return nil, fmt.Errorf("cannot connect to MQTT broker (%s): %v", broker, token.Error())
	}
	return client, nil
}
// NewMQTTClients creates and initializes publisher and listener
func NewMQTTClients(conf *config.Configuration) (pub Publisher, sub Subscriber, err error) {
	listenerID := fmt.Sprintf("%s_%s_%s_lis", conf.Name, conf.Host, conf.UUID)
	listenerClient, err := newClient(conf.MQTTListenerURL, listenerID, conf.ListCredo)
	if err != nil {
		return nil, nil, err
	}
	sub = &subscriber{client: listenerClient}
	// When listener and publisher share one broker (conf.Same), reuse the
	// listener connection for publishing too.
	if conf.Same {
		pub = &publisher{client: listenerClient}
		return pub, sub, nil
	}
	publisherID := fmt.Sprintf("%s_%s_%s_pub", conf.Name, conf.Host, conf.UUID)
	publisherClient, err := newClient(conf.MQTTPublisherURL, publisherID, conf.PubCredo)
	if err != nil {
		return nil, nil, err
	}
	pub = &publisher{client: publisherClient}
	return pub, sub, nil
}
|
package agent
import (
"log"
"strconv"
"time"
"github.com/google/uuid"
zmq "github.com/pebbe/zmq4"
"google.golang.org/protobuf/proto"
"github.com/Project-Auxo/Olympus/pkg/mdapi"
"github.com/Project-Auxo/Olympus/pkg/util"
mdapi_pb "github.com/Project-Auxo/Olympus/proto/mdapi"
)
/*
broker
^
| (Broker Interface)
(dealer)
____coordinator____
| (router) | (Worker Interface)
| | |
(dealer) ... ...
worker worker worker
*/
const (
	// heartbeatLiveness is how many silent heartbeat intervals are
	// tolerated before the broker connection is considered dead.
	heartbeatLiveness = 3
)
const (
	// workersEndpoint is the in-process endpoint used for the
	// coordinator's worker-facing socket.
	workersEndpoint = "inproc://workers"
)
// Coordinator sits between the broker and this actor's workers: one
// DEALER socket faces the broker, another faces in-process workers, and a
// poller multiplexes the two (see the diagram above).
type Coordinator struct {
	actorName string
	brokerSocket *zmq.Socket // Interface with the broker.
	workerSocket *zmq.Socket // Interface with the internal workers.
	broker string // Coordinator connects to broker through this endpoint.
	endpoint string // Coordinator binds to this endpoint.
	poller *zmq.Poller
	runningWorkers map[string]*Worker // Live workers keyed by UUID string.
	verbose bool // Print activity to stdout
	// services map[string]*Worker // Hash of current running services.
	loadableServices []string // Services this actor can offer the broker.
	// Heartbeat management.
	heartbeatAt time.Time // When to send heartbeat.
	liveness int // How many attempts left.
	heartbeat time.Duration // Heartbeat delay, msecs.
	reconnect time.Duration // Reconnect delay, msecs.
}
// NewCoordinator is the constructor for the coordinator. It connects to
// the broker (which registers the broker socket with the poller), then
// creates and registers the worker-facing socket.
func NewCoordinator(
	actorName string,
	broker string,
	endpoint string,
	loadableServices []string,
	verbose bool) (coordinator *Coordinator, err error) {
	// Initialize coordinator state.
	coordinator = &Coordinator{
		actorName: actorName,
		broker: broker,
		endpoint: endpoint,
		poller: zmq.NewPoller(),
		loadableServices: loadableServices,
		runningWorkers: make(map[string]*Worker),
		heartbeat: 1000 * time.Millisecond,
		reconnect: 1000 * time.Millisecond,
		verbose: verbose,
	}
	// Sets up brokerSocket, rebuilds the poller, and registers the broker
	// socket with it — so it must NOT be added again here (the original
	// registered it a second time).
	coordinator.ConnectToBroker()
	coordinator.workerSocket, err = zmq.NewSocket(zmq.DEALER)
	if err != nil {
		// The original ignored this error and registered a nil socket.
		return nil, err
	}
	coordinator.poller.Add(coordinator.workerSocket, zmq.POLLIN)
	return coordinator, nil
}
// Bind binds the coordinator's worker-facing socket to an endpoint.
//
// NOTE(review): on failure log.Fatalf exits the process, so the error
// return below is unreachable and callers can never observe a bind
// failure — consider returning the error instead; confirm callers.
func (coordinator *Coordinator) Bind(endpoint string) (err error) {
	err = coordinator.workerSocket.Bind(endpoint)
	if err != nil {
		log.Fatalf("E: coordinator failed to bind at %s", endpoint)
	}
	log.Printf("C: Coordinator is active at %s", endpoint)
	return
}
// Run is the coordinator's main loop: poll both sockets, dispatch
// whatever arrives, count down broker liveness on poll timeouts
// (reconnecting when it reaches zero), and emit heartbeats on schedule.
// It never returns.
func (coordinator *Coordinator) Run() {
	for {
		polledSockets, err := coordinator.poller.Poll(coordinator.heartbeat)
		if err != nil {
			panic(err)
		}
		// Handle the broker and worker sockets in turn.
		if len(polledSockets) > 0 {
			for _, socket := range polledSockets {
				switch s := socket.Socket; s {
				case coordinator.brokerSocket:
					coordinator.RecvFromBroker()
				case coordinator.workerSocket:
					coordinator.RecvFromWorkers()
				}
			}
		} else { // gofmt fix: original had "}else {"
			// Poll timed out: the broker has been silent for one interval.
			coordinator.liveness--
			if coordinator.liveness == 0 {
				if coordinator.verbose {
					log.Println("C: disconnected from broker, retrying...")
				}
				time.Sleep(coordinator.reconnect)
				coordinator.ConnectToBroker()
			}
		}
		// Send heartbeat if it's time.
		if time.Now().After(coordinator.heartbeatAt) {
			heartbeatProto, _ := coordinator.PackageProto(
				mdapi_pb.CommandTypes_HEARTBEAT, []string{}, Args{})
			coordinator.SendToEntity(heartbeatProto, mdapi_pb.Entities_BROKER, Args{})
			coordinator.heartbeatAt = time.Now().Add(coordinator.heartbeat)
		}
	}
}
// Close releases both of the coordinator's sockets. Safe to call more
// than once.
func (coordinator *Coordinator) Close() {
	if coordinator.brokerSocket != nil {
		coordinator.brokerSocket.Close()
		coordinator.brokerSocket = nil
	}
	// The original closed only the broker socket and leaked the
	// worker-facing one.
	if coordinator.workerSocket != nil {
		coordinator.workerSocket.Close()
		coordinator.workerSocket = nil
	}
}
// ----------------- Broker Interface ------------------------

// PackageProto will marshal the given information into the correct bytes
// package: a WrapperCommand carrying this actor's header plus the
// command-specific payload. msg is the request/reply body (unused for
// READY/HEARTBEAT/DISCONNECT); args supplies the service name and, for
// replies, the reply address. Unknown command types abort the process.
func (coordinator *Coordinator) PackageProto(
	commandType mdapi_pb.CommandTypes, msg []string,
	args Args) (msgProto *mdapi_pb.WrapperCommand, err error) {
	msgProto = &mdapi_pb.WrapperCommand{
		Header: &mdapi_pb.Header{
			Type: commandType,
			Entity: mdapi_pb.Entities_ACTOR,
			Origin: coordinator.actorName,
			Address: "Nil",
		},
	}
	switch commandType {
	case mdapi_pb.CommandTypes_READY:
		msgProto.Command = &mdapi_pb.WrapperCommand_Ready{
			Ready: &mdapi_pb.Ready{AvailableServices: coordinator.loadableServices},
		}
	case mdapi_pb.CommandTypes_REQUEST:
		msgProto.Command = &mdapi_pb.WrapperCommand_Request{
			Request: &mdapi_pb.Request{
				ServiceName: args.ServiceName,
				RequestBody: &mdapi_pb.Request_Body{Body: &mdapi_pb.Body{Body: msg}},
			},
		}
	case mdapi_pb.CommandTypes_REPLY:
		msgProto.Command = &mdapi_pb.WrapperCommand_Reply{
			Reply: &mdapi_pb.Reply{
				ServiceName: args.ServiceName,
				ReplyAddress: args.ReplyAddress,
				ReplyBody: &mdapi_pb.Reply_Body{Body: &mdapi_pb.Body{Body: msg}},
			},
		}
	case mdapi_pb.CommandTypes_HEARTBEAT:
		msgProto.Command = &mdapi_pb.WrapperCommand_Heartbeat{
			Heartbeat: &mdapi_pb.Heartbeat{
				AvailableServices: coordinator.loadableServices,
			},
		}
	case mdapi_pb.CommandTypes_DISCONNECT:
		// FIXME: Insert the expiration time here.
		msgProto.Command = &mdapi_pb.WrapperCommand_Disconnect{}
	default:
		// Typo fix: the original logged "uknown".
		log.Fatalf("E: unknown commandType %q", commandType)
	}
	return
}
// ForwardProto wraps msgProto in a ForwardedCommand whose header names
// this actor as the forwarder; the wrapped message itself is untouched.
func (coordinator *Coordinator) ForwardProto(
	msgProto *mdapi_pb.WrapperCommand) *mdapi_pb.ForwardedCommand {
	header := &mdapi_pb.Header{
		Type: mdapi_pb.CommandTypes_FORWARDED,
		Entity: mdapi_pb.Entities_ACTOR,
		Origin: coordinator.actorName,
		Address: coordinator.actorName,
	}
	return &mdapi_pb.ForwardedCommand{
		Header: header,
		ForwardedCommand: msgProto,
	}
}
// SendToEntity sends msgProto to the broker or to a specific worker,
// wrapping it via ForwardProto when args.Forward is set. Worker sends
// require args.WorkerIdentity. The first frame is a 0/1 forward flag so
// the receiver knows how to unmarshal the payload.
func (coordinator *Coordinator) SendToEntity(msgProto *mdapi_pb.WrapperCommand,
	entity mdapi_pb.Entities, args Args) (err error) {
	commandType := msgProto.GetHeader().GetType()
	var msgBytes []byte
	if args.Forward {
		msgBytes, err = proto.Marshal(coordinator.ForwardProto(msgProto))
	} else { // gofmt fix: original had "}else {"
		msgBytes, err = proto.Marshal(msgProto)
	}
	if err != nil {
		panic(err)
	}
	if coordinator.verbose {
		forwardMap := map[bool]string{false: "sending", true: "forwarding"}
		log.Printf("C: %s %s to %s\n", forwardMap[args.Forward],
			mdapi.CommandMap[commandType], mdapi.EntitiesMap[entity])
	}
	switch entity {
	case mdapi_pb.Entities_BROKER:
		// Messages can be forwarded, so the flag frame precedes the payload.
		_, err = coordinator.brokerSocket.SendMessage(
			util.Btou(args.Forward), msgBytes)
	case mdapi_pb.Entities_WORKER:
		// Worker sends are addressed by the worker's UUID frame.
		// NOTE(review): MarshalBinary's error is dropped — presumably it
		// cannot fail for a uuid.UUID; confirm against the uuid package.
		byteId, _ := args.WorkerIdentity.MarshalBinary()
		_, err = coordinator.workerSocket.SendMessage(
			byteId, util.Btou(args.Forward), msgBytes)
	default:
		log.Fatal("E: unrecognized entity")
	}
	return
}
// ConnectToBroker attempts to connect or reconnect to the broker: it
// replaces the broker socket, rebuilds the poller, registers this actor
// with a READY message, and resets liveness.
func (coordinator *Coordinator) ConnectToBroker() (err error) {
	if coordinator.brokerSocket != nil {
		coordinator.brokerSocket.Close()
		coordinator.brokerSocket = nil
	}
	// The original ignored both of these errors and proceeded with a
	// broken socket.
	if coordinator.brokerSocket, err = zmq.NewSocket(zmq.DEALER); err != nil {
		return err
	}
	if err = coordinator.brokerSocket.Connect(coordinator.broker); err != nil {
		return err
	}
	if coordinator.verbose {
		log.Printf("C: connecting to broker at %s...\n", coordinator.broker)
	}
	// Rebuild the poller. The original added only the broker socket here,
	// silently dropping the worker socket from polling after any
	// reconnect; re-register it when it exists (it is nil during the
	// initial call from NewCoordinator, which adds it afterwards).
	coordinator.poller = zmq.NewPoller()
	coordinator.poller.Add(coordinator.brokerSocket, zmq.POLLIN)
	if coordinator.workerSocket != nil {
		coordinator.poller.Add(coordinator.workerSocket, zmq.POLLIN)
	}
	// Register coordinator with the broker.
	readyProto, err := coordinator.PackageProto(
		mdapi_pb.CommandTypes_READY, []string{}, Args{})
	if err != nil {
		return err
	}
	coordinator.SendToEntity(readyProto, mdapi_pb.Entities_BROKER, Args{})
	// If liveness hits zero, queue is considered disconnected.
	coordinator.liveness = heartbeatLiveness
	coordinator.heartbeatAt = time.Now().Add(coordinator.heartbeat)
	return nil
}
// RecvFromBroker waits for the next message from the broker, unwraps it
// (forwarded or plain), asserts it originated at a broker, and dispatches
// on its command type. Receiving anything refreshes liveness.
func (coordinator *Coordinator) RecvFromBroker() {
	recvBytes, err := coordinator.brokerSocket.RecvMessageBytes(0)
	// Check the error BEFORE touching recvBytes; the original indexed
	// recvBytes[0] first, panicking opaquely on a failed receive.
	if err != nil {
		panic(err)
	}
	// Frame 0 is a "0"/"1" flag: does frame 1 hold a plain WrapperCommand
	// or a ForwardedCommand?
	forwarded, _ := strconv.Atoi(string(recvBytes[0]))
	coordinator.liveness = heartbeatLiveness
	var msgProto *mdapi_pb.WrapperCommand
	var fromEntity mdapi_pb.Entities
	// The resultant msgProto is always a WrapperCommand, i.e. we never
	// receive a forwarded-forwarded message.
	if forwarded == 1 {
		forwardedProto := &mdapi_pb.ForwardedCommand{}
		if err = proto.Unmarshal(recvBytes[1], forwardedProto); err != nil {
			log.Fatalln("E: failed to parse forwarded command:", err)
		}
		msgProto = forwardedProto.GetForwardedCommand()
		fromEntity = forwardedProto.GetHeader().GetEntity()
	} else { // gofmt fix: original had "}else {"
		msgProto = &mdapi_pb.WrapperCommand{}
		// Bug fix: the original discarded Unmarshal's result and tested a
		// stale err (already known nil), silently accepting corrupt input.
		if err = proto.Unmarshal(recvBytes[1], msgProto); err != nil {
			log.Fatalln("E: failed to parse for wrapped command:", err)
		}
		fromEntity = msgProto.GetHeader().GetEntity()
	}
	if coordinator.verbose {
		log.Printf("C: received message from broker: %q\n", msgProto)
	}
	// Don't try to handle errors, just assert noisily.
	if fromEntity != mdapi_pb.Entities_BROKER {
		panic("E: received message is not from a broker.")
	}
	command := msgProto.GetHeader().GetType()
	switch command {
	case mdapi_pb.CommandTypes_REQUEST:
		// We have a request to process.
		coordinator.DispatchRequests(msgProto)
	case mdapi_pb.CommandTypes_HEARTBEAT:
		// Heartbeats only refresh liveness, which happened above.
	case mdapi_pb.CommandTypes_DISCONNECT:
		// FIXME: Not sure if the disconnect should correspond to reconnect.
		coordinator.ConnectToBroker()
	default:
		log.Printf("E: invalid input message %q\n", command)
	}
}
// ----------------- Worker Interface ------------------------

// DispatchRequests spawns a fresh worker for an incoming request and
// forwards the request proto to it over the worker socket.
//
// NOTE(review): SpawnWorker runs on a new goroutine while the send below
// happens immediately; correctness relies on the socket queueing the
// message until the worker is ready — confirm this ordering is safe.
func (coordinator *Coordinator) DispatchRequests(
	requestProto *mdapi_pb.WrapperCommand) {
	serviceName := requestProto.GetRequest().GetServiceName()
	id := uuid.New() /* TODO: Make this a channel so id can be made in the
	spawn worker function. */
	go coordinator.SpawnWorker(id)
	// Forward the request to the worker that was just spawned.
	coordinator.SendToEntity(requestProto, mdapi_pb.Entities_WORKER, Args{
		ServiceName: serviceName, WorkerIdentity: id, Forward: true,})
}
// SpawnWorker creates the worker for id, records it in runningWorkers,
// and runs its work loop; intended to be launched on its own goroutine
// (see DispatchRequests).
//
// NOTE(review): runningWorkers is written here from a spawned goroutine
// and read/deleted by KillWorker on the main loop with no synchronization
// — looks like a data race; confirm whether a mutex is needed.
func (coordinator *Coordinator) SpawnWorker(id uuid.UUID) {
	stringId := id.String()
	// NewWorker's error is discarded.
	worker, _ := NewWorker(
		id, workersEndpoint, coordinator.verbose, stringId)
	// Coordinator waits for worker to register itself.
	// coordinator.workerSocket.RecvMessageBytes(0)
	coordinator.runningWorkers[stringId] = worker
	worker.Work()
}
// KillWorker sends a DISCONNECT to the worker identified by id and drops
// it from the running-worker table; unknown ids are logged and ignored.
func (coordinator *Coordinator) KillWorker(id uuid.UUID) {
	key := id.String()
	if _, ok := coordinator.runningWorkers[key]; !ok {
		log.Printf("E: worker %s does not exist", key)
		return
	}
	disconnectProto, _ := coordinator.PackageProto(
		mdapi_pb.CommandTypes_DISCONNECT, []string{}, Args{})
	coordinator.SendToEntity(
		disconnectProto, mdapi_pb.Entities_WORKER, Args{WorkerIdentity: id})
	delete(coordinator.runningWorkers, key)
}
// RecvFromWorkers handles one message from the worker-facing socket:
// READY is ignored; REPLY retires the worker and relays its reply to the
// broker unchanged.
func (coordinator *Coordinator) RecvFromWorkers() {
	recvBytes, err := coordinator.workerSocket.RecvMessageBytes(0)
	if err != nil {
		// The original dropped this error and then indexed recvBytes,
		// which panics opaquely on a failed receive.
		panic(err)
	}
	msgProto := &mdapi_pb.WrapperCommand{}
	if err := proto.Unmarshal(recvBytes[0], msgProto); err != nil {
		log.Fatalln("E: failed to parse wrapper command:", err)
	}
	// Don't try to handle errors, just assert noisily.
	if msgProto.GetHeader().GetEntity() != mdapi_pb.Entities_WORKER {
		panic("E: received message is not from a worker.")
	}
	if coordinator.verbose {
		log.Printf("C: received message from worker: %q\n", msgProto)
	}
	command := msgProto.GetHeader().GetType()
	switch command {
	case mdapi_pb.CommandTypes_READY:
		// Do nothing on Ready.
	case mdapi_pb.CommandTypes_REPLY:
		// Work is complete: retire the worker, then forward the reply to
		// the broker unchanged. The worker's origin header carries its UUID.
		id, _ := uuid.Parse(msgProto.GetHeader().GetOrigin())
		coordinator.KillWorker(id)
		coordinator.SendToEntity(
			msgProto, mdapi_pb.Entities_BROKER, Args{Forward: true})
	default:
		log.Printf("E: invalid input message %q\n", command)
	}
}
|
package leetcode
/*
* LeetCode T1704. 二分查找
* https://leetcode-cn.com/problems/binary-search/
*
* 给定一个 n 个元素有序的(升序)整型数组 nums 和一个目标值 target ,
* 写一个函数搜索 nums 中的 target,如果目标值存在返回下标,否则返回 -1。
*/
// binarySearch returns the index of target in the ascending slice nums,
// or -1 when absent. Classic closed-interval search over [lo, hi];
// O(log n) time, O(1) space.
func binarySearch(nums []int, target int) int {
	lo, hi := 0, len(nums)-1
	// Invariant: if target is present, it lies within nums[lo..hi].
	for lo <= hi {
		mid := lo + (hi-lo)/2 // overflow-safe midpoint
		switch {
		case nums[mid] == target:
			return mid
		case nums[mid] > target:
			hi = mid - 1 // mid already ruled out
		default:
			lo = mid + 1
		}
	}
	return -1
}
// Reference: https://leetcode-cn.com/problems/binary-search/solution/er-fen-cha-zhao-xiang-jie-by-labuladong/

// binarySearchFindLeftBound returns the index of the FIRST occurrence of
// target in the ascending slice nums, or -1 when target is absent.
// e.g. [1, 2, 2, 2, 3], 2 -> 1
func binarySearchFindLeftBound(nums []int, target int) int {
	numsLen := len(nums)
	// Guard the empty slice (the original indexed nums[0] and panicked)
	// and targets outside the value range.
	if numsLen == 0 || nums[0] > target || nums[numsLen-1] < target {
		return -1
	}
	l, r := 0, numsLen-1
	for l <= r {
		mid := l + (r-l)>>1
		// On a hit keep shrinking the right edge so the loop converges
		// on the leftmost copy.
		if nums[mid] >= target {
			r = mid - 1
		} else {
			l = mid + 1
		}
	}
	// l is the first index with nums[l] >= target (in range because
	// nums[numsLen-1] >= target). The original returned this insertion
	// point even when target was absent; report -1 instead.
	if nums[l] != target {
		return -1
	}
	return l
}
// binarySearchFindRightBound returns the index of the LAST occurrence of
// target in the ascending slice nums, or -1 when target is absent.
// e.g. [1, 2, 2, 2, 3], 2 -> 3
func binarySearchFindRightBound(nums []int, target int) int {
	numsLen := len(nums)
	// Guard the empty slice (the original indexed nums[0] and panicked)
	// and targets outside the value range.
	if numsLen == 0 || nums[0] > target || nums[numsLen-1] < target {
		return -1
	}
	l, r := 0, numsLen-1
	for l <= r {
		mid := l + (r-l)>>1
		// On a hit keep shrinking the left edge so the loop converges on
		// the rightmost copy.
		if nums[mid] <= target {
			l = mid + 1
		} else {
			r = mid - 1
		}
	}
	// r is the last index with nums[r] <= target (>= 0 because
	// nums[0] <= target). The original returned this index even when
	// target was absent; report -1 instead.
	if nums[r] != target {
		return -1
	}
	return r
}
/*
 * LeetCode T33. Search in Rotated Sorted Array
 * https://leetcode-cn.com/problems/search-in-rotated-sorted-array/
 *
 * An ascending array with distinct elements has been rotated at some
 * unknown pivot. Find target's index, or -1, in O(log n).
 */
// At every step exactly one half of [lo, hi] is sorted; test whether
// target lies inside that sorted half and recurse into the right side.
func search(nums []int, target int) int {
	lo, hi := 0, len(nums)-1
	for lo <= hi {
		mid := lo + (hi-lo)/2
		v := nums[mid]
		if v == target {
			return mid
		}
		if v >= nums[lo] { // left half [lo, mid] is sorted (boundary included)
			if nums[lo] <= target && target < v {
				hi = mid - 1
			} else {
				lo = mid + 1
			}
		} else { // right half [mid, hi] is sorted
			if v < target && target <= nums[hi] {
				lo = mid + 1
			} else {
				hi = mid - 1
			}
		}
	}
	return -1
}
/*
 * LeetCode T81. Search in Rotated Sorted Array II
 * https://leetcode-cn.com/problems/search-in-rotated-sorted-array-ii/
 */
// search2 reports whether target occurs in a rotated ascending array that
// may contain duplicates. Same strategy as T33, except when
// nums[mid] == nums[lo] the sorted half cannot be identified, so the left
// edge shrinks by one — degrading to O(n) in the worst case.
func search2(nums []int, target int) bool {
	if len(nums) == 0 {
		return false
	}
	lo, hi := 0, len(nums)-1
	for lo <= hi {
		mid := lo + (hi-lo)/2
		switch {
		case nums[mid] == target:
			return true
		case nums[mid] == nums[lo]:
			// Ambiguous: duplicates hide which side holds the pivot.
			lo++
		case nums[mid] > nums[lo]: // left half sorted
			if nums[lo] <= target && target < nums[mid] {
				hi = mid - 1
			} else {
				lo = mid + 1
			}
		default: // right half sorted
			if nums[mid] < target && target <= nums[hi] {
				lo = mid + 1
			} else {
				hi = mid - 1
			}
		}
	}
	return false
}
/*
 * LeetCode T153. Find Minimum in Rotated Sorted Array
 * https://leetcode-cn.com/problems/find-minimum-in-rotated-sorted-array/
 *
 * An ascending array with distinct elements was rotated at an unknown
 * pivot (a fully sorted array, including a single element, is the
 * degenerate case). Return the minimum element.
 * Example: [3,4,5,1,2] -> 1
 */
func findMin(nums []int) int {
	l, r := 0, len(nums)-1
	// l < r keeps mid+1 in range; single-element slices skip the loop.
	for l < r {
		mid := l + (r-l)>>1
		// The minimum is the only element smaller than its neighbor.
		if nums[mid] > nums[mid+1] {
			return nums[mid+1]
		}
		// Guard mid > 0: the original evaluated nums[mid-1] when mid was
		// 0 and panicked on already-sorted inputs such as [1, 2].
		if mid > 0 && nums[mid] < nums[mid-1] {
			return nums[mid]
		}
		if nums[mid] > nums[r] {
			l = mid + 1 // pivot (and minimum) is to the right of mid
		} else {
			r = mid - 1 // minimum is at or left of mid; early returns above cover mid
		}
	}
	return nums[l]
}
/*
 * LeetCode T154. Find Minimum in Rotated Sorted Array II
 * Like T153, but the array may contain duplicates.
 */
// findMin2 compares nums[mid] against nums[hi]. When they are equal the
// pivot's side is unknowable, so the right edge shrinks by one — worst
// case O(n) with many duplicates, O(log n) otherwise. This solution also
// solves T153 (duplicate-free input just never hits the equal case).
func findMin2(nums []int) int {
	lo, hi := 0, len(nums)-1
	// Loop narrows [lo, hi] until it pins the minimum (lo == hi).
	for lo < hi {
		mid := lo + (hi-lo)/2
		switch {
		case nums[mid] > nums[hi]:
			// Pivot lies strictly right of mid.
			lo = mid + 1
		case nums[mid] < nums[hi]:
			// Minimum is at mid or to its left; keep mid as a candidate.
			hi = mid
		default:
			// nums[mid] == nums[hi]: cannot tell the side, shrink by one.
			// (Comparing against nums[lo] instead would misclassify fully
			// sorted input like [1,2,3,4,5,6].)
			hi--
		}
	}
	return nums[lo]
}
/*
 * LeetCode T69. Sqrt(x)
 * https://leetcode-cn.com/problems/sqrtx/
 * Return the integer square root of the non-negative integer x (the
 * fractional part is truncated), e.g. 4 -> 2, 8 -> 2.
 */
// Binary search over the candidate range [0, x].
func MySqrt(x int) int {
	lo, hi := 0, x
	for lo <= hi {
		mid := lo + (hi-lo)/2
		sq := mid * mid
		switch {
		case sq == x:
			return mid
		case sq > x:
			hi = mid - 1
		default:
			lo = mid + 1
		}
	}
	// hi has converged on the largest value whose square does not
	// exceed x — exactly floor(sqrt(x)).
	return hi
}
/*
 * LeetCode T287. Find the Duplicate Number
 * https://leetcode-cn.com/problems/find-the-duplicate-number/
 * nums holds n+1 integers, each in [1, n], so at least one value repeats;
 * exactly one value is duplicated (possibly more than twice). Find it
 * without modifying the array, in O(1) extra space, faster than O(n^2).
 */
// Method 1: binary search on the VALUE range [1, n] (not on indices).
// Pigeonhole: if more than v elements are <= v, the duplicate is <= v.
// O(n log n) time — each probe scans the whole slice — O(1) space.
// Returns -1 for an empty slice.
func FindDuplicate(nums []int) int {
	if len(nums) == 0 {
		return -1
	}
	lo, hi := 1, len(nums)-1
	for lo < hi {
		mid := lo + (hi-lo)/2
		count := 0
		for _, v := range nums {
			if v <= mid { // mid itself counts
				count++
			}
		}
		if count > mid {
			// Duplicate lies in [lo, mid]; mid itself may be it.
			hi = mid
		} else {
			// Duplicate lies in [mid+1, hi].
			lo = mid + 1
		}
	}
	return lo
}
// Method 2: Floyd's tortoise-and-hare. The map i -> nums[i] forms a
// functional graph; a duplicated value means two indices point at the
// same node, i.e. a cycle whose entrance is the duplicate. O(n) time,
// O(1) space. Returns -1 for an empty slice.
func FindDuplicate2(nums []int) int {
	if len(nums) == 0 {
		return -1
	}
	// Phase 1: tortoise steps once, hare twice, until they meet inside
	// the cycle. (Starting one/two steps in is equivalent to starting
	// both at index 0.)
	tortoise, hare := nums[0], nums[nums[0]]
	for tortoise != hare {
		tortoise = nums[tortoise]
		hare = nums[nums[hare]]
	}
	// Phase 2: restart one pointer at the origin; stepping both by one,
	// they meet exactly at the cycle entrance — the duplicate.
	a, b := 0, tortoise
	for a != b {
		a = nums[a]
		b = nums[b]
	}
	return a
}
// 方法 3:先排序,再找重复的数,不符合约束条件
// 方法 4:借助哈希表,不符合约束条件
/*
 * LeetCode T35. Search Insert Position
 * https://leetcode-cn.com/problems/search-insert-position/
 * Given a sorted, duplicate-free array and a target, return target's
 * index if present, otherwise the index where it would be inserted to
 * keep the array sorted.
 * Examples: [1,3,5,6], 5 -> 2; 7 -> 4; 0 -> 0
 */
func SearchInsert(nums []int, target int) int {
	if len(nums) == 0 {
		return 0
	}
	lo, hi := 0, len(nums)-1
	for lo <= hi {
		mid := lo + (hi-lo)/2
		switch {
		case nums[mid] == target:
			return mid
		case nums[mid] < target:
			lo = mid + 1
		default:
			hi = mid - 1
		}
	}
	// target is absent: lo has converged on the first element greater
	// than target (== len(nums) when every element is smaller, 0 when
	// every element is larger) — exactly the insertion point.
	return lo
}
|
package operator
import (
"context"
"fmt"
"time"
operatorv1 "github.com/openshift/api/operator/v1"
operatorclient "github.com/openshift/cluster-dns-operator/pkg/operator/client"
operatorconfig "github.com/openshift/cluster-dns-operator/pkg/operator/config"
operatorcontroller "github.com/openshift/cluster-dns-operator/pkg/operator/controller"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
kconfig "sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
// Operator is the scaffolding for the dns operator. It sets up dependencies
// and defines the topology of the operator and its managed components, wiring
// them together.
type Operator struct {
	manager manager.Manager
	// NOTE(review): caches is never populated or read in this file —
	// confirm whether it is used elsewhere or is dead.
	caches []cache.Cache
	client client.Client // used only by the default-DNS logic (see ensureDefaultDNS)
}
// New creates (but does not start) a new operator from configuration: it
// builds the controller-runtime manager scoped to the openshift-dns
// namespace, registers the dns operator controller with it, and creates
// the client used for default-DNS management.
func New(config operatorconfig.Config) (*Operator, error) {
	kubeConfig, err := kconfig.GetConfig()
	if err != nil {
		return nil, fmt.Errorf("failed to get kube config: %v", err)
	}
	operatorManager, err := manager.New(kubeConfig, manager.Options{
		Scheme:    operatorclient.GetScheme(),
		Namespace: "openshift-dns",
	})
	if err != nil {
		return nil, fmt.Errorf("failed to create operator manager: %v", err)
	}
	// Create and register the operator controller with the operator manager.
	controllerConfig := operatorcontroller.Config{
		KubeConfig:             kubeConfig,
		CoreDNSImage:           config.CoreDNSImage,
		OpenshiftCLIImage:      config.OpenshiftCLIImage,
		OperatorReleaseVersion: config.OperatorReleaseVersion,
	}
	if _, err := operatorcontroller.New(operatorManager, controllerConfig); err != nil {
		return nil, fmt.Errorf("failed to create operator controller: %v", err)
	}
	kubeClient, err := operatorclient.NewClient(kubeConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to create kube client: %v", err)
	}
	return &Operator{
		manager: operatorManager,
		// TODO: These are only needed for the default dns stuff, which
		// should be refactored away.
		client: kubeClient,
	}, nil
}
// Start creates the default DNS and then starts the operator
// synchronously until a message is received on the stop channel.
// TODO: Move the default DNS logic elsewhere.
func (o *Operator) Start(stop <-chan struct{}) error {
	// Periodically ensure the default controller exists.
	go wait.Until(func() {
		if err := o.ensureDefaultDNS(); err != nil {
			logrus.Errorf("failed to ensure default dns: %v", err)
		}
	}, 1*time.Minute, stop)
	errChan := make(chan error)
	// Start the manager.
	// NOTE(review): if the stop case below wins the select, nobody ever
	// reads errChan and this goroutine blocks forever on the send —
	// presumably acceptable at process shutdown, but confirm.
	go func() {
		errChan <- o.manager.Start(stop)
	}()
	// Wait for the manager to exit or a stop signal.
	select {
	case <-stop:
		return nil
	case err := <-errChan:
		return err
	}
}
// ensureDefaultDNS creates the default dns if it doesn't already exist.
func (o *Operator) ensureDefaultDNS() error {
	dns := &operatorv1.DNS{
		ObjectMeta: metav1.ObjectMeta{
			Name: operatorcontroller.DefaultDNSController,
		},
	}
	err := o.client.Get(context.TODO(), types.NamespacedName{Name: dns.Name}, dns)
	if err == nil {
		// Already present; nothing to do.
		return nil
	}
	// Any error other than not-found is propagated unchanged.
	if !errors.IsNotFound(err) {
		return err
	}
	if err := o.client.Create(context.TODO(), dns); err != nil {
		return fmt.Errorf("failed to create default dns: %v", err)
	}
	logrus.Infof("created default dns: %s", dns.Name)
	return nil
}
|
package main
// Everything is in terms of "per 100 g" (except for g, which is the amount of
// that thing that you're eating).
////////////////////////////////////////////////////////////////////////////////
// Grams is a quantity of food mass, in grams.
type Grams float32
const (
	// kg converts kilograms to grams.
	kg = 1000
	// perPound is pounds per 100 g (100 g ≈ 0.2205 lb), so
	// perPound * ($/lb) yields dollars per 100 g.
	perPound = 0.220462
)
// Of returns a new food representing a g-gram serving of f: every
// nutritional and cost figure is scaled by the ratio of the requested
// grams to f's reference amount f.g. f itself is not modified.
func (g Grams) Of(f *food) *food {
	ratio := float32(g / f.g)
	return &food{
		name:     f.name,
		g:        g,
		dollars:  f.dollars * ratio,
		carbs:    f.carbs * ratio,
		protein:  f.protein * ratio,
		fat:      f.fat * ratio,
		fiber:    f.fiber * ratio,
		calories: f.calories * ratio,
	}
}
// water: the zero food — no cost, no nutrition.
var water = &food{
	name: "water",
	g: 1,
	dollars: 0,
	carbs: 0,
	protein: 0,
	fat: 0,
	fiber: 0,
	calories: 0,
}
// lentils, per 100 g. Price: $2.39 per 5 lb bag, converted to $/100 g.
var lentils = &food{
	name: "lentils",
	g: 0.1 * kg,
	dollars: perPound * 2.39 / 5,
	carbs: 20.13,
	protein: 9.02,
	fat: 0.38,
	fiber: 7.9,
	calories: 116,
}
// spinach, per 100 g, priced at $4.29/lb.
var spinach = &food{
	name: "spinach",
	g: 0.1 * kg,
	dollars: perPound * 4.29,
	carbs: 3.63,
	protein: 2.86,
	fat: 0.39,
	fiber: 2.2,
	calories: 23,
}
// strawberries, per 100 g, priced at $1.67/lb.
var strawberries = &food{
	name: "strawberries",
	g: 0.1 * kg,
	dollars: perPound * 1.67,
	carbs: 9.13,
	protein: 0.43,
	fat: 0.11,
	fiber: 2.1,
	calories: 35,
}
// almonds, per 100 g, priced at $5.63/lb.
var almonds = &food{
	name: "almonds",
	g: 0.1 * kg,
	dollars: perPound * 5.63,
	carbs: 21.55,
	protein: 21.15,
	fat: 49.93,
	fiber: 12.5,
	calories: 579,
}
// hemp seeds, per 100 g. Price: $13.69 per 793 g package, scaled to 100 g.
var hempSeeds = &food{
	name: "hemp seeds",
	g: 0.1 * kg,
	dollars: (13.69 / 793) * 100,
	carbs: 8.67,
	protein: 31.56,
	fat: 48.75,
	fiber: 4,
	calories: 553,
}
// gala apples, per 100 g, priced at $2.50/lb.
var applesGala = &food{
	name: "apples, gala",
	g: 0.1 * kg,
	dollars: perPound * 2.50,
	carbs: 13.68,
	protein: 0.25,
	fat: 0.12,
	fiber: 2.3,
	calories: 57,
}
|
// +build !windows
package asyncexec_test
import (
"fmt"
"os/exec"
"github.com/aws-controllers-k8s/dev-tools/pkg/asyncexec"
)
// ExampleCmd_Run_withNoStream runs a command without consuming its output
// streams and checks only the exit code. The "// Output:" comment is the
// test's expected stdout — do not edit it casually.
func ExampleCmd_Run_withNoStream() {
	cmd := asyncexec.New(exec.Command("echo", "Hello ACK"), 16)
	cmd.Run()
	cmd.Wait()
	fmt.Println(cmd.ExitCode())
	// Output: 0
}
// ExampleCmd_Run_withStream runs a command and drains both the stdout and
// stderr stream channels on separate goroutines. The "// Output:" comment
// is the test's expected stdout.
func ExampleCmd_Run_withStream() {
	cmd := asyncexec.New(exec.Command("echo", "Hello ACK"), 16)
	cmd.Run()
	done := make(chan struct{})
	go func() {
		for b := range cmd.StdoutStream() {
			fmt.Println(string(b))
		}
		done <- struct{}{}
	}()
	go func() {
		for b := range cmd.StderrStream() {
			fmt.Println(string(b))
		}
		done <- struct{}{}
	}()
	// Block until BOTH drain goroutines finish (one signal from each);
	// deferred so it runs after Wait below.
	defer func() { _, _ = <-done, <-done }()
	cmd.Wait()
	// Output: Hello ACK
}
|
package cmd_test
import (
"bytes"
"fmt"
"io/ioutil"
"testing"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"github.com/provenance-io/provenance/cmd/provenanced/cmd"
"github.com/provenance-io/provenance/x/metadata/types"
)
// TestAddMetaAddressParser table-tests the "parse" subcommand of the
// metadata-address CLI: each case feeds one bech32 metadata address and
// compares the command's full stdout against the expected rendering
// (or expects Execute to fail for malformed input).
func TestAddMetaAddressParser(t *testing.T) {
	// Build one address of every metadata type from fresh UUIDs.
	scopeUUID := uuid.New()
	scopeID := types.ScopeMetadataAddress(scopeUUID)
	sessionUUID := uuid.New()
	sessionID := types.SessionMetadataAddress(scopeUUID, sessionUUID)
	recordID := types.RecordMetadataAddress(scopeUUID, "this is a name")
	contractSpecUUID := uuid.New()
	contractSpecID := types.ContractSpecMetadataAddress(contractSpecUUID)
	scopeSpecUUID := uuid.New()
	scopeSpecID := types.ScopeSpecMetadataAddress(scopeSpecUUID)
	tests := []struct {
		name string      // subtest name
		addr string      // address string handed to "parse"
		expected string  // exact expected stdout (empty when expectErr)
		expectErr bool   // whether Execute should return an error
	}{
		{
			name: "test not an address",
			addr: "not an id",
			expected: "",
			expectErr: true,
		},
		{
			name: "test scope address",
			addr: scopeID.String(),
			expected: fmt.Sprintf("Type: Scope\n\nScope UUID: %s\n", scopeUUID),
			expectErr: false,
		},
		{
			name: "test session address",
			addr: sessionID.String(),
			expected: fmt.Sprintf("Type: Session\n\nScope Id: %s\nScope UUID: %s\nSession UUID: %s\n", scopeID, scopeUUID, sessionUUID),
			expectErr: false,
		},
		{
			name: "test record address",
			addr: recordID.String(),
			expected: fmt.Sprintf("Type: Record\n\nScope Id: %s\nScope UUID: %s\n", scopeID, scopeUUID),
			expectErr: false,
		},
		{
			name: "test contract spec id",
			addr: contractSpecID.String(),
			expected: fmt.Sprintf("Type: Contract Specification\n\nContract Specification UUID: %s\n", contractSpecUUID),
			expectErr: false,
		},
		{
			name: "test scope specification address",
			addr: scopeSpecID.String(),
			expected: fmt.Sprintf("Type: Scope Specification\n\nScope Specification UUID: %s\n", scopeSpecUUID),
			expectErr: false,
		},
	}
	for _, tc := range tests {
		tc := tc // capture range variable for the parallel-safe closure
		t.Run(tc.name, func(t *testing.T) {
			// Fresh command per case; capture its output in a buffer.
			command := cmd.AddMetaAddressParser()
			command.SetArgs([]string{
				"parse", tc.addr})
			b := bytes.NewBufferString("")
			command.SetOut(b)
			if tc.expectErr {
				require.Error(t, command.Execute())
			} else {
				require.NoError(t, command.Execute())
				out, err := ioutil.ReadAll(b)
				require.NoError(t, err)
				require.Equal(t, tc.expected, string(out))
			}
		})
	}
}
// TestAddMetaAddressEncoder exercises the "encode" CLI subcommand: for each
// metadata address flavor it verifies that valid UUID arguments produce the
// expected bech32 address, and that wrong argument counts, malformed UUIDs,
// or an unknown type keyword cause Execute to fail.
func TestAddMetaAddressEncoder(t *testing.T) {
	// Build one expected address of each type from fresh random UUIDs.
	scopeUUID := uuid.New()
	scopeID := types.ScopeMetadataAddress(scopeUUID)
	sessionUUID := uuid.New()
	sessionID := types.SessionMetadataAddress(scopeUUID, sessionUUID)
	recordName := "this is a name"
	recordID := types.RecordMetadataAddress(scopeUUID, recordName)
	contractSpecUUID := uuid.New()
	contractSpecID := types.ContractSpecMetadataAddress(contractSpecUUID)
	scopeSpecUUID := uuid.New()
	scopeSpecID := types.ScopeSpecMetadataAddress(scopeSpecUUID)
	tests := []struct {
		name      string   // subtest name
		args      []string // full argument list passed to the command
		expected  string   // exact output expected on success
		expectErr bool     // whether Execute must fail
	}{
		{
			name:      "test scope address",
			args:      []string{"encode", "scope", scopeUUID.String()},
			expected:  scopeID.String(),
			expectErr: false,
		},
		{
			name:      "test scope address too many args",
			args:      []string{"encode", "scope", scopeUUID.String(), scopeUUID.String()},
			expected:  "",
			expectErr: true,
		},
		{
			name:      "test scope address invalid uuid",
			args:      []string{"encode", "scope", "not an id"},
			expected:  "",
			expectErr: true,
		},
		{
			name:      "test session address",
			args:      []string{"encode", "session", scopeUUID.String(), sessionUUID.String()},
			expected:  sessionID.String(),
			expectErr: false,
		},
		{
			name:      "test session address too few args",
			args:      []string{"encode", "session", scopeUUID.String()},
			expected:  "",
			expectErr: true,
		},
		{
			name:      "test session address too many args",
			args:      []string{"encode", "session", scopeUUID.String(), sessionUUID.String(), sessionUUID.String()},
			expected:  "",
			expectErr: true,
		},
		{
			name:      "test session address invalid first uuid",
			args:      []string{"encode", "session", "not a uuid", sessionUUID.String()},
			expected:  "",
			expectErr: true,
		},
		{
			name:      "test session address invalid second uuid",
			args:      []string{"encode", "session", scopeUUID.String(), "not a uuid"},
			expected:  "",
			expectErr: true,
		},
		{
			name:      "test record address",
			args:      []string{"encode", "record", scopeUUID.String(), recordName},
			expected:  recordID.String(),
			expectErr: false,
		},
		{
			name:      "test record address too few args",
			args:      []string{"encode", "record", scopeUUID.String()},
			expected:  "",
			expectErr: true,
		},
		{
			name:      "test record address too many args",
			args:      []string{"encode", "record", scopeUUID.String(), recordName, recordName},
			expected:  "",
			expectErr: true,
		},
		{
			name:      "test record address invalid uuid",
			args:      []string{"encode", "record", "not a uuid", recordName},
			expected:  "",
			expectErr: true,
		},
		{
			name:      "test contract spec id",
			args:      []string{"encode", "contract-specification", contractSpecUUID.String()},
			expected:  contractSpecID.String(),
			expectErr: false,
		},
		{
			name:      "test contract spec id too many args",
			args:      []string{"encode", "contract-specification", contractSpecUUID.String(), contractSpecUUID.String()},
			expected:  "",
			expectErr: true,
		},
		{
			name:      "test contract spec id invalid uuid",
			args:      []string{"encode", "contract-specification", "not a uuid"},
			expected:  "",
			expectErr: true,
		},
		{
			name:      "test scope specification address",
			args:      []string{"encode", "scope-specification", scopeSpecUUID.String()},
			expected:  scopeSpecID.String(),
			expectErr: false,
		},
		{
			name:      "test scope specification address too many args",
			args:      []string{"encode", "scope-specification", scopeSpecUUID.String(), scopeSpecUUID.String()},
			expected:  "",
			expectErr: true,
		},
		{
			name:      "test scope specification address invalid uuid",
			args:      []string{"encode", "scope-specification", "not a uuid"},
			expected:  "",
			expectErr: true,
		},
		{
			name:      "test scope invalid type",
			args:      []string{"encode", "invalid type", scopeUUID.String()},
			expected:  "",
			expectErr: true,
		},
	}
	for _, tc := range tests {
		tc := tc // capture the range variable for the subtest closure (pre-Go 1.22 semantics)
		t.Run(tc.name, func(t *testing.T) {
			command := cmd.AddMetaAddressEncoder()
			command.SetArgs(tc.args)
			// Capture command output in a buffer instead of stdout.
			b := bytes.NewBufferString("")
			command.SetOut(b)
			if tc.expectErr {
				require.Error(t, command.Execute())
			} else {
				require.NoError(t, command.Execute())
				out, err := ioutil.ReadAll(b)
				require.NoError(t, err)
				require.Equal(t, tc.expected, string(out))
			}
		})
	}
}
|
package cmd
import (
"github.com/spf13/cobra"
"go.uber.org/zap"
"os"
)
// Database connection and code-generation settings, populated from the
// genAPI command-line flags in init.
var (
	dbType       string
	dbHost       string
	dbPort       int32
	dbUser       string
	dbPass       string
	dbName       string
	dbTable      string
	outputFolder string
)
// init registers the genAPI flags and attaches the command to the root.
// None of the flags have a shorthand, so StringVar/Int32Var are used
// instead of the *VarP variants with an empty shorthand.
func init() {
	genAPI.Flags().StringVar(&dbType, "type", "mysql", "db type: mysql, postgres")
	genAPI.Flags().StringVar(&dbHost, "host", "localhost", "db host")
	genAPI.Flags().Int32Var(&dbPort, "port", 3306, "db port")
	genAPI.Flags().StringVar(&dbUser, "user", "root", "db user")
	genAPI.Flags().StringVar(&dbPass, "pass", "secret", "db pass")
	genAPI.Flags().StringVar(&dbName, "db", "db_name", "db name")
	// Fixed copy-paste: the --table flag previously described itself as "db name".
	genAPI.Flags().StringVar(&dbTable, "table", "table_name", "db table")
	genAPI.Flags().StringVar(&outputFolder, "out", "output", "output folder")
	rootCmd.AddCommand(genAPI)
}
// rootCmd is the top-level command; subcommands (genAPI) are attached in init.
// NOTE(review): Use is set to "version", which looks like a leftover from a
// copied snippet for a root command — confirm the intended command name.
var rootCmd = &cobra.Command{
	Use:   "version",
	Short: "",
	Long:  "",
	Run: func(cmd *cobra.Command, args []string) {
		// Do Stuff Here
	},
}
// Execute runs the root command and terminates the process with a non-zero
// status when command execution fails, so callers and scripts can detect
// the failure. (Previously this exited with 0, masking errors.)
func Execute() {
	if err := rootCmd.Execute(); err != nil {
		zap.S().Error("Error when execute command, detail: ", err)
		os.Exit(1)
	}
}
|
package main
// Job lifecycle states. The zero value (Created) is the initial state; the
// values are plain iota ints in declaration order.
const (
	// Created indicates the job has just been created
	Created = iota
	// ReceivingInputs means some inputs (>0) have been received
	ReceivingInputs
	// AllInputReceived means no further inputs will be received
	AllInputReceived
	// AllOutputReceived means all outputs have been received
	AllOutputReceived
	// ErrorState indicates this job is in error, no more interaction should occur
	ErrorState
)
|
package algorithm
import (
. "gopkg.in/check.v1"
"testing"
)
// Test hooks the gocheck suite runner into the standard "go test" runner;
// all gocheck suites registered in this package run through this single entry.
func Test(t *testing.T) { TestingT(t) }
// MySuite groups this package's gocheck tests; it carries no state.
type MySuite struct{}

// Register the suite with gocheck so Test picks it up.
var _ = Suite(&MySuite{})
// TestKMP checks the KMP failure ("next") table built for "AAADAAA"
// against the hand-computed expected values, position by position.
func (s *MySuite) TestKMP(c *C) {
	k := New("AAADAAA")
	expected := []int{0, 1, 2, 0, 1, 2, 3}
	for i, want := range expected {
		c.Assert(k.next[i], Equals, want)
	}
}
// TestMatch checks that the pattern "ABCDABD" is found in the haystack.
// NOTE(review): only the ok flag is asserted; the returned offset `off` is
// never compared against the expected position (the error message mentions
// 10) — consider asserting the offset value too, and confirm whether the
// expected offset is 10 or the byte index of the match.
func (s *MySuite) TestMatch(c *C) {
	k := New("ABCDABD")
	off, ok := k.Match("BBC ABCDAB ABCDABCDABDE")
	if ok {
		c.Log("Success!")
	} else {
		c.Error("Fail!Expect 10(", off, ")")
	}
}
|
// cap and pointer.
package main
import "fmt"
// main demonstrates slice growth under append: the printed backing-array
// pointer stays the same while the capacity suffices and changes whenever
// append must allocate a larger array.
func main() {
	items := make([]string, 0, 1)
	add := func(str string) {
		fmt.Printf("append:%q\n", str)
		items = append(items, str)
		fmt.Printf("pointer:%p, len:%d, cap:%d\n\n", items, len(items), cap(items))
	}
	fmt.Printf("pointer:%p, len:%d, cap:%d\n\n", items, len(items), cap(items))
	add("hello") // cap:1 pointer not changed
	add("world") // cap:2 pointer changed
	add("foo")   // cap:4 pointer changed
	add("bar")   // cap:4 pointer not changed
}
|
package httpapi
import (
"time"
"github.com/pkg/errors"
)
// Config defines the http configuration.
type Config struct {
	// ListenAddress is the host:port the HTTP server binds to.
	ListenAddress string `json:"listen-address" yaml:"listen-address"`
	// RequestTimeout bounds how long a single request may take;
	// Validate rejects values below 500ms.
	RequestTimeout time.Duration `json:"request-timeout" yaml:"request-timeout"`
	// TLS, when non-nil, enables TLS with the given cert/key files.
	TLS *TLSConfig `json:"tls" yaml:"tls"`
	// EnsureDeployerOrigin, when true, requires DeployerMACSecret to be set.
	EnsureDeployerOrigin bool   `json:"ensure-deployer-origin" yaml:"ensure-deployer-origin"`
	DeployerMACSecret    string `json:"deployer-mac-secret" yaml:"deployer-mac-secret"`
}
// SetDefault sets sane defaults for the monitor's config: listen on :8080
// with a 3-second request timeout. It overwrites any existing values.
func (c *Config) SetDefault() {
	c.ListenAddress = ":8080"
	c.RequestTimeout = 3 * time.Second
}
// Validate makes sure config has valid values: a non-empty listen address,
// a request timeout of at least 500ms, and — when deployer-origin checking
// is enabled — a non-empty deployer MAC secret.
func (c *Config) Validate() error {
	if c.ListenAddress == "" {
		return errors.New("listen address can't be empty")
	}
	if c.RequestTimeout < 500*time.Millisecond {
		return errors.New("request timeout should be higher than 500ms")
	}
	if c.EnsureDeployerOrigin && c.DeployerMACSecret == "" {
		return errors.New("since deployer origin will be ensured, deployer secret can't be empty")
	}
	return nil
}
// TLSConfig defines the tls configuration.
type TLSConfig struct {
	// CertFile is the path to the PEM-encoded server certificate.
	CertFile string `json:"cert-file" yaml:"cert-file"`
	// KeyFile is the path to the PEM-encoded private key.
	KeyFile string `json:"key-file" yaml:"key-file"`
}
// Validate makes sure config has valid values: both the certificate file
// and the key file paths must be non-empty.
func (c *TLSConfig) Validate() error {
	if c.CertFile == "" {
		return errors.New("cert file can't be empty")
	}
	if c.KeyFile == "" {
		// Fixed copy-paste: this previously reported "cert file can't be
		// empty" for a missing key file, making the error misleading.
		return errors.New("key file can't be empty")
	}
	return nil
}
|
package eaptls
import (
"encoding/binary"
"errors"
"github.com/titanous/weap/eap"
)
// PacketFlag is the one-byte EAP-TLS flags field.
type PacketFlag byte

// EAP-TLS flag bits (most-significant bits of the flags byte).
const (
	// FlagLength indicates a four-byte TLS message length field follows the flags.
	FlagLength PacketFlag = 1 << 7
	// FlagMore indicates more fragments follow.
	FlagMore PacketFlag = 1 << 6
	// FlagStart marks the EAP-TLS start packet.
	FlagStart PacketFlag = 1 << 5
)
// PacketHeader is the EAP-TLS header: the outer EAP header, the flags byte,
// and the total TLS message length (only present on the wire when
// FlagLength is set).
type PacketHeader struct {
	Outer  eap.PacketHeader
	Flags  PacketFlag
	Length uint32
}
// Encode appends the wire form of the header to buf and returns the
// extended slice. It forces the outer EAP type to TypeTLS, encodes the
// outer header sized for dataLen payload bytes plus this header, then
// appends the flags byte and, when FlagLength is set, the Length field in
// big-endian byte order.
func (h *PacketHeader) Encode(buf []byte, dataLen int) []byte {
	h.Outer.Type = eap.TypeTLS
	buf = h.Outer.Encode(buf, dataLen+h.EncodedLen())
	buf = append(buf, byte(h.Flags))
	if h.Flags&FlagLength != 0 {
		// 32-bit length, big-endian (network byte order).
		buf = append(buf,
			byte(h.Length>>24),
			byte(h.Length>>16),
			byte(h.Length>>8),
			byte(h.Length),
		)
	}
	return buf
}
// EncodedLen reports how many bytes Encode adds for this header: the
// one-byte flags field, plus a four-byte length field when FlagLength is set.
func (h *PacketHeader) EncodedLen() int {
	if h.Flags&FlagLength != 0 {
		return 5 // flags byte + 32-bit length field
	}
	return 1 // flags byte only
}
// Packet is a complete EAP-TLS packet: header plus TLS payload bytes.
type Packet struct {
	PacketHeader
	Data []byte
}
// Encode appends the wire form of the packet (header then payload) to buf
// and returns the extended slice.
func (p *Packet) Encode(buf []byte) []byte {
	buf = p.PacketHeader.Encode(buf, len(p.Data))
	return append(buf, p.Data...)
}
// DecodePacket parses an EAP-TLS packet out of a generic EAP packet. The
// first data byte is the flags field; when FlagLength is set, the next four
// bytes are the big-endian TLS message length, and the remainder is the
// TLS payload.
func DecodePacket(in *eap.Packet) (*Packet, error) {
	if in.Type != eap.TypeTLS {
		return nil, errors.New("eaptls: not a TLS packet")
	}
	if len(in.Data) < 1 {
		return nil, errors.New("eaptls: missing flags")
	}
	out := &Packet{
		PacketHeader: PacketHeader{
			Outer: in.PacketHeader,
			Flags: PacketFlag(in.Data[0]),
		},
		Data: in.Data[1:],
	}
	if out.Flags&FlagLength != 0 {
		if len(out.Data) < 4 {
			return nil, errors.New("eaptls: missing TLS length")
		}
		// BUG FIX: the length field starts after the flags byte, so it must
		// be read from out.Data (== in.Data[1:]); reading from in.Data
		// included the flags byte in the decoded length.
		out.Length = binary.BigEndian.Uint32(out.Data)
		out.Data = out.Data[4:]
	}
	return out, nil
}
|
package main
import "testing"
// TestMain is a placeholder test that only logs a message.
// NOTE(review): with a *testing.T parameter this runs as an ordinary test;
// the special TestMain hook takes *testing.M — confirm which was intended.
func TestMain(t *testing.T) {
	t.Log("teste")
}
|
package task
import (
"DataApi.Go/database/orm"
"DataApi.Go/lib/common"
"github.com/jinzhu/gorm"
)
// QueryAdSenseReportList maps the given account ids to CA account ids and
// returns the AdSense report rows for the converted [startDate, endDate] range.
func QueryAdSenseReportList(db *gorm.DB, accountId []string, startDate int, endDate int) []common.JSON {
	return orm.SelectAdSenseReport(db, common.GetCaAccountIds(accountId),
		common.ConvertTime(startDate), common.ConvertTime(endDate))
}
// QueryAdSenseRevenueList returns the AdSense revenue rows for the given
// account ids over the converted [startDate, endDate] range.
func QueryAdSenseRevenueList(db *gorm.DB, accountId []string, startDate int, endDate int) []common.JSON {
	return orm.SelectAdSenseRevenue(db, accountId,
		common.ConvertTime(startDate), common.ConvertTime(endDate))
}
// QueryAdSenseDomainList returns the AdSense domain mapping keyed by domain.
func QueryAdSenseDomainList(db *gorm.DB) map[string][]string {
	return orm.SelectAdSenseDomainMapping(db)
}
|
package main
import "fmt"
/**
Go does not have classes. However, you can define methods on types.
A method is a function with a special receiver argument.
The receiver appears in its own argument list between the func keyword and the method name.
In this example, the getName method has a receiver of type Vertex named v.
Remember: a method is just a function with a receiver argument.
You can only declare a method with a receiver whose type is defined in the same package as the method.
You cannot declare a method with a receiver whose type is defined in another package (which includes the built-in types such as int).
*/
// IStudent is the behavior a student value exposes: read accessors, a
// mutating last-name update, and a print helper.
type IStudent interface {
	getStudentName() string
	getStandard() int
	getStudentSection() string
	updateStudentLastName(lastName string)
	printStudentDetails()
}
// Student holds the demo data; *Student satisfies IStudent (the update
// method uses a pointer receiver).
type Student struct {
	firstName string
	lastName  string
	standard  int
	section   string
}
// getStudentName returns "firstName lastName".
func (s Student) getStudentName() string {
	return s.firstName + " " + s.lastName
}
// getStandard returns the student's standard (grade level).
func (s Student) getStandard() int {
	return s.standard
}
// getStudentSection returns the student's section.
func (s Student) getStudentSection() string {
	return s.section
}
// updateStudentLastName sets the student's last name in place, which is why
// it uses a pointer receiver. A nil receiver is tolerated (it only prints a
// message), so calling through a nil *Student does not panic.
func (s *Student) updateStudentLastName(lastName string) {
	if s == nil {
		fmt.Println("student can not be nil")
		return
	}
	s.lastName = lastName
}
// printStudentDetails prints name, standard, and section via the accessors.
func (s Student) printStudentDetails() {
	fmt.Println("studentName", s.getStudentName(), " standard:", s.getStandard(), " section:", s.getStudentSection())
}
// describe prints the dynamic type and value held by the interface.
// Note: the i == nil guard only catches a truly nil interface; an interface
// holding a typed nil pointer (as built in main) is non-nil and passes it.
func describe(i IStudent) {
	if i == nil {
		// nil interface values neither hold value nor concrete type
		return
	}
	fmt.Printf("type = %T value = %v\n", i, i)
}
// main demonstrates two interface behaviors: calling a pointer-receiver
// method through an interface holding a typed nil pointer (safe here because
// updateStudentLastName guards against nil), and ordinary use of a concrete
// *Student through the IStudent interface.
func main() {
	// nil concept: xyz holds a (*Student)(nil) — a non-nil interface with a
	// nil concrete value, so the method call below hits the nil guard.
	var xyz IStudent
	var t *Student
	xyz = t
	xyz.updateStudentLastName("mishra")
	// interface concept
	var student IStudent = &Student{
		firstName: "madhav",
		lastName:  "dwivedi",
		standard:  12,
		section:   "A",
	}
	student.printStudentDetails()
	student.updateStudentLastName("Dhar Dwivedi")
	student.printStudentDetails()
	describe(student)
}
|
package img
import (
log "github.com/sirupsen/logrus"
"image"
"image/color"
"image/draw"
"image/png"
"os"
)
// Canvas dimensions in pixels for the generated image.
const (
	wRect = 200
	hRect = 200
)
// hLine draws a horizontal line.
func hLine(rectImg *image.RGBA, color color.RGBA, x, y, length , width int) {
rectLine := image.NewRGBA(image.Rect(x, y, x+length, y+width))
draw.Draw(rectImg, rectLine.Bounds(), &image.Uniform{color}, image.ZP, draw.Src)
}
// vLine draws a veritcal line.
func vLine(rectImg *image.RGBA, color color.RGBA, x, y, length , width int) {
rectLine := image.NewRGBA(image.Rect(x, y, x+width, y+length))
draw.Draw(rectImg, rectLine.Bounds(), &image.Uniform{color}, image.ZP, draw.Src)
}
// CreateImg renders a green canvas with a blue grid (a line every 25
// pixels) and writes it as PNG to hw6/img/rectangle.png. Failures to create
// or encode the file are fatal.
func CreateImg() {
	green := color.RGBA{0, 255, 0, 255}
	rectImg := image.NewRGBA(image.Rect(0, 0, wRect, hRect))
	// image.ZP is deprecated; image.Point{} is the zero point.
	draw.Draw(rectImg, rectImg.Bounds(), &image.Uniform{green}, image.Point{}, draw.Src)
	path := "hw6/img/"
	file, err := os.Create(path + "rectangle.png")
	if err != nil {
		log.Fatalf("Failed create file: %s", err)
	}
	defer file.Close()
	blue := color.RGBA{0, 10, 200, 255}
	for i := 5; i < wRect; i++ {
		if i%25 == 0 {
			hLine(rectImg, blue, 5, i, 190, 5)
			vLine(rectImg, blue, i, 5, 190, 5)
		}
	}
	// The encode error was previously discarded; surface it so a truncated
	// or failed write is not silent.
	if err := png.Encode(file, rectImg); err != nil {
		log.Fatalf("Failed encode png: %s", err)
	}
}
|
package cmd
import (
"fmt"
"os"
"github.com/dnephin/dobi/config"
"github.com/dnephin/dobi/logging"
"github.com/dnephin/dobi/tasks"
"github.com/dnephin/dobi/tasks/client"
docker "github.com/fsouza/go-dockerclient"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
const (
	// DefaultDockerAPIVersion is the default version of the docker API to use
	// when DOCKER_API_VERSION is not set in the environment (see buildClient).
	DefaultDockerAPIVersion = "1.25"
)
// Build metadata, overridden at link time; printed by printVersion.
var (
	version   = "0.15.0"
	gitsha    = "unknown"
	buildDate = ""
)
// dobiOptions collects the root command's flag values and positional args.
type dobiOptions struct {
	filename    string   // path to the dobi config file
	verbose     bool     // enable debug logging
	quiet       bool     // restrict logging to warnings and above
	noBindMount bool     // provide mounts as image layers instead of bind mounts
	tasks       []string // positional RESOURCE[:ACTION] arguments
	version     bool     // print version and exit
}
// NewRootCommand returns a new root command wired to a fresh dobiOptions:
// flags populate opts, positional args become the task list, and logging is
// initialized before any subcommand runs.
func NewRootCommand() *cobra.Command {
	var opts dobiOptions
	cmd := &cobra.Command{
		Use:              "dobi [flags] RESOURCE[:ACTION] [RESOURCE[:ACTION]...]",
		Short:            "A build automation tool for Docker applications",
		SilenceUsage:     true,
		SilenceErrors:    true,
		TraverseChildren: true,
		Args:             cobra.ArbitraryArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.tasks = args
			return runDobi(opts)
		},
		// Runs for subcommands too, so logging is set up everywhere.
		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
			initLogging(opts.verbose, opts.quiet)
			return nil
		},
	}
	flags := cmd.Flags()
	flags.StringVarP(&opts.filename, "filename", "f", "dobi.yaml", "Path to config file")
	flags.BoolVarP(&opts.verbose, "verbose", "v", false, "Verbose")
	flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Quiet")
	flags.BoolVar(
		&opts.noBindMount,
		"no-bind-mount",
		defaultBoolValue("DOBI_NO_BIND_MOUNT"),
		"Provide mounts as a layer in an image instead of a bind mount")
	flags.BoolVar(&opts.version, "version", false, "Print version and exit")
	// Stop flag parsing at the first positional arg so task names that look
	// like flags are passed through.
	flags.SetInterspersed(false)
	cmd.AddCommand(
		newListCommand(&opts),
		newCleanCommand(&opts),
	)
	return cmd
}
// runDobi is the root command body: print the version and exit if requested,
// otherwise load the config, build a docker client, and run the tasks.
func runDobi(opts dobiOptions) error {
	if opts.version {
		printVersion()
		return nil
	}
	conf, err := config.Load(opts.filename)
	if err != nil {
		return err
	}
	// Renamed from `client` to avoid shadowing the imported client package,
	// and wrap with %w so callers can unwrap the underlying error.
	dockerClient, err := buildClient()
	if err != nil {
		return fmt.Errorf("failed to create client: %w", err)
	}
	return tasks.Run(tasks.RunOptions{
		Client:    dockerClient,
		Config:    conf,
		Tasks:     opts.tasks,
		Quiet:     opts.quiet,
		BindMount: !opts.noBindMount,
	})
}
// initLogging configures the shared logger: debug level when verbose, warn
// level when quiet (quiet wins if both are set), output to stderr, and the
// custom formatter on both the package logger and the global logrus logger.
func initLogging(verbose, quiet bool) {
	fmtr := &logging.Formatter{}
	log.SetFormatter(fmtr)
	logger := logging.Log
	logger.Formatter = fmtr
	logger.Out = os.Stderr
	if verbose {
		logger.Level = log.DebugLevel
	}
	// Checked after verbose so quiet takes precedence when both flags are set.
	if quiet {
		logger.Level = log.WarnLevel
	}
}
// buildClient creates a docker client from the environment, using
// DOCKER_API_VERSION when set and DefaultDockerAPIVersion otherwise.
func buildClient() (client.DockerClient, error) {
	apiVersion := os.Getenv("DOCKER_API_VERSION")
	if apiVersion == "" {
		apiVersion = DefaultDockerAPIVersion
	}
	// TODO: args for client
	// Renamed from `client` to avoid shadowing the imported client package.
	dockerClient, err := docker.NewVersionedClientFromEnv(apiVersion)
	if err != nil {
		return nil, err
	}
	log.Debug("Docker client created")
	return dockerClient, nil
}
// printVersion writes the link-time version, git sha, and build date to stdout.
func printVersion() {
	fmt.Printf("dobi version %v (build: %v, date: %s)\n", version, gitsha, buildDate)
}
func defaultBoolValue(key string) bool {
return os.Getenv(key) != ""
}
|
package fuse
import (
"os"
"testing"
"syscall"
)
// TestOsErrorToFuseError checks the conversion of os-level errors to fuse
// errno values for three shapes: a raw errno, a SyscallError wrapper, and a
// real error returned by os.Remove on a missing file.
// NOTE(review): os.EPERM and this NewSyscallError signature are from a very
// old Go release; confirm the toolchain this package targets.
func TestOsErrorToFuseError(t *testing.T) {
	errNo := OsErrorToFuseError(os.EPERM)
	if errNo != syscall.EPERM {
		t.Errorf("Wrong conversion %v != %v", errNo, syscall.EPERM)
	}
	e := os.NewSyscallError("syscall", syscall.EPERM)
	errNo = OsErrorToFuseError(e)
	if errNo != syscall.EPERM {
		t.Errorf("Wrong conversion %v != %v", errNo, syscall.EPERM)
	}
	e = os.Remove("this-file-surely-does-not-exist")
	errNo = OsErrorToFuseError(e)
	if errNo != syscall.ENOENT {
		t.Errorf("Wrong conversion %v != %v", errNo, syscall.ENOENT)
	}
}
|
package lastpass
import (
"io"
"net/url"
"strconv"
"github.com/smartystreets/scanners/csv"
)
// CSVScanner provides a clean interface
// to scan LastPass exported CSV files.
// It embeds the underlying csv.Scanner and overrides Record to decode
// each row into an Entry.
type CSVScanner struct {
	*csv.Scanner
}
// NewCSVScanner will construct a CSVScanner
// that wraps the provided io.Reader.
// The first row is consumed as the header; its Scan result is deliberately
// ignored (an empty input simply yields no records).
func NewCSVScanner(r io.Reader) *CSVScanner {
	inner := csv.NewScanner(r)
	inner.Scan() // skip header
	return &CSVScanner{Scanner: inner}
}
// Record will return the most recently
// scanned Entry.
// Field layout: url, username, password, extra, name, fav.
// NOTE(review): rows with fewer than 6 fields will panic on indexing, and
// url.Parse / strconv.Atoi errors are silently dropped (yielding a zero URL
// or Fav) — confirm malformed exports cannot reach this point.
func (s *CSVScanner) Record() Entry {
	fields := s.Scanner.Record()
	u, _ := url.Parse(fields[0])
	fav, _ := strconv.Atoi(fields[5])
	return Entry{
		URL:      *u,
		Username: fields[1],
		Password: fields[2],
		Extra:    fields[3],
		Name:     fields[4],
		Fav:      fav,
	}
}
|
package LeetCode
// CousinsInBinaryTreeInput1 is the tree [1, 2, 3, 4]:
// node 4 is the left child of 2; node 3 has no children.
var CousinsInBinaryTreeInput1 = &TreeNode{
	Val: 1,
	Left: &TreeNode{
		Val: 2,
		Left: &TreeNode{
			Val: 4,
		},
	},
	Right: &TreeNode{
		Val: 3,
	},
}
// CousinsInBinaryTreeInput2 is the tree [1, 2, 3, nil, 4, nil, 5]:
// 4 and 5 sit at the same depth under different parents (cousins).
var CousinsInBinaryTreeInput2 = &TreeNode{
	Val: 1,
	Left: &TreeNode{
		Val: 2,
		Right: &TreeNode{
			Val: 4,
		},
	},
	Right: &TreeNode{
		Val: 3,
		Right: &TreeNode{
			Val: 5,
		},
	},
}
// FindInBinaryTree searches the subtree rooted at root for a child node
// whose Val equals value. deep is root's own depth; on a hit it returns
// that depth (the parent's depth) together with the parent node. When the
// value is absent it returns (0, nil). The left subtree is searched first.
func FindInBinaryTree(root *TreeNode, value int, deep int) (int, *TreeNode) {
	if root == nil {
		return 0, nil
	}
	for _, child := range []*TreeNode{root.Left, root.Right} {
		if child == nil {
			continue
		}
		if child.Val == value {
			return deep, root
		}
		if d, parent := FindInBinaryTree(child, value, deep+1); parent != nil {
			return d, parent
		}
	}
	return 0, nil
}
func IsCousinsInBinaryTree(root *TreeNode, x int, y int) bool {
if root == nil {
return false
}
deep1,parent1 := FindInBinaryTree(root,x,0)
deep2,parent2 := FindInBinaryTree(root,y,0)
return (deep1 == deep2) && (parent1 != parent2) && (parent1 != nil) && (parent2 != nil)
} |
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adminpause
import (
"testing"
"time"
ddlctrl "github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/util/logutil"
)
// dbTestLease is the schema lease used when creating the mock store/domain.
const dbTestLease = 600 * time.Millisecond

// Logger is the global logger in this package
var Logger = logutil.BgLogger()
// prepareDomain builds a mock store/domain for admin-pause tests and returns
// the domain plus two test kits: one for issuing statements (bound to the
// "test" database) and one for admin commands. It also shrinks the global
// reorg wait timeout, batch size, and worker count so DDL reorg steps are
// small and slow enough for pause/cancel tests to interleave.
func prepareDomain(t *testing.T) (*domain.Domain, *testkit.TestKit, *testkit.TestKit) {
	store, dom := testkit.CreateMockStoreAndDomainWithSchemaLease(t, dbTestLease)
	stmtKit := testkit.NewTestKit(t, store)
	adminCommandKit := testkit.NewTestKit(t, store)
	// Global side effect: slows reorg polling for every test in the process.
	ddlctrl.ReorgWaitTimeout = 10 * time.Millisecond
	stmtKit.MustExec("set @@global.tidb_ddl_reorg_batch_size = 2")
	stmtKit.MustExec("set @@global.tidb_ddl_reorg_worker_cnt = 1")
	// Re-create the statement kit so the new global variables take effect
	// in its session, then select the test database.
	stmtKit = testkit.NewTestKit(t, store)
	stmtKit.MustExec("use test")
	return dom, stmtKit, adminCommandKit
}
|
package main
import "fmt"
// main demonstrates Go's two for-loop forms: the three-clause C-style loop
// and the condition-only form that replaces a while loop.
func main() {
	// Similar for loop syntax to other languages
	for i := 0; i < 5; i++ {
		fmt.Println("Hello!")
	}
	// There is no while loop keyword. The for construct can be used.
	// Note: because of the ||, the loop continues past j == 9 while j is
	// odd and only exits at the first even value >= 9 (j == 10).
	j := 1
	for j < 9 || j%2 != 0 {
		fmt.Println("We are in a while loop this will loop for a bit")
		j++
	}
	fmt.Println("We are out of the loop")
}
|
package main
import (
"fmt"
"time"
)
// main launches two goroutines and then sleeps long enough for both to
// finish, demonstrating that goroutines die with main if it returns first.
func main() {
	fmt.Println("in main()")
	go longWait()
	go shortWait()
	fmt.Println("about to sleep in main()")
	// Use the time.Duration constants instead of the raw 60 * 1e9
	// nanosecond literal; the sleep length is unchanged.
	time.Sleep(60 * time.Second)
	fmt.Println("At the end of time")
}
// longWait logs, sleeps 30 seconds, and logs again.
func longWait() {
	fmt.Println("beginning longwait")
	// 30 * time.Second replaces the raw 30 * 1e9 nanosecond literal.
	time.Sleep(30 * time.Second)
	fmt.Println("end of longWait")
}
// shortWait logs, sleeps 20 seconds, and logs again.
func shortWait() {
	fmt.Println("beginning shortwait")
	// 20 * time.Second replaces the raw 20 * 1e9 nanosecond literal.
	time.Sleep(20 * time.Second)
	fmt.Println("end of shortwait")
}
|
package main
import (
"go-package/demo/interface/productrepo"
)
// main demonstrates the factory pattern: pick a backend by environment
// name, construct the matching repository, and store a product through the
// common interface.
func main() {
	// Select the "aliCloud" backend.
	env := "aliCloud"
	// Build a repository for that environment.
	repo := productrepo.New(env)
	// Store through the repository interface.
	repo.StoreProduct("HuaWei mate 40", 105)
}
|
// Copyright 2017 by caixw, All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.
// Package types defines some shared types used across the project.
package types
import (
"github.com/tanxiaolong/apidoc/locale"
"github.com/tanxiaolong/apidoc/vars"
)
// Sanitizer is the interface implemented by configuration values that can
// validate themselves.
type Sanitizer interface {
	// Sanitize returns a non-nil *OptionsError describing the first invalid
	// configuration item, or nil when the value is valid.
	Sanitize() *OptionsError
}
// OptionsError describes an error in a configuration item.
type OptionsError struct {
	Field   string // name of the offending configuration field
	Message string // human-readable description of the problem
}
// Error implements the error interface, rendering the config file name,
// field, and message through the localized OptionsError template.
func (err *OptionsError) Error() string {
	return locale.Sprintf(locale.OptionsError, vars.ConfigFilename, err.Field, err.Message)
}
|
package topology
import (
"path/filepath"
"time"
"github.com/Cloud-Foundations/Dominator/lib/log"
"github.com/Cloud-Foundations/Dominator/lib/repowatch"
hyper_proto "github.com/Cloud-Foundations/Dominator/proto/hypervisor"
)
// watch starts watching the topology git repository and returns a channel
// that delivers a *Topology whenever the checked-out configuration changes.
// It wires repowatch's directory notifications into handleNotifications,
// which runs for the life of the process.
func watch(topologyRepository, localRepositoryDir, topologyDir string,
	checkInterval time.Duration,
	logger log.DebugLogger) (<-chan *Topology, error) {
	directoryChannel, err := repowatch.Watch(topologyRepository,
		localRepositoryDir, checkInterval, "fleet-manager/topology-watcher",
		logger)
	if err != nil {
		return nil, err
	}
	// Buffer of 1 so a freshly-loaded topology never blocks the loader.
	topologyChannel := make(chan *Topology, 1)
	go handleNotifications(directoryChannel, topologyChannel, topologyDir,
		logger)
	return topologyChannel, nil
}
// handleNotifications loads the topology from each directory delivered on
// directoryChannel and forwards it on topologyChannel, suppressing loads
// that are identical to the previously sent topology. Load errors are
// logged and the previous topology stays in effect.
// NOTE(review): the first iteration calls equal on a nil prevTopology —
// presumably (*Topology).equal tolerates a nil receiver; confirm.
func handleNotifications(directoryChannel <-chan string,
	topologyChannel chan<- *Topology, topologyDir string,
	logger log.DebugLogger) {
	var prevTopology *Topology
	for dir := range directoryChannel {
		params := Params{
			Logger:      logger,
			TopologyDir: filepath.Join(dir, topologyDir),
		}
		if topology, err := load(params); err != nil {
			logger.Println(err)
		} else if prevTopology.equal(topology) {
			logger.Debugln(1, "Ignoring unchanged configuration")
		} else {
			topologyChannel <- topology
			prevTopology = topology
		}
	}
}
// shrink normalizes the subnet's addresses to their compact form: the
// embedded subnet, the auto-IP range bounds, and any 16-byte reserved IPs
// that are really IPv4 addresses are converted to 4-byte form in place.
func (subnet *Subnet) shrink() {
	subnet.Subnet.Shrink()
	subnet.FirstAutoIP = hyper_proto.ShrinkIP(subnet.FirstAutoIP)
	subnet.LastAutoIP = hyper_proto.ShrinkIP(subnet.LastAutoIP)
	for index := range subnet.ReservedIPs {
		ip := subnet.ReservedIPs[index]
		if len(ip) != 16 {
			continue
		}
		// To4 returns nil for true IPv6 addresses; leave those unchanged.
		if ip4 := ip.To4(); ip4 != nil {
			subnet.ReservedIPs[index] = ip4
		}
	}
}
|
//go:build !release
// +build !release
//go:generate go run assets_generate.go
package data
import (
"net/http"
"os"
)
// Assets contains project assets.
// It is backed by an on-disk directory (see init); the generated release
// build replaces this with embedded data.
var Assets http.FileSystem
// init points Assets at the on-disk data directory. The
// OPENSHIFT_INSTALL_DATA environment variable overrides the default "data"
// location when set to a non-empty value.
func init() {
	dir := "data"
	if env := os.Getenv("OPENSHIFT_INSTALL_DATA"); env != "" {
		dir = env
	}
	Assets = http.Dir(dir)
}
|
package main
import (
"io/ioutil"
"github.com/go-rod/rod"
"github.com/go-rod/rod/lib/proto"
"github.com/ysmood/kit"
)
// This example demonstrates how to take a screenshot of a specific element and
// of the entire browser viewport, as well as using `kit`
// to store it into a file.
// main connects to a browser, screenshots one element directly to a file,
// then captures the full viewport as PNG (quality 90) and writes the bytes
// out via kit. NOTE(review): ioutil.WriteFile is deprecated in modern Go in
// favor of os.WriteFile — acceptable here if the module targets an older
// toolchain.
func main() {
	browser := rod.New().Connect()
	// capture screenshot of an element
	browser.Page("https://google.com").Element("#main").Screenshot("elementScreenshot.png")
	// capture entire browser viewport, returning png with quality=90
	buf, err := browser.Page("https://brank.as/").ScreenshotE(true, &proto.PageCaptureScreenshot{
		Format:  "png",
		Quality: 90,
	})
	kit.E(err)
	kit.E(ioutil.WriteFile("fullScreenshot.png", buf, 0644))
}
|
package fsm
import (
"strconv"
"math/rand"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/swf"
. "github.com/sclasen/swfsm/sugar"
)
//DecisionInterceptor allows manipulation of the decision task and the outcome at key points in the task lifecycle:
//BeforeTask fires when a task is polled, BeforeDecision before the FSM decides, and AfterDecision after the
//outcome is produced but before it is sent to SWF.
type DecisionInterceptor interface {
	BeforeTask(decision *swf.PollForDecisionTaskOutput)
	BeforeDecision(decision *swf.PollForDecisionTaskOutput, ctx *FSMContext, outcome *Outcome)
	AfterDecision(decision *swf.PollForDecisionTaskOutput, ctx *FSMContext, outcome *Outcome)
}
//FuncInterceptor is a DecisionInterceptor that you can set handler funcs on. if any are unset, they are no-ops.
type FuncInterceptor struct {
	BeforeTaskFn     func(decision *swf.PollForDecisionTaskOutput)
	BeforeDecisionFn func(decision *swf.PollForDecisionTaskOutput, ctx *FSMContext, outcome *Outcome)
	AfterDecisionFn  func(decision *swf.PollForDecisionTaskOutput, ctx *FSMContext, outcome *Outcome)
}
//BeforeTask runs the BeforeTaskFn if not nil
func (i *FuncInterceptor) BeforeTask(decision *swf.PollForDecisionTaskOutput) {
	if i.BeforeTaskFn == nil {
		return
	}
	i.BeforeTaskFn(decision)
}
//BeforeDecision runs the BeforeDecisionFn if not nil
func (i *FuncInterceptor) BeforeDecision(decision *swf.PollForDecisionTaskOutput, ctx *FSMContext, outcome *Outcome) {
	if i.BeforeDecisionFn == nil {
		return
	}
	i.BeforeDecisionFn(decision, ctx, outcome)
}
//AfterDecision runs the AfterDecisionFn if not nil
func (i *FuncInterceptor) AfterDecision(decision *swf.PollForDecisionTaskOutput, ctx *FSMContext, outcome *Outcome) {
	if i.AfterDecisionFn == nil {
		return
	}
	i.AfterDecisionFn(decision, ctx, outcome)
}
//ComposedDecisionInterceptor fans each lifecycle hook out to an ordered list of interceptors.
type ComposedDecisionInterceptor struct {
	interceptors []DecisionInterceptor
}
//NewComposedDecisionInterceptor builds a DecisionInterceptor that invokes each
//given interceptor in order; nil entries are silently dropped.
func NewComposedDecisionInterceptor(interceptors ...DecisionInterceptor) DecisionInterceptor {
	composed := &ComposedDecisionInterceptor{}
	for _, interceptor := range interceptors {
		if interceptor == nil {
			continue
		}
		composed.interceptors = append(composed.interceptors, interceptor)
	}
	return composed
}
//BeforeTask invokes BeforeTask on each composed interceptor in order.
func (c *ComposedDecisionInterceptor) BeforeTask(decision *swf.PollForDecisionTaskOutput) {
	for _, interceptor := range c.interceptors {
		interceptor.BeforeTask(decision)
	}
}
//BeforeDecision invokes BeforeDecision on each composed interceptor in order.
func (c *ComposedDecisionInterceptor) BeforeDecision(decision *swf.PollForDecisionTaskOutput, ctx *FSMContext, outcome *Outcome) {
	for _, interceptor := range c.interceptors {
		interceptor.BeforeDecision(decision, ctx, outcome)
	}
}
//AfterDecision invokes AfterDecision on each composed interceptor in order.
func (c *ComposedDecisionInterceptor) AfterDecision(decision *swf.PollForDecisionTaskOutput, ctx *FSMContext, outcome *Outcome) {
	for _, interceptor := range c.interceptors {
		interceptor.AfterDecision(decision, ctx, outcome)
	}
}
// DedupeWorkflowCompletes returns an interceptor that executes after a decision and removes
// any duplicate swf.DecisionTypeCompleteWorkflowExecution decisions from the outcome.
// Duplicates are removed from the beginning of the input list, so that
// the last complete decision is the one that remains in the list.
// It is a convenience wrapper around DedupeDecisions.
func DedupeWorkflowCompletes() DecisionInterceptor {
	return DedupeDecisions(swf.DecisionTypeCompleteWorkflowExecution)
}
// DedupeWorkflowCancellations returns an interceptor that executes after a decision and removes
// any duplicate swf.DecisionTypeCancelWorkflowExecution decisions from the outcome.
// Duplicates are removed from the beginning of the input list, so that
// the last cancel decision is the one that remains in the list.
// It is a convenience wrapper around DedupeDecisions.
func DedupeWorkflowCancellations() DecisionInterceptor {
	return DedupeDecisions(swf.DecisionTypeCancelWorkflowExecution)
}
// DedupeWorkflowFailures returns an interceptor that executes after a decision and removes
// any duplicate swf.DecisionTypeFailWorkflowExecution decisions from the outcome.
// Duplicates are removed from the beginning of the input list, so that
// the last failure decision is the one that remains in the list.
// It is a convenience wrapper around DedupeDecisions.
func DedupeWorkflowFailures() DecisionInterceptor {
	return DedupeDecisions(swf.DecisionTypeFailWorkflowExecution)
}
// DedupeWorkflowCloseDecisions returns an interceptor that executes after a decision and removes
// any duplicate workflow close decisions (cancel, complete, fail) from the outcome.
// Duplicates are removed from the beginning of the input list, so that
// the last failure decision is the one that remains in the list.
// Each close type is deduped independently via composition.
func DedupeWorkflowCloseDecisions() DecisionInterceptor {
	return NewComposedDecisionInterceptor(
		DedupeWorkflowCompletes(),
		DedupeWorkflowCancellations(),
		DedupeWorkflowFailures(),
	)
}
// DedupeDecisions returns an interceptor that executes after a decision and removes
// any duplicate decisions of the specified type from the outcome.
// Duplicates are removed from the beginning of the input list, so that
// the last decision of the specified type is the one that remains in the list.
//
// e.g. An outcome with a list of decisions [a, a, b, a, c] where the type to dedupe was 'a' would
// result in an outcome with a list of decisions [b, a, c]
func DedupeDecisions(decisionType string) DecisionInterceptor {
	return &FuncInterceptor{
		AfterDecisionFn: func(decision *swf.PollForDecisionTaskOutput, ctx *FSMContext, outcome *Outcome) {
			in := outcome.Decisions
			// First pass: find the index of the last decision of the
			// specified type (the one to keep).
			lastIdx := -1
			for i, d := range in {
				if *d.DecisionType == decisionType {
					lastIdx = i
				}
			}
			// Second pass: keep every other-typed decision plus that one
			// occurrence, preserving order. This replaces the previous
			// prepend-in-loop construction, which copied the output slice
			// on every iteration (accidentally quadratic).
			out := make([]*swf.Decision, 0, len(in))
			for i, d := range in {
				if *d.DecisionType != decisionType || i == lastIdx {
					out = append(out, d)
				}
			}
			outcome.Decisions = out
		},
	}
}
// MoveWorkflowCloseDecisionsToEnd returns an interceptor that executes after a decision and moves
// any workflow close decisions (complete, fail, cancel) to the end of an outcome's decision list.
//
// Note: SWF responds with a 400 error if a workflow close decision is not the last decision
// in the list of decisions.
func MoveWorkflowCloseDecisionsToEnd() DecisionInterceptor {
	return NewComposedDecisionInterceptor(
		MoveDecisionsToEnd(swf.DecisionTypeFailWorkflowExecution),
		MoveDecisionsToEnd(swf.DecisionTypeCancelWorkflowExecution),
		MoveDecisionsToEnd(swf.DecisionTypeCompleteWorkflowExecution),
	)
}
// MoveDecisionsToEnd returns an interceptor that executes after a decision and moves
// any decisions of the specified type to the end of an outcome's decision list,
// preserving relative order within both groups (a stable partition).
//
// e.g. An outcome with a list of decisions [a, a, b, a, c] where the type to move was 'a' would
// result in an outcome with a list of decisions [b, c, a, a, a]
func MoveDecisionsToEnd(decisionType string) DecisionInterceptor {
	return &FuncInterceptor{
		AfterDecisionFn: func(decision *swf.PollForDecisionTaskOutput, ctx *FSMContext, outcome *Outcome) {
			in := outcome.Decisions
			out := []*swf.Decision{}
			decisionsToMove := []*swf.Decision{}
			// The elements are pointers, so appending the range variable is
			// safe — each iteration's pointer value is copied into the
			// destination slice. (A previous comment incorrectly claimed
			// the range variable had to be avoided here.)
			for _, currentDecision := range in {
				if *currentDecision.DecisionType == decisionType {
					decisionsToMove = append(decisionsToMove, currentDecision)
				} else {
					out = append(out, currentDecision)
				}
			}
			out = append(out, decisionsToMove...)
			outcome.Decisions = out
		},
	}
}
// RemoveLowerPriorityDecisions returns an interceptor that executes after a decision and removes
// any lower priority decisions from an outcome if a higher priority decision exists.
// The decisionTypes passed to this function should be listed in highest to
// lowest priority order.
//
// e.g. An outcome with a list of decisions [a, a, b, a, c] where the priority
// was a > b > c would return [a, a, a]
//
// Decisions whose type is not in prioritizedDecisionTypes (index -1) are
// always kept.
func RemoveLowerPriorityDecisions(prioritizedDecisionTypes ...string) DecisionInterceptor {
	return &FuncInterceptor{
		AfterDecisionFn: func(decision *swf.PollForDecisionTaskOutput, ctx *FSMContext, outcome *Outcome) {
			in := outcome.Decisions
			out := []*swf.Decision{}
			// Index into prioritizedDecisionTypes (smaller = higher priority);
			// nil means no prioritized decision was seen at all.
			var indexOfHighestPriorityDecision *int
			// Find highest priority item that is in list
			for _, currentDecision := range in {
				if index := indexOfString(prioritizedDecisionTypes, *currentDecision.DecisionType); index != -1 {
					if indexOfHighestPriorityDecision == nil || index < *indexOfHighestPriorityDecision {
						indexOfHighestPriorityDecision = aws.Int(index)
					}
				}
			}
			// Leave lower priority items off the final decision list
			for _, currentDecision := range in {
				index := indexOfString(prioritizedDecisionTypes, *currentDecision.DecisionType)
				if index != -1 && indexOfHighestPriorityDecision != nil && index > *indexOfHighestPriorityDecision {
					continue
				}
				out = append(out, currentDecision)
			}
			outcome.Decisions = out
		},
	}
}
// indexOfString returns the index of the first occurrence of testString in
// stringSlice, or -1 when it is absent.
func indexOfString(stringSlice []string, testString string) int {
	for i, candidate := range stringSlice {
		if candidate == testString {
			return i
		}
	}
	return -1
}
//ManagedContinuations is an interceptor that will handle most of the mechanics of automatically continuing workflows.
//
//For workflows without persistent, heartbeating activities, it should do everything.
//
//ContinueSignal: How to continue fsms with persistent activities.
//How it works:
//FSM in steady+activity, listens for ContinueTimer.
//OnTimer, cancel activity, transition to continuing.
//In continuing, OnActivityCanceled send ContinueSignal.
//Interceptor handles ContinueSignal, if 0,0,0,0 Continue, else start ContinueTimer.
//In continuing, OnStarted re-starts the activity, transition back to steady.
//
//This convenience wrapper delegates to ManagedContinuationsWithJitter with
//zero size/age jitter (which that function clamps to a minimum of 1).
func ManagedContinuations(historySize int, workflowAgeInSec int, timerRetrySeconds int) DecisionInterceptor {
	return ManagedContinuationsWithJitter(historySize, 0, workflowAgeInSec, 0, timerRetrySeconds)
}
//To avoid stampedes of workflows that are started at the same time being continued at the same time,
//ManagedContinuationsWithJitter will schedule the initial continue randomly between
//workflowAgeInSec and workflowAgeInSec + maxAgeJitterInSec,
//and will attempt to continue workflows with more than between
//historySize and historySize + maxSizeJitter events.
func ManagedContinuationsWithJitter(historySize int, maxSizeJitter int, workflowAgeInSec int, maxAgeJitterInSec int, timerRetrySeconds int) DecisionInterceptor {
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	//dont blow up on bad values: rng.Intn panics on non-positive arguments,
	//so clamp both jitter ranges to at least 1.
	if maxSizeJitter <= 0 {
		maxSizeJitter = 1
	}
	if maxAgeJitterInSec <= 0 {
		maxAgeJitterInSec = 1
	}
	return &FuncInterceptor{
		AfterDecisionFn: func(decision *swf.PollForDecisionTaskOutput, ctx *FSMContext, outcome *Outcome) {
			//If the outcome already closes the workflow there is nothing to continue.
			for _, d := range outcome.Decisions {
				if *d.DecisionType == swf.DecisionTypeCompleteWorkflowExecution ||
					*d.DecisionType == swf.DecisionTypeCancelWorkflowExecution ||
					*d.DecisionType == swf.DecisionTypeFailWorkflowExecution {
					logf(ctx, "fn=managed-continuations at=terminating-decision")
					return //we have a terminating event, dont continue
				}
			}
			//if prevStarted = 0 this is the first decision of the workflow, so start the continue timer.
			if *decision.PreviousStartedEventId == int64(0) {
				logf(ctx, "fn=managed-continuations at=workflow-start %d", *decision.PreviousStartedEventId)
				outcome.Decisions = append(outcome.Decisions, &swf.Decision{
					DecisionType: S(swf.DecisionTypeStartTimer),
					StartTimerDecisionAttributes: &swf.StartTimerDecisionAttributes{
						TimerId: S(ContinueTimer),
						//jitter the initial fire time to avoid stampedes
						StartToFireTimeout: S(strconv.Itoa(workflowAgeInSec + rng.Intn(maxAgeJitterInSec))),
					},
				})
			}
			//was the ContinueTimer fired?
			continueTimerFired := false
			for _, h := range decision.Events {
				if *h.EventType == swf.EventTypeTimerFired {
					if *h.TimerFiredEventAttributes.TimerId == ContinueTimer {
						continueTimerFired = true
					}
				}
			}
			//was the ContinueSignal fired?
			continueSignalFired := false
			for _, h := range decision.Events {
				if *h.EventType == swf.EventTypeWorkflowExecutionSignaled {
					if *h.WorkflowExecutionSignaledEventAttributes.SignalName == ContinueSignal {
						continueSignalFired = true
					}
				}
			}
			//NOTE(review): this treats Events[0].EventId as the current history
			//size, i.e. assumes events are ordered newest-first — confirm against
			//the poller's event ordering.
			eventCount := *decision.Events[0].EventId
			historySizeExceeded := int64(historySize+rng.Intn(maxSizeJitter)) < eventCount
			//if we pass history size or if we see ContinuteTimer or ContinueSignal fired
			if continueTimerFired || continueSignalFired || historySizeExceeded {
				logf(ctx, "fn=managed-continuations at=attempt-continue continue-timer=%t continue-signal=%t history-size=%t", continueTimerFired, continueSignalFired, historySizeExceeded)
				//if we can safely continue: no pending decisions, activities,
				//signals, children or cancellations may be in flight.
				decisions := len(outcome.Decisions)
				activities := len(ctx.Correlator().Activities)
				signals := len(ctx.Correlator().Signals)
				children := len(ctx.Correlator().Children)
				cancels := len(ctx.Correlator().Cancellations)
				if decisions == 0 && activities == 0 && signals == 0 && children == 0 && cancels == 0 {
					logf(ctx, "fn=managed-continuations at=able-to-continue action=add-continue-decision events=%d", eventCount)
					outcome.Decisions = append(outcome.Decisions, ctx.ContinueWorkflowDecision(ctx.State, ctx.stateData)) //stateData safe?
				} else {
					//re-start the timer for timerRetrySecs, but only if the timer
					//just fired or no retry timer is already scheduled.
					logf(ctx, "fn=managed-continuations at=unable-to-continue decisions=%d activities=%d signals=%d children=%d cancels=%d events=%d action=start-continue-timer-retry", decisions, activities, signals, children, cancels, eventCount)
					if continueTimerFired || !ctx.Correlator().TimerScheduled(ContinueTimer) {
						outcome.Decisions = append(outcome.Decisions, &swf.Decision{
							DecisionType: S(swf.DecisionTypeStartTimer),
							StartTimerDecisionAttributes: &swf.StartTimerDecisionAttributes{
								TimerId: S(ContinueTimer),
								StartToFireTimeout: S(strconv.Itoa(timerRetrySeconds)),
							},
						})
					}
				}
			}
		},
	}
}
// StartCancelInterceptor returns an interceptor that runs after each decision
// and removes start/cancel decision pairs targeting the same entity ID
// (child workflow, activity, or timer), since such pairs cancel each other out.
func StartCancelInterceptor() DecisionInterceptor {
	return &FuncInterceptor{
		AfterDecisionFn: func(decision *swf.PollForDecisionTaskOutput, ctx *FSMContext, outcome *Outcome) {
			outcome.Decisions = handleStartCancelTypes(outcome.Decisions, ctx)
		},
	}
}
// StartCancelPair describes a pair of SWF decision types where one starts an
// entity and the other cancels it by ID, plus accessors for extracting that
// ID from each decision kind.
type StartCancelPair struct {
	// idField is the entity name ("workflow", "activity", "timer") used in log lines.
	idField string
	// startDecision is the SWF decision type that starts the entity.
	startDecision string
	// cancelDecision is the SWF decision type that cancels the entity.
	cancelDecision string
	// startId extracts the entity ID from a start decision.
	startId func(d *swf.Decision) *string
	// cancelId extracts the entity ID from a cancel decision.
	cancelId func(d *swf.Decision) *string
}
// startCancelPairs enumerates the three SWF start/cancel decision pairings
// handled by StartCancelInterceptor: child workflows, activity tasks, and timers.
var startCancelPairs = []*StartCancelPair{
	&StartCancelPair{
		idField: "workflow",
		startDecision: swf.DecisionTypeStartChildWorkflowExecution,
		cancelDecision: swf.DecisionTypeRequestCancelExternalWorkflowExecution,
		startId: func(d *swf.Decision) *string { return d.StartChildWorkflowExecutionDecisionAttributes.WorkflowId },
		cancelId: func(d *swf.Decision) *string {
			return d.RequestCancelExternalWorkflowExecutionDecisionAttributes.WorkflowId
		},
	},
	&StartCancelPair{
		idField: "activity",
		startDecision: swf.DecisionTypeScheduleActivityTask,
		cancelDecision: swf.DecisionTypeRequestCancelActivityTask,
		startId: func(d *swf.Decision) *string { return d.ScheduleActivityTaskDecisionAttributes.ActivityId },
		cancelId: func(d *swf.Decision) *string {
			return d.RequestCancelActivityTaskDecisionAttributes.ActivityId
		},
	},
	&StartCancelPair{
		idField: "timer",
		startDecision: swf.DecisionTypeStartTimer,
		cancelDecision: swf.DecisionTypeCancelTimer,
		startId: func(d *swf.Decision) *string { return d.StartTimerDecisionAttributes.TimerId },
		cancelId: func(d *swf.Decision) *string {
			return d.CancelTimerDecisionAttributes.TimerId
		},
	},
}
// handleStartCancelTypes applies each start/cancel pairing in turn, removing
// matched start+cancel decision pairs from the list.
func handleStartCancelTypes(in []*swf.Decision, ctx *FSMContext) []*swf.Decision {
	out := in
	for _, pair := range startCancelPairs {
		out = pair.removeStartBeforeCancel(out, ctx)
	}
	return out
}
// removeStartBeforeCancel removes matched start+cancel decision pairs: when a
// cancel decision targets the same ID as a start decision already collected in
// this pass, the start decision is removed and the cancel is not emitted either.
func (s *StartCancelPair) removeStartBeforeCancel(in []*swf.Decision, ctx *FSMContext) []*swf.Decision {
	var out []*swf.Decision
	for _, decision := range in {
		switch *decision.DecisionType {
		case s.cancelDecision:
			cancelId := aws.StringValue(s.cancelId(decision))
			if cancelId == "" {
				// NOTE(review): a cancel decision with an empty/nil ID is silently
				// dropped (never copied to out) — confirm this is intentional.
				continue
			}
			i := s.decisionsContainStartCancel(out, &cancelId)
			if i >= 0 {
				startDecision := out[i]
				startId := aws.StringValue(s.startId(startDecision))
				if startId == "" {
					// NOTE(review): likely unreachable — the match above requires
					// startId == cancelId, and cancelId is non-empty here.
					continue
				}
				// Drop the earlier start decision; the cancel is not appended,
				// so the pair annihilates.
				out = append(out[:i], out[i+1:]...)
				logf(ctx, "fn=remove-start-before-cancel at=start-cancel-detected status=removing-start-cancel workflow=%s run=%s start-%s=%s cancel-%s=%s", aws.StringValue(ctx.WorkflowId), aws.StringValue(ctx.RunId), s.idField, startId, s.idField, cancelId)
			} else {
				out = append(out, decision)
			}
		default:
			out = append(out, decision)
		}
	}
	return out
}
// decisionsContainStartCancel returns the index of the first start decision in
// `in` whose start ID equals *cancelId (ensuring the pair targets the same
// workflow/activity/timer), or -1 when no such decision exists.
func (s *StartCancelPair) decisionsContainStartCancel(in []*swf.Decision, cancelId *string) int {
	for i, candidate := range in {
		if *candidate.DecisionType != s.startDecision {
			continue
		}
		if *s.startId(candidate) == *cancelId {
			return i
		}
	}
	return -1
}
// CloseDecisionTypes returns the SWF decision types that close a workflow
// execution (complete, cancel, fail).
func CloseDecisionTypes() []string {
	return []string{
		swf.DecisionTypeCompleteWorkflowExecution,
		swf.DecisionTypeCancelWorkflowExecution,
		swf.DecisionTypeFailWorkflowExecution,
		// swf.DecisionTypeContinueAsNewWorkflowExecution is technically a close type,
		// but swfsm currently doesn't really treat it as one and there's
		// a lot of special logic, so excluding it from here for now
	}
}
// CloseDecisionIncompatableDecisionTypes returns the decision types that SWF
// rejects when they appear alongside a workflow-closing decision.
// NOTE(review): "Incompatable" is a misspelling, kept because the name is
// exported and renaming would break callers.
func CloseDecisionIncompatableDecisionTypes() []string {
	return []string{
		// TODO: flush out this list with other incompatible types
		// TODO: see https://console.aws.amazon.com/support/home?#/case/?caseId=5223303261&displayId=5223303261&language=en
		swf.DecisionTypeScheduleActivityTask,
		swf.DecisionTypeStartTimer,
	}
}
// CloseWorkflowRemoveIncompatibleDecisionInterceptor returns an interceptor
// that, when the outcome contains a workflow-closing decision, strips out any
// decisions SWF would reject alongside it (see
// CloseDecisionIncompatableDecisionTypes).
func CloseWorkflowRemoveIncompatibleDecisionInterceptor() DecisionInterceptor {
	return &FuncInterceptor{
		AfterDecisionFn: func(decision *swf.PollForDecisionTaskOutput, ctx *FSMContext, outcome *Outcome) {
			// Does the outcome close the workflow?
			hasClose := false
			for _, d := range outcome.Decisions {
				if stringsContain(CloseDecisionTypes(), *d.DecisionType) {
					hasClose = true
					break
				}
			}
			if !hasClose {
				return
			}
			// Drop decisions that cannot accompany a close decision.
			var kept []*swf.Decision
			for _, d := range outcome.Decisions {
				if stringsContain(CloseDecisionIncompatableDecisionTypes(), *d.DecisionType) {
					logf(ctx, "fn=CloseWorkflowRemoveIncompatibleDecisionInterceptor at=remove decision-type=%s", *d.DecisionType)
					continue
				}
				kept = append(kept, d)
			}
			outcome.Decisions = kept
		},
	}
}
|
package models_test
import (
"testing"
"github.com/cloudfoundry-incubator/notifications/models"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// TestModelsSuite is the Ginkgo entry point: it wires Gomega failures into
// the testing package and runs every spec registered in this package.
func TestModelsSuite(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Models Suite")
}
// TruncateTables empties every table on the shared test database connection,
// panicking on failure so a broken fixture aborts the suite immediately.
func TruncateTables() {
	if err := models.Database().Connection().TruncateTables(); err != nil {
		panic(err)
	}
}
|
/*
Your challenge is to make an infinite loading screen, that looks like this:
Loading...\
Or, to be more specific:
Take no input.
Output Loading..., with a trailing space, but no trailing newline.
Infinitely cycle through the chars |, /, - and \: every 0.25 seconds, overwrite the last one with the next in the sequence. You can overwrite just the last character, or delete and rewrite the whole line, as long Loading... remains unchanged.
Rules
The output text must look exactly as specified. Trailing newlines/spaces are acceptable.
You should not wait 0.25 seconds before initially showing output - the first frame should be printed as soon as the program is run.
Your program should be able to run indefinitely. For example, if you use a counter for frames, the counter should never cause an error by exceeding the maximum in your language.
Although the waiting period between each "frame" should be 0.25 seconds, obviously this will never be exact - an error margin of 10% or so is allowed.
You may submit a function, but it must print to stdout.
You can submit an answer in a non-console (but still text-based) environment, as long as it is capable of producing the loading animation.
This is code-golf, so the shortest solution (in bytes) wins. Standard code-golf loopholes apply.
If possible, please provide a gif of your loading screen in action.
*/
package main

import (
	"fmt"
	"time"
)

// main renders an infinite console loading animation: it prints
// "Loading... " once, then cycles the spinner characters |, /, -, \ in
// place, showing each frame immediately and erasing it with a backspace
// after a 250ms pause.
func main() {
	frames := []rune{'|', '/', '-', '\\'}
	// The challenge spec above requires a trailing space after "Loading..."
	// before the spinner character; the original omitted it.
	fmt.Print("Loading... ")
	for i := 0; ; i = (i + 1) % len(frames) {
		// First frame is printed before any sleep, per the rules.
		fmt.Printf("%c", frames[i])
		time.Sleep(250 * time.Millisecond)
		fmt.Print("\b")
	}
}
|
// reverse returns the decimal digits of x in reverse order, preserving sign.
// If x or the reversed result falls outside the signed 32-bit range
// [-2147483648, 2147483647], it returns 0 (LeetCode 7 semantics).
//
// The rewrite accumulates result = result*10 + x%10 directly: Go's % keeps
// the sign of the dividend, so negative inputs reverse without negating x
// first (the original's `x * -1` would overflow for MinInt32 on a 32-bit
// int platform), and the O(n²) repeated-power loop is gone.
func reverse(x int) int {
	const (
		minInt32 = -2147483648
		maxInt32 = 2147483647
	)
	if x < minInt32 || x > maxInt32 {
		return 0
	}
	result := 0
	for x != 0 {
		result = result*10 + x%10
		x /= 10
	}
	if result < minInt32 || result > maxInt32 {
		return 0
	}
	return result
}
|
package entities
// Players is a pool of players
// with additionnal filters.
type Players []*Player

// Count returns the total number of players
// as an unsigned number.
//
func (p Players) Count() uint {
	return uint(len(p))
}
// Humans returns ONLY the players
// who are considered as humans.
//
func (p Players) Humans() Players {
	total := p.Count()
	// Capacity hint: everyone who is not a werewolf.
	humans := make(Players, 0, total-computeWerewolvesCount(total))
	for _, candidate := range p {
		if !candidate.IsHuman() {
			continue
		}
		humans = append(humans, candidate)
	}
	return humans
}
// Werewolves returns ONLY the players
// who are considered as werewolves.
//
func (p Players) Werewolves() Players {
	werewolves := make(Players, 0, computeWerewolvesCount(p.Count()))
	for _, candidate := range p {
		if !candidate.IsWerewolf() {
			continue
		}
		werewolves = append(werewolves, candidate)
	}
	return werewolves
}
// Alive returns ONLY the players
// who are still alive.
//
func (p Players) Alive() Players {
	survivors := make(Players, 0, len(p))
	for _, candidate := range p {
		if !candidate.IsAlive() {
			continue
		}
		survivors = append(survivors, candidate)
	}
	return survivors
}
|
package server
import (
"github.com/pantonshire/nlpewee/core"
pb "github.com/pantonshire/nlpewee/proto"
)
// serializeTokenizeResponse converts the analyzed sentences into the
// protobuf TokenizeResponse message.
func serializeTokenizeResponse(sentences []core.Sentence) *pb.TokenizeResponse {
	msgs := make([]*pb.Sentence, len(sentences))
	for i := range sentences {
		msgs[i] = serializeSentence(sentences[i])
	}
	return &pb.TokenizeResponse{Sentences: msgs}
}
// serializeSentence converts a core.Sentence, including its tokens and
// named entities, into the protobuf Sentence message.
func serializeSentence(sentence core.Sentence) *pb.Sentence {
	tokens := sentence.Tokens()
	entities := sentence.Entities()
	msg := &pb.Sentence{
		Tokens:   make([]*pb.Token, len(tokens)),
		Entities: make([]*pb.Entity, len(entities)),
	}
	for i := range tokens {
		msg.Tokens[i] = serializeToken(tokens[i])
	}
	for i := range entities {
		msg.Entities[i] = serializeEntity(entities[i])
	}
	return msg
}
// serializeToken converts a core.Token into the protobuf Token message.
// The part-of-speech tag is mapped onto the pb.Tag enum when a known
// mapping exists (serializeTag); otherwise the raw tag string is carried
// in the Other variant of the oneof.
func serializeToken(token core.Token) *pb.Token {
	msg := &pb.Token{
		Full:  serializeText(token.Full()),
		Stem:  serializeText(token.Stem()),
		Label: token.Label(),
	}
	rawTag := token.Tag()
	if tag, ok := serializeTag(rawTag); ok {
		msg.PosTag = &pb.Token_Tag{Tag: tag}
	} else {
		msg.PosTag = &pb.Token_Other{Other: rawTag}
	}
	// gofmt: the original had a stray trailing semicolon here.
	return msg
}
// serializeText converts a core.Text (raw plus cleaned form) into the
// protobuf Text message.
func serializeText(text core.Text) *pb.Text {
	msg := &pb.Text{}
	msg.Raw = text.Raw()
	msg.Cleaned = text.Cleaned()
	return msg
}
// serializeEntity converts a core.Entity into the protobuf Entity message.
func serializeEntity(entity core.Entity) *pb.Entity {
	msg := &pb.Entity{}
	msg.Text = entity.Text()
	msg.Label = entity.Label()
	return msg
}
// serializeTag maps a Penn Treebank style part-of-speech tag string onto the
// pb.Tag enum. The boolean reports whether the tag was recognized; unknown
// tags return (0, false) so callers can fall back to the raw string.
func serializeTag(tag string) (pb.Tag, bool) {
	switch tag {
	// Punctuation and symbol tags.
	case "(":
		return pb.Tag_L_PAREN, true
	case ")":
		return pb.Tag_R_PAREN, true
	case ",":
		return pb.Tag_COMMA, true
	case ":":
		return pb.Tag_COLON, true
	case ".":
		return pb.Tag_PERIOD, true
	case "''":
		return pb.Tag_CLOSING_QUOTE, true
	case "``":
		return pb.Tag_OPENING_QUOTE, true
	case "#":
		return pb.Tag_NUMBER_SIGN, true
	case "$":
		return pb.Tag_CURRENCY, true
	// Word-class tags.
	case "CC":
		return pb.Tag_CC, true
	case "CD":
		return pb.Tag_CD, true
	case "DT":
		return pb.Tag_DT, true
	case "EX":
		return pb.Tag_EX, true
	case "FW":
		return pb.Tag_FW, true
	case "IN":
		return pb.Tag_IN, true
	case "JJ":
		return pb.Tag_JJ, true
	case "JJR":
		return pb.Tag_JJR, true
	case "JJS":
		return pb.Tag_JJS, true
	case "LS":
		return pb.Tag_LS, true
	case "MD":
		return pb.Tag_MD, true
	case "NN":
		return pb.Tag_NN, true
	case "NNP":
		return pb.Tag_NNP, true
	case "NNPS":
		return pb.Tag_NNPS, true
	case "NNS":
		return pb.Tag_NNS, true
	case "PDT":
		return pb.Tag_PDT, true
	case "POS":
		return pb.Tag_POS, true
	case "PRP":
		return pb.Tag_PRP, true
	// "$"-suffixed tags map to an "S" suffix in the enum ("PRP$" -> PRPS).
	case "PRP$":
		return pb.Tag_PRPS, true
	case "RB":
		return pb.Tag_RB, true
	case "RBR":
		return pb.Tag_RBR, true
	case "RBS":
		return pb.Tag_RBS, true
	case "RP":
		return pb.Tag_RP, true
	case "SYM":
		return pb.Tag_SYM, true
	case "TO":
		return pb.Tag_TO, true
	case "UH":
		return pb.Tag_UH, true
	case "VB":
		return pb.Tag_VB, true
	case "VBD":
		return pb.Tag_VBD, true
	case "VBG":
		return pb.Tag_VBG, true
	case "VBN":
		return pb.Tag_VBN, true
	case "VBP":
		return pb.Tag_VBP, true
	case "VBZ":
		return pb.Tag_VBZ, true
	case "WDT":
		return pb.Tag_WDT, true
	case "WP":
		return pb.Tag_WP, true
	case "WP$":
		return pb.Tag_WPS, true
	case "WRB":
		return pb.Tag_WRB, true
	default:
		return 0, false
	}
}
|
package main
import (
"os"
"github.com/sirkon/message"
)
// loggerr logs err via the message package, but only when the COMPLETELOG
// environment variable is set to a non-empty value; otherwise it is a no-op.
func loggerr(err error) {
	if os.Getenv("COMPLETELOG") != "" {
		message.Error(err)
	}
}
|
package relay
import (
"context"
"encoding/base64"
"fmt"
"time"
"unicode/utf8"
skafka "github.com/segmentio/kafka-go"
"google.golang.org/grpc"
"github.com/batchcorp/collector-schemas/build/go/protos/records"
"github.com/batchcorp/collector-schemas/build/go/protos/services"
"github.com/batchcorp/plumber/backends/kafka/types"
)
// handleKafka sends a batch of Kafka relay messages to the gRPC collector.
// The batch is converted to sink records first; the AddKafkaRecord call is
// retried via CallWithRetry.
func (r *Relay) handleKafka(ctx context.Context, conn *grpc.ClientConn, messages []interface{}) error {
	sinkRecords, err := r.convertMessagesToKafkaSinkRecords(messages)
	if err != nil {
		return fmt.Errorf("unable to convert messages to kafka sink records: %s", err)
	}
	client := services.NewGRPCCollectorClient(conn)
	return r.CallWithRetry(ctx, "AddKafkaRecord", func(ctx context.Context) error {
		// Raise the send limit so large batches are not rejected client-side.
		_, err := client.AddKafkaRecord(ctx, &services.KafkaSinkRecordRequest{
			Records: sinkRecords,
		}, grpc.MaxCallSendMsgSize(MaxGRPCMessageSize))
		return err
	})
}
// validateKafkaRelayMessage ensures all necessary values are present for a
// Kafka relay message: the message itself and its Value must be non-nil.
func (r *Relay) validateKafkaRelayMessage(msg *types.RelayMessage) error {
	switch {
	case msg == nil:
		return ErrMissingMessage
	case msg.Value == nil:
		return ErrMissingMessageValue
	default:
		return nil
	}
}
// convertMessagesToKafkaSinkRecords converts a batch of relay messages into
// records.KafkaSinkRecord protobufs that can be sent to the gRPC collector.
// Every element must be a *types.RelayMessage and pass validation, otherwise
// the whole batch fails with the offending index in the error.
func (r *Relay) convertMessagesToKafkaSinkRecords(messages []interface{}) ([]*records.KafkaSinkRecord, error) {
	sinkRecords := make([]*records.KafkaSinkRecord, 0)
	for i, v := range messages {
		relayMessage, ok := v.(*types.RelayMessage)
		if !ok {
			return nil, fmt.Errorf("unable to type assert incoming message as RelayMessage (index: %d)", i)
		}
		if err := r.validateKafkaRelayMessage(relayMessage); err != nil {
			return nil, fmt.Errorf("unable to validate kafka relay message (index: %d): %s", i, err)
		}
		sinkRecords = append(sinkRecords, &records.KafkaSinkRecord{
			Topic: relayMessage.Value.Topic,
			Key: relayMessage.Value.Key,
			Value: relayMessage.Value.Value,
			// Timestamp is the relay time, not the original Kafka message time.
			Timestamp: time.Now().UTC().UnixNano(),
			Offset: relayMessage.Value.Offset,
			Partition: int32(relayMessage.Value.Partition),
			Headers: convertKafkaHeaders(relayMessage.Value.Headers),
			ForceDeadLetter: r.DeadLetter,
		})
	}
	return sinkRecords, nil
}
// convertKafkaHeaders converts Kafka message headers to their protobuf
// representation. Header values that are not valid UTF-8 are base64-encoded,
// since gRPC rejects calls carrying invalid UTF-8 strings. Returns nil for
// an empty input so the field is omitted from the record.
func convertKafkaHeaders(kafkaHeaders []skafka.Header) []*records.KafkaHeader {
	if len(kafkaHeaders) == 0 {
		return nil
	}
	// Pre-size: exactly one output header per input header.
	sinkRecordHeaders := make([]*records.KafkaHeader, 0, len(kafkaHeaders))
	for _, h := range kafkaHeaders {
		v := string(h.Value)
		// gRPC will fail the call if the value isn't valid utf-8
		// TODO: ship original header value so they can be sent back correctly in a replay
		if !utf8.ValidString(v) {
			v = base64.StdEncoding.EncodeToString(h.Value)
		}
		sinkRecordHeaders = append(sinkRecordHeaders, &records.KafkaHeader{
			Key: h.Key,
			Value: v,
		})
	}
	return sinkRecordHeaders
}
|
// Copyright 2018 Diego Bernardes. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package worker
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"github.com/pkg/errors"
"github.com/diegobernardes/flare"
infraURL "github.com/diegobernardes/flare/infra/url"
"github.com/diegobernardes/flare/infra/wildcard"
"github.com/diegobernardes/flare/infra/worker"
)
// Delivery do the heavy lifting by discovering if the given subscription need or not to receive the
// document.
type Delivery struct {
	// pusher enqueues serialized delivery signals for asynchronous processing.
	pusher worker.Pusher
	// resourceRepository looks up the resource a document belongs to.
	resourceRepository flare.ResourceRepositorier
	// subscriptionRepository drives the trigger logic per subscription.
	subscriptionRepository flare.SubscriptionRepositorier
	// httpClient performs the outbound delivery requests.
	httpClient *http.Client
}
// Push serializes the subscription/document/action triple and enqueues it on
// the pusher for later processing.
func (d *Delivery) Push(
	ctx context.Context, subscription *flare.Subscription, document *flare.Document, action string,
) error {
	content, err := d.marshal(subscription, document, action)
	if err != nil {
		return errors.Wrap(err, "error during trigger")
	}
	// errors.Wrap returns nil when the push succeeds.
	return errors.Wrap(d.pusher.Push(ctx, content), "error during message delivery")
}
// Process decodes a previously pushed message and runs the subscription
// trigger for it.
func (d *Delivery) Process(ctx context.Context, rawContent []byte) error {
	subscription, document, action, err := d.unmarshal(rawContent)
	if err != nil {
		return errors.Wrap(err, "error during content unmarshal")
	}
	// errors.Wrap returns nil when the trigger succeeds.
	return errors.Wrap(
		d.subscriptionRepository.Trigger(ctx, action, document, subscription, d.trigger),
		"error during subscription trigger",
	)
}
// Init applies the given options and validates that every required
// dependency has been provided.
func (d *Delivery) Init(options ...func(*Delivery)) error {
	for _, apply := range options {
		apply(d)
	}
	// Validate required dependencies in a fixed order so the first missing
	// one is reported.
	checks := []struct {
		ok  bool
		msg string
	}{
		{d.pusher != nil, "pusher not found"},
		{d.resourceRepository != nil, "resource repository not found"},
		{d.subscriptionRepository != nil, "subscription repository not found"},
		{d.httpClient != nil, "httpClient not found"},
	}
	for _, check := range checks {
		if !check.ok {
			return errors.New(check.msg)
		}
	}
	return nil
}
// marshal encodes the subscription/document/action triple as the compact
// JSON envelope consumed by unmarshal.
func (d *Delivery) marshal(
	subscription *flare.Subscription, document *flare.Document, action string,
) ([]byte, error) {
	documentID, err := infraURL.String(document.ID)
	if err != nil {
		return nil, errors.Wrap(err, "error during document.ID unmarshal")
	}
	payload := struct {
		Action         string `json:"action"`
		DocumentID     string `json:"documentID"`
		ResourceID     string `json:"resourceID"`
		SubscriptionID string `json:"subscriptionID"`
	}{
		Action:         action,
		DocumentID:     documentID,
		ResourceID:     document.Resource.ID,
		SubscriptionID: subscription.ID,
	}
	content, err := json.Marshal(payload)
	if err != nil {
		return nil, errors.Wrap(err, "error during message marshal")
	}
	return content, nil
}
// unmarshal decodes the JSON envelope produced by marshal back into the
// subscription, document, and action it carries.
func (d *Delivery) unmarshal(
	rawContent []byte,
) (*flare.Subscription, *flare.Document, string, error) {
	var payload struct {
		Action         string `json:"action"`
		DocumentID     string `json:"documentID"`
		ResourceID     string `json:"resourceID"`
		SubscriptionID string `json:"subscriptionID"`
	}
	if err := json.Unmarshal(rawContent, &payload); err != nil {
		return nil, nil, "", errors.Wrap(err, "error during content unmarshal")
	}
	documentID, err := url.Parse(payload.DocumentID)
	if err != nil {
		return nil, nil, "", errors.Wrap(err, "error during parse documentID")
	}
	subscription := &flare.Subscription{ID: payload.SubscriptionID}
	document := &flare.Document{ID: *documentID, Resource: flare.Resource{ID: payload.ResourceID}}
	return subscription, document, payload.Action, nil
}
// buildContent builds the JSON body delivered to a subscription endpoint.
// With the envelope disabled, the raw document content is sent as-is;
// otherwise an envelope with id/action/updatedAt is built, optionally
// including the subscription's data (with wildcard placeholders expanded)
// and the full document.
func (d *Delivery) buildContent(
	resource *flare.Resource,
	document *flare.Document,
	documentEndpoint *url.URL,
	sub flare.Subscription,
	kind string,
) ([]byte, error) {
	var content map[string]interface{}
	if !sub.Content.Envelope {
		content = document.Content
	} else {
		id, err := infraURL.String(document.ID)
		if err != nil {
			return nil, errors.Wrap(err, "error during document.ID unmarshal")
		}
		content = map[string]interface{}{
			"id": id,
			"action": kind,
			"updatedAt": document.UpdatedAt.String(),
		}
		if len(sub.Data) > 0 {
			values := wildcard.ExtractValue(resource.Endpoint.Path, documentEndpoint.Path)
			for key, rawValue := range sub.Data {
				// Only string values participate in wildcard replacement.
				value, ok := rawValue.(string)
				if !ok {
					continue
				}
				// NOTE(review): sub is a value copy, but sub.Data is a map and
				// therefore shared with the caller — this write mutates the
				// caller's subscription data. Confirm whether that is intended.
				sub.Data[key] = wildcard.Replace(value, values)
			}
			content["data"] = sub.Data
		}
		if sub.Content.Document {
			content["document"] = document.Content
		}
	}
	result, err := json.Marshal(content)
	if err != nil {
		return nil, errors.Wrap(err, "error during response generate")
	}
	return result, nil
}
// trigger delivers the document change to the subscription's endpoint and
// classifies the HTTP status: a status listed in Delivery.Success or
// Delivery.Discard is treated as done (nil error); anything else is an error
// so the caller can retry.
func (d *Delivery) trigger(
	ctx context.Context,
	document *flare.Document,
	subscription *flare.Subscription,
	action string,
) error {
	req, err := d.buildRequest(ctx, document, subscription, action)
	if err != nil {
		return err
	}
	resp, err := d.httpClient.Do(req)
	if err != nil {
		return errors.Wrap(err, "error during http request")
	}
	// Close the body so the transport can reuse the connection; the
	// original leaked it.
	defer resp.Body.Close()
	for _, status := range subscription.Delivery.Success {
		if status == resp.StatusCode {
			return nil
		}
	}
	for _, status := range subscription.Delivery.Discard {
		if status == resp.StatusCode {
			return nil
		}
	}
	return errors.Errorf(
		"success and discard status don't match with the response value '%d'", resp.StatusCode,
	)
}
// buildRequest assembles the HTTP request used to deliver the document change
// to the subscription: it loads the resource, builds the body, resolves the
// per-action endpoint overrides (method, headers, URL), and expands wildcard
// placeholders in the endpoint address.
func (d *Delivery) buildRequest(
	ctx context.Context,
	document *flare.Document,
	subscription *flare.Subscription,
	action string,
) (*http.Request, error) {
	resource, err := d.resourceRepository.FindByID(ctx, document.Resource.ID)
	if err != nil {
		return nil, err
	}
	content, err := d.buildContent(resource, document, &document.ID, *subscription, action)
	if err != nil {
		return nil, errors.Wrap(err, "error during content build")
	}
	// Defaults from the subscription endpoint; per-action settings override
	// each field individually when present.
	rawAddr := subscription.Endpoint.URL
	headers := subscription.Endpoint.Headers
	method := subscription.Endpoint.Method
	endpointAction, ok := subscription.Endpoint.Action[action]
	if ok {
		if endpointAction.Method != "" {
			method = endpointAction.Method
		}
		if len(endpointAction.Headers) > 0 {
			headers = endpointAction.Headers
		}
		if endpointAction.URL != nil {
			rawAddr = endpointAction.URL
		}
	}
	addr, err := d.buildEndpoint(resource, &document.ID, rawAddr)
	if err != nil {
		return nil, errors.Wrap(err, "error during endpoint generate")
	}
	req, err := d.buildRequestHTTP(ctx, content, addr, method, headers)
	if err != nil {
		return nil, err
	}
	return req, nil
}
// buildRequestHTTP creates the outbound HTTP request carrying content to
// addr, attaching the context and the given headers, and defaulting the
// Content-Type to application/json when a body is present and no type is set.
func (d *Delivery) buildRequestHTTP(
	ctx context.Context,
	content []byte,
	addr, method string,
	headers http.Header,
) (*http.Request, error) {
	req, err := http.NewRequest(method, addr, bytes.NewBuffer(content))
	if err != nil {
		return nil, errors.Wrap(err, "error during http request create")
	}
	req = req.WithContext(ctx)
	if headers == nil {
		headers = make(http.Header)
	}
	req.Header = headers
	if len(content) > 0 && req.Header.Get("content-type") == "" {
		req.Header.Add("Content-Type", "application/json")
	}
	return req, nil
}
// buildEndpoint produces the final delivery address: it unescapes the
// subscription endpoint and substitutes wildcard placeholders with values
// extracted from the resource path and the document endpoint path.
func (d *Delivery) buildEndpoint(
	resource *flare.Resource,
	endpoint *url.URL,
	rawSubscriptionEndpoint fmt.Stringer,
) (string, error) {
	unescaped, err := url.QueryUnescape(rawSubscriptionEndpoint.String())
	if err != nil {
		return "", errors.Wrap(err, "error during subscription endpoint unescape")
	}
	wildcardValues := wildcard.ExtractValue(resource.Endpoint.Path, endpoint.Path)
	return wildcard.Replace(unescaped, wildcardValues), nil
}
// DeliveryResourceRepository returns an option that sets the resource repository.
func DeliveryResourceRepository(repository flare.ResourceRepositorier) func(*Delivery) {
	return func(d *Delivery) {
		d.resourceRepository = repository
	}
}
// DeliverySubscriptionRepository returns an option that sets the subscription repository.
func DeliverySubscriptionRepository(repository flare.SubscriptionRepositorier) func(*Delivery) {
	return func(d *Delivery) {
		d.subscriptionRepository = repository
	}
}
// DeliveryPusher returns an option that sets the output of the messages.
func DeliveryPusher(pusher worker.Pusher) func(*Delivery) {
	return func(d *Delivery) {
		d.pusher = pusher
	}
}
// DeliveryHTTPClient returns an option that sets the HTTP client used to
// send the document changes.
func DeliveryHTTPClient(client *http.Client) func(*Delivery) {
	return func(d *Delivery) {
		d.httpClient = client
	}
}
|
package controller
import (
"coconut/middleware"
"coconut/model"
"coconut/util"
"coconut/serializer"
"net/http"
"github.com/gin-gonic/gin"
)
// CreateUser handles user registration: it validates and binds the request
// into a UserModel, persists it, and responds with the new user's id and name.
// Validation and persistence failures both return 422.
func CreateUser(c *gin.Context) {
	v := model.UserValidator{}
	if err := v.Bind(c); err != nil {
		c.JSON(http.StatusUnprocessableEntity, util.NewValidatorError(err))
		return
	}
	if err := model.SaveData(&v.UserModel); err != nil {
		c.JSON(http.StatusUnprocessableEntity, gin.H{"status": http.StatusUnprocessableEntity, "message": err.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"id": v.UserModel.ID,
		"name": v.UserModel.Username,
	})
}
// GetUser looks up a user by username and, when the supplied password
// matches, returns the user's email. Responds 404 when the user does not
// exist and 401 on a password mismatch.
//
// NOTE(review): credentials arrive as query parameters, so they end up in
// access logs and browser history — consider moving them to a request body.
// Flagged only; the API is left unchanged.
func GetUser(c *gin.Context) {
	username := c.Query("username")
	password := c.Query("password")

	// Guard clauses replace the original nested if/else.
	user, err := model.FindUserByName(username)
	if err != nil {
		c.JSON(http.StatusNotFound, gin.H{"status": http.StatusNotFound, "message": "no user found"})
		return
	}
	if user.CheckPassword(password) != nil {
		c.JSON(http.StatusUnauthorized, gin.H{"message": "密码错误"})
		return
	}
	c.JSON(http.StatusOK, gin.H{"email": user.Email})
}
// UserLogin authenticates a user by email and password. On success it stores
// the current user id in the request context and responds with the serialized
// user; binding errors return 422 and authentication failures return 403.
func UserLogin(c *gin.Context) {
	v := model.LoginValidator{}
	if err := v.Bind(c); err != nil {
		c.JSON(http.StatusUnprocessableEntity, util.NewValidatorError(err))
		return
	}
	user, err := model.FindUserByEmail(v.UserModel.Email)
	if err != nil {
		c.JSON(http.StatusForbidden, gin.H{"message": "Not Registered email or invalid password"})
		return
	}
	if user.CheckPassword(v.UserTmp.Password) != nil {
		c.JSON(http.StatusForbidden, gin.H{"message": "Invalid password"})
		return
	}
	middleware.UpdateContextCurrentUser(c, user.ID)
	// Renamed so the local no longer shadows the imported serializer package.
	userSerializer := serializer.UserSerializer{c}
	c.JSON(http.StatusOK, gin.H{"user": userSerializer.Response()})
}
|
package sobjects
// SObjectsImplementations maps a Salesforce SObject API name to a zero-value
// instance of its Go representation, letting callers look up the concrete
// type implementing the SObject interface for a given API name.
var SObjectsImplementations = map[string]SObject {
	"AcceptedEventRelation": &AcceptedEventRelation{},
	"Account": &Account{},
	"AccountCleanInfo": &AccountCleanInfo{},
	"AccountContactRole": &AccountContactRole{},
	"AccountFeed": &AccountFeed{},
	"AccountHistory": &AccountHistory{},
	"AccountPartner": &AccountPartner{},
	"AccountShare": &AccountShare{},
	"ActionLinkGroupTemplate": &ActionLinkGroupTemplate{},
	"ActionLinkTemplate": &ActionLinkTemplate{},
	"ActivityHistory": &ActivityHistory{},
	"AdditionalNumber": &AdditionalNumber{},
	"AggregateResult": &AggregateResult{},
	"Announcement": &Announcement{},
	"ApexClass": &ApexClass{},
	"ApexComponent": &ApexComponent{},
	"ApexEmailNotification": &ApexEmailNotification{},
	"ApexLog": &ApexLog{},
	"ApexPage": &ApexPage{},
	"ApexPageInfo": &ApexPageInfo{},
	"ApexTestQueueItem": &ApexTestQueueItem{},
	"ApexTestResult": &ApexTestResult{},
	"ApexTestResultLimits": &ApexTestResultLimits{},
	"ApexTestRunResult": &ApexTestRunResult{},
	"ApexTestSuite": &ApexTestSuite{},
	"ApexTrigger": &ApexTrigger{},
	"AppDefinition": &AppDefinition{},
	"AppMenuItem": &AppMenuItem{},
	"AppTabMember": &AppTabMember{},
	"Asset": &Asset{},
	"AssetFeed": &AssetFeed{},
	"AssetHistory": &AssetHistory{},
	"AssetRelationship": &AssetRelationship{},
	"AssetRelationshipFeed": &AssetRelationshipFeed{},
	"AssetRelationshipHistory": &AssetRelationshipHistory{},
	"AssetShare": &AssetShare{},
	"AssetTokenEvent": &AssetTokenEvent{},
	"AssignmentRule": &AssignmentRule{},
	"AsyncApexJob": &AsyncApexJob{},
	"AttachedContentDocument": &AttachedContentDocument{},
	"Attachment": &Attachment{},
	"AuraDefinition": &AuraDefinition{},
	"AuraDefinitionBundle": &AuraDefinitionBundle{},
	"AuraDefinitionBundleInfo": &AuraDefinitionBundleInfo{},
	"AuraDefinitionInfo": &AuraDefinitionInfo{},
	"AuthConfig": &AuthConfig{},
	"AuthConfigProviders": &AuthConfigProviders{},
	"AuthProvider": &AuthProvider{},
	"AuthSession": &AuthSession{},
	"BackgroundOperation": &BackgroundOperation{},
	"BrandTemplate": &BrandTemplate{},
	"BusinessHours": &BusinessHours{},
	"BusinessProcess": &BusinessProcess{},
	"CallCenter": &CallCenter{},
	"Campaign": &Campaign{},
	"CampaignFeed": &CampaignFeed{},
	"CampaignHistory": &CampaignHistory{},
	"CampaignMember": &CampaignMember{},
	"CampaignMemberStatus": &CampaignMemberStatus{},
	"CampaignShare": &CampaignShare{},
	"Case": &Case{},
	"CaseComment": &CaseComment{},
	"CaseContactRole": &CaseContactRole{},
	"CaseFeed": &CaseFeed{},
	"CaseHistory": &CaseHistory{},
	"CaseShare": &CaseShare{},
	"CaseSolution": &CaseSolution{},
	"CaseStatus": &CaseStatus{},
	"CaseTeamMember": &CaseTeamMember{},
	"CaseTeamRole": &CaseTeamRole{},
	"CaseTeamTemplate": &CaseTeamTemplate{},
	"CaseTeamTemplateMember": &CaseTeamTemplateMember{},
	"CaseTeamTemplateRecord": &CaseTeamTemplateRecord{},
	"CategoryData": &CategoryData{},
	"CategoryNode": &CategoryNode{},
	"ChatterActivity": &ChatterActivity{},
	"ChatterExtension": &ChatterExtension{},
	"ChatterExtensionConfig": &ChatterExtensionConfig{},
	"ClientBrowser": &ClientBrowser{},
	"CollaborationGroup": &CollaborationGroup{},
	"CollaborationGroupFeed": &CollaborationGroupFeed{},
	"CollaborationGroupMember": &CollaborationGroupMember{},
	"CollaborationGroupMemberRequest": &CollaborationGroupMemberRequest{},
	"CollaborationGroupRecord": &CollaborationGroupRecord{},
	"CollaborationInvitation": &CollaborationInvitation{},
	"ColorDefinition": &ColorDefinition{},
	"CombinedAttachment": &CombinedAttachment{},
	"Community": &Community{},
	"ConnectedApplication": &ConnectedApplication{},
	"Contact": &Contact{},
	"ContactCleanInfo": &ContactCleanInfo{},
	"ContactFeed": &ContactFeed{},
	"ContactHistory": &ContactHistory{},
	"ContactShare": &ContactShare{},
	"ContentAsset": &ContentAsset{},
	"ContentBody": &ContentBody{},
	"ContentDistribution": &ContentDistribution{},
	"ContentDistributionView": &ContentDistributionView{},
	"ContentDocument": &ContentDocument{},
	"ContentDocumentFeed": &ContentDocumentFeed{},
	"ContentDocumentHistory": &ContentDocumentHistory{},
	"ContentDocumentLink": &ContentDocumentLink{},
	"ContentDocumentSubscription": &ContentDocumentSubscription{},
	"ContentFolder": &ContentFolder{},
	"ContentFolderItem": &ContentFolderItem{},
	"ContentFolderLink": &ContentFolderLink{},
	"ContentFolderMember": &ContentFolderMember{},
	"ContentNotification": &ContentNotification{},
	"ContentTagSubscription": &ContentTagSubscription{},
	"ContentUserSubscription": &ContentUserSubscription{},
	"ContentVersion": &ContentVersion{},
	"ContentVersionComment": &ContentVersionComment{},
	"ContentVersionHistory": &ContentVersionHistory{},
	"ContentVersionRating": &ContentVersionRating{},
	"ContentWorkspace": &ContentWorkspace{},
	"ContentWorkspaceDoc": &ContentWorkspaceDoc{},
	"ContentWorkspaceMember": &ContentWorkspaceMember{},
	"ContentWorkspacePermission": &ContentWorkspacePermission{},
	"ContentWorkspaceSubscription": &ContentWorkspaceSubscription{},
	"Contract": &Contract{},
	"ContractContactRole": &ContractContactRole{},
	"ContractFeed": &ContractFeed{},
	"ContractHistory": &ContractHistory{},
	"ContractStatus": &ContractStatus{},
	"CorsWhitelistEntry": &CorsWhitelistEntry{},
	"CronJobDetail": &CronJobDetail{},
	"CronTrigger": &CronTrigger{},
	"CspTrustedSite": &CspTrustedSite{},
	"CustomBrand": &CustomBrand{},
	"CustomBrandAsset": &CustomBrandAsset{},
	"CustomHttpHeader": &CustomHttpHeader{},
	"CustomObjectUserLicenseMetrics": &CustomObjectUserLicenseMetrics{},
	"CustomPermission": &CustomPermission{},
	"CustomPermissionDependency": &CustomPermissionDependency{},
	"DandBCompany": &DandBCompany{},
	"Dashboard": &Dashboard{},
	"DashboardComponent": &DashboardComponent{},
	"DashboardComponentFeed": &DashboardComponentFeed{},
	"DashboardFeed": &DashboardFeed{},
	"DataAssessmentFieldMetric": &DataAssessmentFieldMetric{},
	"DataAssessmentMetric": &DataAssessmentMetric{},
	"DataAssessmentValueMetric": &DataAssessmentValueMetric{},
	"DataStatistics": &DataStatistics{},
	"DataType": &DataType{},
	"DatacloudAddress": &DatacloudAddress{},
	"DatacloudCompany": &DatacloudCompany{},
	"DatacloudContact": &DatacloudContact{},
	"DatacloudDandBCompany": &DatacloudDandBCompany{},
	"DatacloudOwnedEntity": &DatacloudOwnedEntity{},
	"DatacloudPurchaseUsage": &DatacloudPurchaseUsage{},
	"DatasetExport": &DatasetExport{},
	"DatasetExportEvent": &DatasetExportEvent{},
	"DatasetExportPart": &DatasetExportPart{},
	"DeclinedEventRelation": &DeclinedEventRelation{},
	"Document": &Document{},
	"DocumentAttachmentMap": &DocumentAttachmentMap{},
	"Domain": &Domain{},
	"DomainSite": &DomainSite{},
	"DuplicateRecordItem": &DuplicateRecordItem{},
	"DuplicateRecordSet": &DuplicateRecordSet{},
	"DuplicateRule": &DuplicateRule{},
	"EmailCapture": &EmailCapture{},
	"EmailDomainKey": &EmailDomainKey{},
	"EmailMessage": &EmailMessage{},
	"EmailMessageRelation": &EmailMessageRelation{},
	"EmailServicesAddress": &EmailServicesAddress{},
	"EmailServicesFunction": &EmailServicesFunction{},
	"EmailStatus": &EmailStatus{},
	"EmailTemplate": &EmailTemplate{},
	"EmbeddedServiceDetail": &EmbeddedServiceDetail{},
	"EntityDefinition": &EntityDefinition{},
	"EntityParticle": &EntityParticle{},
	"EntitySubscription": &EntitySubscription{},
	"Event": &Event{},
	"EventBusSubscriber": &EventBusSubscriber{},
	"EventFeed": &EventFeed{},
	"EventLogFile": &EventLogFile{},
	"EventRelation": &EventRelation{},
	"ExternalDataSource": &ExternalDataSource{},
	"ExternalDataUserAuth": &ExternalDataUserAuth{},
	"FeedAttachment": &FeedAttachment{},
	"FeedComment": &FeedComment{},
	"FeedItem": &FeedItem{},
	"FeedLike": &FeedLike{},
	"FeedPollChoice": &FeedPollChoice{},
	"FeedPollVote": &FeedPollVote{},
	"FeedRevision": &FeedRevision{},
	"FeedSignal": &FeedSignal{},
	"FeedTrackedChange": &FeedTrackedChange{},
	"FieldDefinition": &FieldDefinition{},
	"FieldPermissions": &FieldPermissions{},
	"FileSearchActivity": &FileSearchActivity{},
	"FiscalYearSettings": &FiscalYearSettings{},
	"FlexQueueItem": &FlexQueueItem{},
	"FlowInterview": &FlowInterview{},
	"FlowInterviewShare": &FlowInterviewShare{},
	"FlowRecordRelation": &FlowRecordRelation{},
	"FlowStageRelation": &FlowStageRelation{},
	"Folder": &Folder{},
	"FolderedContentDocument": &FolderedContentDocument{},
	"ForecastShare": &ForecastShare{},
	"GrantedByLicense": &GrantedByLicense{},
	"Group": &Group{},
	"GroupMember": &GroupMember{},
	"Holiday": &Holiday{},
	"IconDefinition": &IconDefinition{},
	"Idea": &Idea{},
	"IdeaComment": &IdeaComment{},
	"IdpEventLog": &IdpEventLog{},
	"InstalledMobileApp": &InstalledMobileApp{},
	"KnowledgeableUser": &KnowledgeableUser{},
	"Lead": &Lead{},
	"LeadCleanInfo": &LeadCleanInfo{},
	"LeadFeed": &LeadFeed{},
	"LeadHistory": &LeadHistory{},
	"LeadShare": &LeadShare{},
	"LeadStatus": &LeadStatus{},
	"LightningComponentBundle": &LightningComponentBundle{},
	"LightningComponentResource": &LightningComponentResource{},
	"LightningComponentTag": &LightningComponentTag{},
	"LightningToggleMetrics": &LightningToggleMetrics{},
	"LightningUsageByAppTypeMetrics": &LightningUsageByAppTypeMetrics{},
	"LightningUsageByBrowserMetrics": &LightningUsageByBrowserMetrics{},
	"LightningUsageByFlexiPageMetrics": &LightningUsageByFlexiPageMetrics{},
	"LightningUsageByPageMetrics": &LightningUsageByPageMetrics{},
	"ListEmail": &ListEmail{},
	"ListEmailRecipientSource": &ListEmailRecipientSource{},
	"ListEmailShare": &ListEmailShare{},
	"ListView": &ListView{},
	"ListViewChart": &ListViewChart{},
	"ListViewChartInstance": &ListViewChartInstance{},
	"LoginGeo": &LoginGeo{},
	"LoginHistory": &LoginHistory{},
	"LoginIp": &LoginIp{},
	"LookedUpFromActivity": &LookedUpFromActivity{},
	"Macro": &Macro{},
	"MacroHistory": &MacroHistory{},
	"MacroInstruction": &MacroInstruction{},
	"MacroShare": &MacroShare{},
	"MailmergeTemplate": &MailmergeTemplate{},
	"MatchingRule": &MatchingRule{},
	"MatchingRuleItem": &MatchingRuleItem{},
	"Name": &Name{},
	"NamedCredential": &NamedCredential{},
	"Note": &Note{},
	"NoteAndAttachment": &NoteAndAttachment{},
	"OauthToken": &OauthToken{},
	"ObjectPermissions": &ObjectPermissions{},
	"OpenActivity": &OpenActivity{},
	"Opportunity": &Opportunity{},
	"OpportunityCompetitor": &OpportunityCompetitor{},
	"OpportunityContactRole": &OpportunityContactRole{},
	"OpportunityFeed": &OpportunityFeed{},
	"OpportunityFieldHistory": &OpportunityFieldHistory{},
	"OpportunityHistory": &OpportunityHistory{},
	"OpportunityLineItem": &OpportunityLineItem{},
	"OpportunityPartner": &OpportunityPartner{},
	"OpportunityShare": &OpportunityShare{},
	"OpportunityStage": &OpportunityStage{},
	"Order": &Order{},
	"OrderFeed": &OrderFeed{},
	"OrderHistory": &OrderHistory{},
	"OrderItem": &OrderItem{},
	"OrderItemFeed": &OrderItemFeed{},
	"OrderItemHistory": &OrderItemHistory{},
	"OrderShare": &OrderShare{},
	"OrgDeleteRequest": &OrgDeleteRequest{},
	"OrgDeleteRequestShare": &OrgDeleteRequestShare{},
	"OrgLifecycleNotification": &OrgLifecycleNotification{},
	"OrgWideEmailAddress": &OrgWideEmailAddress{},
	"Organization": &Organization{},
	"OutgoingEmail": &OutgoingEmail{},
	"OutgoingEmailRelation": &OutgoingEmailRelation{},
	"OwnedContentDocument": &OwnedContentDocument{},
	"OwnerChangeOptionInfo": &OwnerChangeOptionInfo{},
	"PackageLicense": &PackageLicense{},
	"Partner": &Partner{},
	"PartnerRole": &PartnerRole{},
	"Period": &Period{},
	"PermissionSet": &PermissionSet{},
	"PermissionSetAssignment": &PermissionSetAssignment{},
	"PermissionSetLicense": &PermissionSetLicense{},
	"PermissionSetLicenseAssign": &PermissionSetLicenseAssign{},
	"PicklistValueInfo": &PicklistValueInfo{},
	"PlatformAction": &PlatformAction{},
	"PlatformCachePartition": &PlatformCachePartition{},
	"PlatformCachePartitionType": &PlatformCachePartitionType{},
	"Pricebook2": &Pricebook2{},
	"Pricebook2History": &Pricebook2History{},
	"PricebookEntry": &PricebookEntry{},
	"ProcessDefinition": &ProcessDefinition{},
	"ProcessInstance": &ProcessInstance{},
	"ProcessInstanceHistory": &ProcessInstanceHistory{},
	"ProcessInstanceNode": &ProcessInstanceNode{},
	"ProcessInstanceStep": &ProcessInstanceStep{},
	"ProcessInstanceWorkitem": &ProcessInstanceWorkitem{},
	"ProcessNode": &ProcessNode{},
	"Product2": &Product2{},
	"Product2Feed": &Product2Feed{},
	"Product2History": &Product2History{},
	"Profile": &Profile{},
	"Publisher": &Publisher{},
	"PushTopic": &PushTopic{},
	"QueueSobject": &QueueSobject{},
	"QuickText": &QuickText{},
	"QuickTextHistory": &QuickTextHistory{},
	"QuickTextShare": &QuickTextShare{},
	"QuoteTemplateRichTextData": &QuoteTemplateRichTextData{},
	"RecentlyViewed": &RecentlyViewed{},
	"RecordAction": &RecordAction{},
	"RecordType": &RecordType{},
	"RelationshipDomain": &RelationshipDomain{},
	"RelationshipInfo": &RelationshipInfo{},
	"Report": &Report{},
	"ReportFeed": &ReportFeed{},
	"SamlSsoConfig": &SamlSsoConfig{},
	"Scontrol": &Scontrol{},
	"SearchActivity": &SearchActivity{},
	"SearchLayout": &SearchLayout{},
	"SearchPromotionRule": &SearchPromotionRule{},
	"SecureAgent": &SecureAgent{},
	"SecureAgentPlugin": &SecureAgentPlugin{},
	"SecureAgentPluginProperty": &SecureAgentPluginProperty{},
	"SecureAgentsCluster": &SecureAgentsCluster{},
	"SecurityCustomBaseline": &SecurityCustomBaseline{},
	"SessionPermSetActivation": &SessionPermSetActivation{},
	"SetupAuditTrail": &SetupAuditTrail{},
	"SetupEntityAccess": &SetupEntityAccess{},
	"Site": &Site{},
	"SiteFeed": &SiteFeed{},
	"SiteHistory": &SiteHistory{},
	"Solution": &Solution{},
	"SolutionFeed": &SolutionFeed{},
	"SolutionHistory": &SolutionHistory{},
	"SolutionStatus": &SolutionStatus{},
	"Stamp": &Stamp{},
	"StampAssignment": &StampAssignment{},
	"StaticResource": &StaticResource{},
	"StreamingChannel": &StreamingChannel{},
	"StreamingChannelShare": &StreamingChannelShare{},
	"TabDefinition": &TabDefinition{},
	"Task": &Task{},
	"TaskFeed": &TaskFeed{},
	"TaskPriority": &TaskPriority{},
	"TaskStatus": &TaskStatus{},
	"TenantUsageEntitlement": &TenantUsageEntitlement{},
	"TestSuiteMembership": &TestSuiteMembership{},
	"ThirdPartyAccountLink": &ThirdPartyAccountLink{},
	"TodayGoal": &TodayGoal{},
	"TodayGoalShare": &TodayGoalShare{},
	"Topic": &Topic{},
	"TopicAssignment": &TopicAssignment{},
	"TopicFeed": &TopicFeed{},
	"TopicUserEvent": &TopicUserEvent{},
	"TransactionSecurityPolicy": &TransactionSecurityPolicy{},
	"UndecidedEventRelation": &UndecidedEventRelation{},
	"User": &User{},
	"UserAppInfo": &UserAppInfo{},
	"UserAppMenuCustomization": &UserAppMenuCustomization{},
	"UserAppMenuCustomizationShare": &UserAppMenuCustomizationShare{},
	"UserAppMenuItem": &UserAppMenuItem{},
	"UserEntityAccess": &UserEntityAccess{},
	"UserFeed": &UserFeed{},
	"UserFieldAccess": &UserFieldAccess{},
	"UserLicense": &UserLicense{},
	"UserListView": &UserListView{},
	"UserListViewCriterion": &UserListViewCriterion{},
	"UserLogin": &UserLogin{},
	"UserPackageLicense": &UserPackageLicense{},
	"UserPermissionAccess": &UserPermissionAccess{},
	"UserPreference": &UserPreference{},
	"UserProvAccount": &UserProvAccount{},
	"UserProvAccountStaging": &UserProvAccountStaging{},
	"UserProvMockTarget": &UserProvMockTarget{},
	"UserProvisioningConfig": &UserProvisioningConfig{},
	"UserProvisioningLog": &UserProvisioningLog{},
	"UserProvisioningRequest": &UserProvisioningRequest{},
	"UserProvisioningRequestShare": &UserProvisioningRequestShare{},
	"UserRecordAccess": &UserRecordAccess{},
	"UserRole": &UserRole{},
	"UserShare": &UserShare{},
	"VerificationHistory": &VerificationHistory{},
	"VisualforceAccessMetrics": &VisualforceAccessMetrics{},
	"Vote": &Vote{},
	"WaveAutoInstallRequest": &WaveAutoInstallRequest{},
	"WaveCompatibilityCheckItem": &WaveCompatibilityCheckItem{},
	"WebLink": &WebLink{},
}
|
package v1beta7
import (
"github.com/devspace-cloud/devspace/pkg/devspace/config/versions/config"
"github.com/devspace-cloud/devspace/pkg/devspace/config/versions/util"
next "github.com/devspace-cloud/devspace/pkg/devspace/config/versions/v1beta8"
"github.com/devspace-cloud/devspace/pkg/util/log"
)
// Upgrade upgrades the config
//
// It converts this v1beta7 config to the next schema version (v1beta8).
// The bulk of the conversion is done generically by util.Convert; two
// renamed fields are then carried over by hand:
//   Kaniko:  Flags -> Args
//   Kubectl: Flags -> ApplyArgs
func (c *Config) Upgrade(log log.Logger) (config.Config, error) {
	nextConfig := &next.Config{}
	// Generic field-by-field conversion of everything that kept its name.
	err := util.Convert(c, nextConfig)
	if err != nil {
		return nil, err
	}
	// Kaniko: Flags -> Args
	// Kubectl: Flags -> ApplyArgs
	// Convert image configs
	for key, value := range c.Images {
		if value == nil {
			continue
		}
		if value.Build != nil {
			if value.Build.Kaniko != nil && len(value.Build.Kaniko.Flags) > 0 {
				// NOTE(review): assumes util.Convert populated
				// nextConfig.Images[key] for every key in c.Images; a missing
				// entry would nil-dereference here — confirm Convert's behavior.
				if nextConfig.Images[key].Build == nil {
					nextConfig.Images[key].Build = &next.BuildConfig{}
				}
				if nextConfig.Images[key].Build.Kaniko == nil {
					nextConfig.Images[key].Build.Kaniko = &next.KanikoConfig{}
				}
				nextConfig.Images[key].Build.Kaniko.Args = value.Build.Kaniko.Flags
			}
		}
	}
	// Convert deployment configs
	for idx, value := range c.Deployments {
		if value == nil {
			continue
		}
		if value.Kubectl != nil && len(value.Kubectl.Flags) > 0 {
			// Deployment order is preserved by Convert, so the same index is
			// used on both sides.
			nextConfig.Deployments[idx].Kubectl.ApplyArgs = value.Kubectl.Flags
		}
	}
	return nextConfig, nil
}
// UpgradeVarPaths upgrades the config
//
// No variable paths changed between v1beta7 and v1beta8, so this is a no-op
// kept to satisfy the version-upgrade interface.
func (c *Config) UpgradeVarPaths(varPaths map[string]string, log log.Logger) error {
	return nil
}
|
package model
// IsMyName reports whether name identifies this instance, matching either
// the instance's non-empty alias or its instance name.
func (i *InstanceStruct) IsMyName(name string) bool {
	if name == i.InstanceName {
		return true
	}
	return i.InstanceAlias != "" && name == i.InstanceAlias
}
|
package admin
import (
md "github.com/ebikode/eLearning-core/model"
)
// ValidationFields struct to return for validation
//
// Each field carries a per-field validation message; empty fields are
// omitted from the JSON response via omitempty.
type ValidationFields struct {
	Phone     string `json:"phone,omitempty"`
	FirstName string `json:"first_name,omitempty"`
	LastName  string `json:"last_name,omitempty"`
	Email     string `json:"email,omitempty"`
	Password  string `json:"password,omitempty"`
	Role      string `json:"role,omitempty"`
	Avatar    string `json:"avatar,omitempty"`
}
// AdminRepository provides access to the admin storage.
type AdminRepository interface {
	// CheckAdminCreated reports whether a default admin has been created.
	CheckAdminCreated() bool
	// Get returns the admin with the given ID.
	Get(string) *md.Admin
	// GetDashbordData returns the admin dashboard data.
	GetDashbordData() *md.DashbordData
	// Authenticate authenticates an admin.
	Authenticate(string) (*md.Admin, error)
	// Store saves a given admin to the repository.
	Store(md.Admin) (md.Admin, error)
	// Update updates a given admin in the repository.
	Update(*md.Admin) (*md.Admin, error)
	// Delete deletes a given admin from the repository.
	Delete(md.Admin, bool) (bool, error)
}
|
package controllers
import (
"github.com/astaxie/beego"
"BeegoTest/models"
"fmt"
"strconv"
)
// ApplicationsController serves the /apps endpoints; it embeds
// beego.Controller to get the standard request/response plumbing.
type ApplicationsController struct {
	beego.Controller
}
// URLMapping registers this controller's handler methods with beego's router.
func (App *ApplicationsController) URLMapping() {
	App.Mapping("List", App.List)
}
// List returns the first 10 applications as JSON.
// @router /apps [get]
func (App *ApplicationsController) List() {
	apps, err := models.GetFrist10()
	if err != nil {
		beego.Critical("Error: ", err)
		// Previously the handler fell through and serialized a nil result
		// with a 200 status; surface the failure instead.
		App.Ctx.Output.SetStatus(500)
		return
	}
	App.Ctx.Output.JSON(apps, false, false)
}
// TestPanic demonstrates defer/recover semantics: deferred functions run in
// LIFO order, so "defer 2" runs first, then the recovering deferred function
// stops the panic, then "defer 1" runs. recover only works when called
// directly inside a deferred function, which is why the middle defer wraps it.
// @router /apps/panic [get]
func (App *ApplicationsController) TestPanic() {
	defer func() {
		beego.Info("defer 1")
	}()
	defer func() {
		// recover returns the panic value (non-nil) only while unwinding.
		if r := recover(); r != nil {
			beego.Info("recover with:", r)
			App.Ctx.Output.JSON(r, false, false)
		}
		beego.Info("defer with recover")
	}()
	defer func() {
		beego.Info("defer 2")
	}()
	panic("Panic test")
}
// GetOne returns a single application looked up by its numeric id.
// @router /apps/:id [get]
func (App *ApplicationsController) GetOne() {
	// The Atoi error was previously discarded; a non-numeric id now yields 400.
	id, err := strconv.Atoi(App.Ctx.Input.Param(":id"))
	if err != nil {
		App.Ctx.Output.SetStatus(400)
		return
	}
	app, err := models.GetOneById(id)
	fmt.Println(app) // NOTE(review): debug output — kept so the fmt import stays used; consider removing both.
	// The original had an empty `if err != nil {}` block, silently serving
	// whatever GetOneById returned on failure.
	if err != nil {
		beego.Critical("Error: ", err)
		App.Ctx.Output.SetStatus(500)
		return
	}
	App.Ctx.Output.JSON(app, false, false)
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"github.com/gdhagger/go-buildkite/buildkite"
joonix "github.com/joonix/log"
log "github.com/sirupsen/logrus"
flag "github.com/spf13/pflag"
"gopkg.in/yaml.v1"
)
// Command-line flag values, populated in init.
var apiToken string
var org string
var configFile string
var logLevel string
var logFormat string

// client is the shared Buildkite API client, constructed in init.
var client *buildkite.Client
// pipeline is one pipeline entry in the autoconf YAML file.
type pipeline struct {
	Name             string                    `yaml:"name"`
	Repository       string                    `yaml:"repository"`
	Steps            []buildkite.Step          `yaml:"steps"`
	ProviderSettings *buildkite.GitHubSettings `yaml:"provider_settings"`
}
// asCreatePipeline converts the YAML pipeline definition into the payload
// expected by the Buildkite create-pipeline API.
func (p pipeline) asCreatePipeline() *buildkite.CreatePipeline {
	r := buildkite.CreatePipeline{
		Name:             p.Name,
		Repository:       p.Repository,
		Steps:            make([]buildkite.Step, len(p.Steps)),
		ProviderSettings: p.ProviderSettings,
	}
	// copy replaces the element-by-element loop; same result, standard idiom.
	copy(r.Steps, p.Steps)
	return &r
}
// asUpdatedPipeline overlays the local pipeline definition onto an existing
// remote pipeline so it can be sent to the update API.
//
// The remote step list is rebuilt instead of written in place: the previous
// code indexed r.Steps by the local step index and panicked with an
// out-of-range error whenever the local config defined more steps than the
// remote pipeline currently had.
func (p pipeline) asUpdatedPipeline(r *buildkite.Pipeline) *buildkite.Pipeline {
	steps := make([]*buildkite.Step, len(p.Steps))
	for i := range p.Steps {
		steps[i] = &p.Steps[i]
	}
	r.Steps = steps
	r.Provider.Settings = p.ProviderSettings
	return r
}
// autoconf is the root of the autoconf YAML file: the list of pipelines to
// create or update.
type autoconf struct {
	Pipelines []*pipeline `yaml:"pipelines"`
}
// init parses the command-line flags, configures logging, and builds the
// shared Buildkite API client. It exits the process if the log level is
// unparsable or the API token is rejected.
func init() {
	flag.StringVar(&apiToken, "token", "", "Buildkite API token")
	flag.StringVar(&org, "org", "", "Buildkite organisation slug")
	flag.StringVar(&configFile, "config", ".buildkite/autoconf.yaml", "Configuration file")
	flag.StringVar(&logLevel, "log-level", log.DebugLevel.String(), "Logging level")
	flag.StringVar(&logFormat, "log-format", "fluentd", "Logging format, one of text, json or fluentd")
	flag.Parse()
	switch logFormat {
	case "text":
		log.SetFormatter(&log.TextFormatter{})
	case "json":
		log.SetFormatter(&log.JSONFormatter{})
	case "fluentd":
		log.SetFormatter(&joonix.FluentdFormatter{})
	default:
		// Previously an unknown format was silently ignored; warn but keep
		// the logrus default formatter, preserving the old behaviour.
		fmt.Printf("unknown log format %q, using the logrus default\n", logFormat)
	}
	level, err := log.ParseLevel(logLevel)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	log.SetLevel(level)
	config, err := buildkite.NewTokenConfig(apiToken, true)
	if err != nil {
		log.Fatalf("client config failed: %s", err)
	}
	client = buildkite.NewClient(config.Client())
}
// main reads the autoconf YAML file and reconciles each declared pipeline
// against the Buildkite organisation: existing pipelines are updated in
// place, missing ones are created.
func main() {
	contextLog := log.WithFields(log.Fields{
		"filename": configFile,
	})
	contextLog.Debug("Opening config file")
	// The Open/ReadAll/Unmarshal errors were previously discarded; a missing
	// file made ReadAll panic on a nil *os.File.
	autoconfFile, err := os.Open(configFile)
	if err != nil {
		contextLog.Fatalf("opening config file: %s", err)
	}
	defer autoconfFile.Close()
	autoconfBytes, err := ioutil.ReadAll(autoconfFile)
	if err != nil {
		contextLog.Fatalf("reading config file: %s", err)
	}
	var autoconfData autoconf
	contextLog.Debug("Unmarshalling config YAML")
	if err := yaml.Unmarshal(autoconfBytes, &autoconfData); err != nil {
		contextLog.Fatalf("parsing config YAML: %s", err)
	}
	for _, p := range autoconfData.Pipelines {
		contextLog := contextLog.WithField("pipeline", p.Name)
		contextLog.Debug("Checking for existing pipeline")
		// Get's error is deliberately ignored: a lookup failure (e.g. 404)
		// leaves existingPipe nil and falls through to the create path.
		existingPipe, _, _ := client.Pipelines.Get(org, p.Name)
		if existingPipe != nil {
			up := p.asUpdatedPipeline(existingPipe)
			upJSON, _ := json.Marshal(up)
			contextLog.WithField("pipeline_data", string(upJSON)).Debug("Updating existing pipeline")
			_, err := client.Pipelines.Update(org, up)
			if err != nil {
				contextLog.Error(err)
				contextLog.Error(string(upJSON))
			}
		} else {
			cp := p.asCreatePipeline()
			cpJSON, _ := json.Marshal(cp)
			contextLog.WithField("pipeline_data", string(cpJSON)).Debug("Creating new pipeline")
			_, _, err := client.Pipelines.Create(org, cp)
			if err != nil {
				contextLog.Error(err)
				contextLog.Error(string(cpJSON))
			}
		}
	}
}
|
package logger
import (
"fmt"
"io"
"os"
"sync"
"time"
"github.com/fatih/color"
)
// LogLevel is The severity level of the logs
// 0 is the lowest severity
type LogLevel int

// LogDispatcher dispatches logs to multiple Loggers
//
// Backends are identified by name so they can be registered and removed at
// runtime; each Log call fans out to every registered backend.
type LogDispatcher interface {
	Log(level LogLevel, msg string)
	Register(name string, backend Logger)
	Unregister(name string)
}

// A Logger formats and writes log entries
//
// It composes LogFormatter and LogWriter, and filters entries above the
// level set via SetLevel.
type Logger interface {
	LogFormatter
	LogWriter
	Log(timestamp time.Time, level LogLevel, msg string)
	SetLevel(level LogLevel)
}
// Log levels. Lower values are more severe; a logger's filter keeps entries
// with level <= the configured maximum. The LevelWarning+N values slot the
// test-result levels between Warning (300) and Info (400) so they are shown
// whenever warnings are.
const (
	// LevelCritical represents the Critical Log Level
	LevelCritical = 100
	// LevelError represents the Error log level
	LevelError = 200
	// LevelWarning represents the Warning log level
	LevelWarning = 300
	// LevelInfo represents the Informational logging level
	LevelInfo = 400
	// LevelDebug represents the Debug log level
	LevelDebug = 500
	// LevelStderr represents the Stderr log level
	LevelStderr = LevelWarning + 6
	// LevelStdout represents the Stdout log level
	LevelStdout = LevelWarning + 7
	// LevelSkip represents the Skip log level
	LevelSkip = LevelWarning + 1
	// LevelPass represents the Pass log level
	LevelPass = LevelWarning + 2
	// LevelCancel represents the Cancel log level
	LevelCancel = LevelWarning + 3
	// LevelFail represents the Fail log level
	LevelFail = LevelWarning + 4
	// LevelSummary represents the Summary log level
	LevelSummary = LevelWarning + 5
)
// LevelNames maps LogLevels to a string representation of their names
var LevelNames = map[LogLevel]string{
	LevelCritical: "CRITICAL",
	LevelError:    "ERROR",
	LevelWarning:  "WARNING",
	LevelInfo:     "INFO",
	LevelDebug:    "DEBUG",
	LevelStderr:   "STDERR",
	LevelStdout:   "STDOUT",
	LevelSkip:     "SKIP",
	LevelPass:     "PASS",
	LevelCancel:   "CANCEL",
	LevelFail:     "FAIL",
	LevelSummary:  "SUMMARY",
}
// logDispatcher is the default LogDispatcher implementation: a named set of
// backends guarded by an embedded RWMutex (read-locked for Log, write-locked
// for Register/Unregister).
type logDispatcher struct {
	Backends map[string]Logger
	sync.RWMutex
}
// Log dispatches a Log entry to each backend
//
// The timestamp is captured once so every backend records the same instant.
func (d *logDispatcher) Log(level LogLevel, msg string) {
	d.RLock()
	defer d.RUnlock()
	now := time.Now()
	for _, backend := range d.Backends {
		backend.Log(now, level, msg)
	}
}
// Register adds (or replaces) a backend under the given name.
func (d *logDispatcher) Register(name string, backend Logger) {
	d.Lock()
	defer d.Unlock()
	d.Backends[name] = backend
}
// Unregister removes the backend registered under the given name, if any.
func (d *logDispatcher) Unregister(name string) {
	d.Lock()
	defer d.Unlock()
	delete(d.Backends, name)
}
// NewLogDispatcher returns a new LogDispatcher with the provided backend Loggers
//
// A nil backends map is replaced with an empty one, so a later Register does
// not write to a nil map and panic.
func NewLogDispatcher(backends map[string]Logger) LogDispatcher {
	if backends == nil {
		backends = make(map[string]Logger)
	}
	return &logDispatcher{Backends: backends}
}
// LogFormatter formats log entries in to a string
type LogFormatter interface {
	Format(timestamp time.Time, level LogLevel, msg string) string
}

// LogWriter writes log entries somewhere
//
// The interface has no error return; implementations decide how to handle
// write failures.
type LogWriter interface {
	Write(entry string)
}
// logger is the default Logger: a formatter plus a writer, with entries
// above levelFilter dropped.
type logger struct {
	LogFormatter
	LogWriter
	levelFilter LogLevel
}
// Log formats a log entry and writes it
//
// Entries above the configured level filter are silently dropped.
func (l *logger) Log(timestamp time.Time, level LogLevel, msg string) {
	if level > l.levelFilter {
		return
	}
	l.Write(l.Format(timestamp, level, msg))
}
// SetLevel sets maximum logging level
//
// Entries with a level greater than this value are discarded by Log.
func (l *logger) SetLevel(level LogLevel) {
	l.levelFilter = level
}
type ioLogWriter struct {
writer io.Writer
}
// Write writes to the underlying io.Writer
func (i ioLogWriter) Write(entry string) {
n, err := io.WriteString(i.writer, entry)
if err != nil {
panic(err)
}
if n == 0 {
panic("Wrote 0 bytes to file")
}
}
// ColourMap maps logLevels to colorizing functions
type ColourMap map[LogLevel]func(...interface{}) string

// consoleLogFormatter renders entries for terminal output, optionally
// colourizing the level label via colourMap.
type consoleLogFormatter struct {
	coloured  bool
	colourMap ColourMap
}
// defaultColourMap is the colour scheme used when a caller passes no custom
// ColourMap to NewConsoleLogger.
// NOTE(review): LevelInfo/LevelStdout/LevelSummary have no entry here, so
// those labels render uncoloured — presumably intentional, but confirm.
var defaultColourMap = ColourMap{
	LevelCritical: color.New(color.FgRed, color.Bold).SprintFunc(),
	LevelError:    color.New(color.FgRed).SprintFunc(),
	LevelWarning:  color.New(color.FgYellow).SprintFunc(),
	LevelInfo:     color.New(color.FgBlue).SprintFunc(),
	LevelDebug:    color.New(color.FgWhite).SprintFunc(),
	LevelStderr:   color.New(color.FgRed).SprintFunc(),
	LevelSkip:     color.New(color.FgYellow, color.Bold).SprintFunc(),
	LevelPass:     color.New(color.FgGreen, color.Bold).SprintFunc(),
	LevelCancel:   color.New(color.FgMagenta, color.Bold).SprintFunc(),
	LevelFail:     color.New(color.FgRed, color.Bold).SprintFunc(),
}
// Format formats the log for writing to console
//
// Test-result levels print "[LEVEL   ] msg"; everything else also carries a
// timestamp between the label and the message.
func (c consoleLogFormatter) Format(timestamp time.Time, level LogLevel, msg string) string {
	label := fmt.Sprintf("[%-8s]", LevelNames[level])
	if c.coloured {
		if colourer, ok := c.colourMap[level]; ok {
			label = colourer(label)
		}
	}
	switch level {
	case LevelPass, LevelFail, LevelSkip, LevelSummary, LevelCancel:
		return fmt.Sprintf("%s %s\n", label, msg)
	default:
		// Format is time.RFC3339Nano but with trailing zeroes preserved on the nanosecond field (s/9/0/)
		return fmt.Sprintf("%s %s: %s\n", label, timestamp.Format("2006-01-02T15:04:05.000000000Z07:00"), msg)
	}
}
// NewConsoleLogger returns a new logger that logs to stderr in console log format
//
// When coloured is true a nil colourMap falls back to defaultColourMap.
// The logger starts with a LevelWarning filter.
func NewConsoleLogger(coloured bool, colourMap *ColourMap) Logger {
	formatter := consoleLogFormatter{}
	if coloured {
		cm := colourMap
		if cm == nil {
			cm = &defaultColourMap
		}
		formatter = consoleLogFormatter{
			coloured:  true,
			colourMap: *cm,
		}
	}
	return &logger{
		formatter,
		ioLogWriter{writer: os.Stderr},
		LevelWarning,
	}
}
// NewFileLogger returns a new logger that logs to file in a console log format
//
// Output is never colourized; the logger starts with a LevelWarning filter.
func NewFileLogger(f *os.File) Logger {
	return &logger{
		LogFormatter: consoleLogFormatter{coloured: false},
		LogWriter:    ioLogWriter{writer: f},
		levelFilter:  LevelWarning,
	}
}
|
package msp
import (
"github.com/HNB-ECO/HNB-Blockchain/HNB/bccsp"
"github.com/HNB-ECO/HNB-Blockchain/HNB/bccsp/factory"
"github.com/HNB-ECO/HNB-Blockchain/HNB/bccsp/secp256k1"
"github.com/HNB-ECO/HNB-Blockchain/HNB/bccsp/sw"
"bytes"
"crypto/elliptic"
"fmt"
"strconv"
)
// Supported key-algorithm identifiers. Only ECDSAP256 (secp256k1) is
// currently implemented by the helpers below; the rest are reserved.
const (
	ECDSAP224 = 0
	ECDSAP256 = 1
	ECDSAP384 = 2
	ECDSAP521 = 3
	SM2       = 4
	RSA1024   = 5
	RSA2048   = 6
	ED25519   = 7
)
// ByteToHex encodes a byte slice as a lowercase hexadecimal string,
// two zero-padded characters per byte.
func ByteToHex(data []byte) string {
	buffer := new(bytes.Buffer)
	for _, b := range data {
		// %02x zero-pads single-digit values, replacing the manual
		// "0"-prefix bookkeeping and the redundant b&0xff mask (b is
		// already a byte).
		fmt.Fprintf(buffer, "%02x", b)
	}
	return buffer.String()
}
// HexToByte decodes a hexadecimal string into a byte slice, two input
// characters per output byte. Parse errors are ignored (the corresponding
// byte stays 0, matching the original best-effort behaviour) and a trailing
// odd character is dropped.
func HexToByte(hex string) []byte {
	length := len(hex) / 2
	slice := make([]byte, length)
	rs := []rune(hex)
	for i := 0; i < length; i++ {
		s := string(rs[i*2 : i*2+2])
		// bitSize 8 covers the full 0x00-0xff range of a hex byte pair.
		// The previous ParseInt(s, 16, 10) only worked by accident: two
		// hex digits never exceed 255, which happens to fit in 10 bits.
		value, _ := strconv.ParseUint(s, 16, 8)
		slice[i] = byte(value)
	}
	return slice
}
// GeneratePriKey generates an ephemeral private key for the given algorithm
// type using the default BCCSP factory. Only ECDSAP256 (secp256k1) is
// supported; other algorithm types return an error.
func GeneratePriKey(algType int) (bccsp.Key, error) {
	bccspInstance := factory.GetDefault()
	switch algType {
	case ECDSAP256: // ECDSAP256-SHA256
		priKey, err := bccspInstance.KeyGen(&bccsp.ECDSAP256K1KeyGenOpts{Temporary: true})
		if err != nil {
			return nil, err
		}
		return priKey, nil
	default:
		fmt.Printf("algType not support : %v\n", algType)
		return nil, fmt.Errorf("algType not support")
	}
	// The unreachable trailing `return nil, nil` was removed: every switch
	// branch returns, so the switch is a terminating statement.
}
// GetPriKey extracts the raw private-key object from a BCCSP key wrapper.
// Only ECDSAP256 (secp256k1) is supported.
//
// NOTE(review): the type assertion panics when key is not a
// *sw.Ecdsa256K1PrivateKey — confirm callers only pass matching keys.
func GetPriKey(algType int, key bccsp.Key) (interface{}, error) {
	switch algType {
	case ECDSAP256:
		priKey := key.(*sw.Ecdsa256K1PrivateKey).PrivKey
		return priKey, nil
	default:
		fmt.Printf("algType not support : %v\n", algType)
		return nil, fmt.Errorf("algType not support")
	}
	// Unreachable trailing `return nil, nil` removed (dead code).
}
// GetPriKeyBytes returns the big-endian bytes of the private scalar D for a
// BCCSP key wrapper. Only ECDSAP256 (secp256k1) is supported.
//
// NOTE(review): the type assertion panics when key is not a
// *sw.Ecdsa256K1PrivateKey — confirm callers only pass matching keys.
func GetPriKeyBytes(algType int, key bccsp.Key) ([]byte, error) {
	switch algType {
	case ECDSAP256:
		priKey := key.(*sw.Ecdsa256K1PrivateKey).PrivKey.D.Bytes()
		return priKey, nil
	default:
		fmt.Printf("algType not support : %v\n", algType)
		return nil, fmt.Errorf("algType not support")
	}
	// Unreachable trailing `return nil, nil` removed (dead code).
}
// GetPubKey extracts the raw public-key object from a BCCSP key wrapper.
// Only ECDSAP256 (secp256k1) is supported.
//
// NOTE(review): the type assertion panics when key is not a
// *sw.Ecdsa256K1PublicKey — confirm callers only pass matching keys.
func GetPubKey(algType int, key bccsp.Key) (interface{}, error) {
	switch algType {
	case ECDSAP256:
		pubKey := key.(*sw.Ecdsa256K1PublicKey).PubKey
		return pubKey, nil
	default:
		fmt.Printf("algType not support : %v\n", algType)
		return nil, fmt.Errorf("algType not support")
	}
	// Unreachable trailing `return nil, nil` removed (dead code).
}
// GetPubKeyBytes returns the uncompressed curve-point encoding (via
// elliptic.Marshal on the secp256k1 curve) of the public key held in a
// BCCSP key wrapper. Only ECDSAP256 is supported.
//
// NOTE(review): the type assertion panics when key is not a
// *sw.Ecdsa256K1PublicKey — confirm callers only pass matching keys.
func GetPubKeyBytes(algType int, key bccsp.Key) ([]byte, error) {
	switch algType {
	case ECDSAP256:
		pk := key.(*sw.Ecdsa256K1PublicKey)
		pubkeyBytes := elliptic.Marshal(secp256k1.S256(), pk.PubKey.X, pk.PubKey.Y)
		return pubkeyBytes, nil
	default:
		fmt.Printf("algType not support : %v\n", algType)
		return nil, fmt.Errorf("algType not support")
	}
	// Unreachable trailing `return nil, nil` removed (dead code).
}
|
package traefik_plugin_geoip2_test
import (
"context"
traefik_plugin_geoip2 "github.com/negasus/traefik-plugin-geoip2"
"net/http"
"net/http/httptest"
"testing"
)
// TestGeoIP2_WrongNew verifies that constructing the plugin with an empty
// configuration (no database filename) fails.
func TestGeoIP2_WrongNew(t *testing.T) {
	cfg := traefik_plugin_geoip2.CreateConfig()
	if _, err := traefik_plugin_geoip2.New(context.Background(), nil, cfg, "geoip2-plugin"); err == nil {
		t.Fatal("expect error")
	}
}
// TestGeoIP2_Default exercises the plugin in its default configuration: the
// country is resolved from the request's RemoteAddr and written to X-Country.
func TestGeoIP2_Default(t *testing.T) {
	cfg := traefik_plugin_geoip2.CreateConfig()
	cfg.Filename = "GeoLite2-Country.mmdb"
	ctx := context.Background()
	next := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {})
	handler, err := traefik_plugin_geoip2.New(ctx, next, cfg, "geoip2-plugin")
	if err != nil {
		t.Fatal(err)
	}
	cases := []struct {
		remoteAddr string
		country    string
	}{
		{remoteAddr: "4.0.0.0", country: "US"},
		{remoteAddr: "109.194.11.1", country: "RU"},
		{remoteAddr: "1.6.0.0", country: "IN"},
		{remoteAddr: "2.0.0.0", country: "FR"},
		// Private, loopback and malformed addresses must not resolve.
		{remoteAddr: "192.168.1.1", country: ""},
		{remoteAddr: "127.0.0.1", country: ""},
		{remoteAddr: "WRONG VALUE", country: ""},
	}
	for _, tc := range cases {
		t.Run(tc.remoteAddr, func(t *testing.T) {
			rec := httptest.NewRecorder()
			req, reqErr := http.NewRequestWithContext(ctx, http.MethodGet, "http://localhost", nil)
			if reqErr != nil {
				t.Fatal(reqErr)
			}
			req.RemoteAddr = tc.remoteAddr
			handler.ServeHTTP(rec, req)
			assertHeader(t, req, "X-Country", tc.country)
		})
	}
}
// TestGeoIP2_FromHeader verifies that when FromHeader is configured, the
// lookup address is taken from that header instead of RemoteAddr.
func TestGeoIP2_FromHeader(t *testing.T) {
	cfg := traefik_plugin_geoip2.CreateConfig()
	cfg.Filename = "GeoLite2-Country.mmdb"
	cfg.FromHeader = "X-Real-IP"
	ctx := context.Background()
	next := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {})
	handler, err := traefik_plugin_geoip2.New(ctx, next, cfg, "geoip2-plugin")
	if err != nil {
		t.Fatal(err)
	}
	cases := []struct {
		remoteAddr string
		xRealIP    string
		country    string
	}{
		// RemoteAddr alone must not resolve once FromHeader is set.
		{remoteAddr: "4.0.0.0", xRealIP: "", country: ""},
		{remoteAddr: "4.0.0.0", xRealIP: "2.0.0.0", country: "FR"},
	}
	for _, tc := range cases {
		t.Run(tc.remoteAddr, func(t *testing.T) {
			rec := httptest.NewRecorder()
			req, reqErr := http.NewRequestWithContext(ctx, http.MethodGet, "http://localhost", nil)
			if reqErr != nil {
				t.Fatal(reqErr)
			}
			req.RemoteAddr = tc.remoteAddr
			req.Header.Add("X-Real-IP", tc.xRealIP)
			handler.ServeHTTP(rec, req)
			assertHeader(t, req, "X-Country", tc.country)
		})
	}
}
// TestGeoIP2_CountryHeader verifies that a configured CountryHeader
// replaces the default "X-Country" output header entirely.
func TestGeoIP2_CountryHeader(t *testing.T) {
	cfg := traefik_plugin_geoip2.CreateConfig()
	cfg.Filename = "GeoLite2-Country.mmdb"
	cfg.CountryHeader = "X-Custom"
	ctx := context.Background()
	next := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {})
	handler, err := traefik_plugin_geoip2.New(ctx, next, cfg, "geoip2-plugin")
	if err != nil {
		t.Fatal(err)
	}
	tests := []struct {
		remoteAddr string
		country    string
	}{
		{remoteAddr: "4.0.0.0", country: "US"},
	}
	for _, tt := range tests {
		t.Run(tt.remoteAddr, func(t *testing.T) {
			recorder := httptest.NewRecorder()
			req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://localhost", nil)
			if err != nil {
				t.Fatal(err)
			}
			req.RemoteAddr = tt.remoteAddr
			handler.ServeHTTP(recorder, req)
			// The default header must stay empty; only the custom one is set.
			assertHeader(t, req, "X-Country", "")
			assertHeader(t, req, "X-Custom", tt.country)
		})
	}
}
func assertHeader(t *testing.T, req *http.Request, key, expected string) {
t.Helper()
if req.Header.Get(key) != expected {
t.Errorf("invalid header value: '%s'", req.Header.Get(key))
}
}
|
package _257_Binary_Tree_Paths
import (
"fmt"
"strings"
)
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// binaryTreePaths returns every root-to-leaf path of the tree formatted
// as "v1->v2->...->vn" strings (LeetCode 257).
func binaryTreePaths(root *TreeNode) []string {
	if root == nil {
		return []string{}
	}
	var paths []string
	binaryTreePathsRecursion(root, nil, &paths)
	return paths
}
// binaryTreePathsRecursion walks the subtree rooted at node, carrying the
// ancestors' labels in trace, and appends one "a->b->c" string to *ret for
// every leaf it reaches.
//
// Note on slice aliasing: sibling calls may share trace's backing array,
// but each call overwrites its own slot before the path is joined at a
// leaf, so the produced strings are correct.
func binaryTreePathsRecursion(node *TreeNode, trace []string, ret *[]string) {
	// Record the current node's value on the path.
	trace = append(trace, fmt.Sprint(node.Val))
	if node.Left == nil && node.Right == nil {
		// Leaf: materialize the accumulated path.
		*ret = append(*ret, strings.Join(trace, "->"))
		return
	}
	if node.Left != nil {
		binaryTreePathsRecursion(node.Left, trace, ret)
	}
	if node.Right != nil {
		binaryTreePathsRecursion(node.Right, trace, ret)
	}
}
|
// +build nobootstrap
package update
import (
"archive/tar"
"compress/bzip2"
"io"
"net/http"
"os"
"path"
"core"
)
const url = "https://bitbucket.org/squeaky/portable-pypy/downloads/pypy-5.6-linux_x86_64-portable.tar.bz2"
// DownloadPyPy attempts to download a standalone PyPy distribution.
// We use this to try to deal with the case where there is no loadable interpreter.
// It also simplifies installation instructions on Linux where we can't use upstream packages
// often because they aren't built with --enable-shared.
// It returns true only when a new distribution was downloaded and extracted;
// false means a copy was already present or the download failed.
func DownloadPyPy(config *core.Configuration) bool {
	log.Notice("Checking if we've got a PyPy instance ready...")
	dest := core.GenDir + "/_remote/_pypy"
	// Presence of the shared library is used as the marker that a previous
	// download completed.
	so := path.Join(dest, "bin/libpypy-c.so")
	if core.PathExists(so) {
		log.Notice("Found PyPy at %s", so)
		return false
	}
	log.Warning("Attempting to download a portable PyPy distribution...")
	return downloadPyPy(dest)
}
// downloadPyPy fetches the PyPy tarball from url and extracts it into
// destination, replacing anything already there. It returns true on success.
func downloadPyPy(destination string) bool {
	// Start from a clean slate so a previous partial extraction can't
	// masquerade as a complete install.
	if err := os.RemoveAll(destination); err != nil {
		log.Error("Can't remove %s: %s", destination, err)
		return false
	}
	resp, err := http.Get(url)
	if err != nil {
		log.Error("Failed to download PyPy: %s", err)
		return false
	}
	defer resp.Body.Close()
	// A non-2xx response would hand an HTML error page to the bzip2
	// decoder; fail early with a clear message instead.
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		log.Error("Failed to download PyPy: %s", resp.Status)
		return false
	}
	bzreader := bzip2.NewReader(resp.Body)
	tarball := tar.NewReader(bzreader)
	for {
		hdr, err := tarball.Next()
		if err == io.EOF {
			break // End of archive
		} else if err != nil {
			log.Error("Error reading tarball: %s", err)
			return false
		} else if err := writeTarFile(hdr, tarball, destination); err != nil {
			log.Error("Error extracting tarball: %s", err)
			return false
		}
	}
	log.Notice("Downloaded PyPy successfully")
	return true
}
|
package slack
import (
"encoding/json"
"fmt"
"testing"
"net/http"
"github.com/stretchr/testify/assert"
)
// Dialogs
var simpleDialog = `{
"callback_id":"ryde-46e2b0",
"title":"Request a Ride",
"submit_label":"Request",
"notify_on_cancel":true
}`
var simpleTextElement = `{
"label": "testing label",
"name": "testing name",
"type": "text",
"placeholder": "testing placeholder",
"optional": true,
"value": "testing value",
"max_length": 1000,
"min_length": 10,
"hint": "testing hint",
"subtype": "email"
}`
var simpleSelectElement = `{
"label": "testing label",
"name": "testing name",
"type": "select",
"placeholder": "testing placeholder",
"optional": true,
"value": "testing value",
"data_source": "users",
"selected_options": [],
"options": [{"label": "option 1", "value": "1"}],
"option_groups": []
}`
// unmarshalDialog builds the shared Dialog fixture: the simple dialog JSON
// with one text element and one select element decoded and attached.
func unmarshalDialog() (*Dialog, error) {
	dialog := &Dialog{}
	// Unmarshall the simple dialog json
	if err := json.Unmarshal([]byte(simpleDialog), &dialog); err != nil {
		return nil, err
	}
	// Unmarshall and append the text element
	textElement := &TextInputElement{}
	if err := json.Unmarshal([]byte(simpleTextElement), &textElement); err != nil {
		return nil, err
	}
	// Unmarshall and append the select element
	selectElement := &DialogInputSelect{}
	if err := json.Unmarshal([]byte(simpleSelectElement), &selectElement); err != nil {
		return nil, err
	}
	dialog.Elements = []DialogElement{
		textElement,
		selectElement,
	}
	return dialog, nil
}
// TestSimpleDialog checks that a dialog decoded from JSON round-trips into
// the expected field values.
func TestSimpleDialog(t *testing.T) {
	dialog, err := unmarshalDialog()
	assert.Nil(t, err)
	assertSimpleDialog(t, dialog)
}
// TestCreateSimpleDialog builds the same dialog as the JSON fixture
// programmatically and checks it satisfies the same assertions.
func TestCreateSimpleDialog(t *testing.T) {
	dialog := &Dialog{}
	dialog.CallbackID = "ryde-46e2b0"
	dialog.Title = "Request a Ride"
	dialog.SubmitLabel = "Request"
	dialog.NotifyOnCancel = true
	textElement := &TextInputElement{}
	textElement.Label = "testing label"
	textElement.Name = "testing name"
	textElement.Type = "text"
	textElement.Placeholder = "testing placeholder"
	textElement.Optional = true
	textElement.Value = "testing value"
	textElement.MaxLength = 1000
	textElement.MinLength = 10
	textElement.Hint = "testing hint"
	textElement.Subtype = "email"
	selectElement := &DialogInputSelect{}
	selectElement.Label = "testing label"
	selectElement.Name = "testing name"
	selectElement.Type = "select"
	selectElement.Placeholder = "testing placeholder"
	selectElement.Optional = true
	selectElement.Value = "testing value"
	selectElement.DataSource = "users"
	selectElement.SelectedOptions = []DialogSelectOption{}
	selectElement.Options = []DialogSelectOption{
		{Label: "option 1", Value: "1"},
	}
	selectElement.OptionGroups = []DialogOptionGroup{}
	dialog.Elements = []DialogElement{
		textElement,
		selectElement,
	}
	assertSimpleDialog(t, dialog)
}
// assertSimpleDialog verifies every field of the shared dialog fixture:
// the top-level dialog, its text element and its select element.
func assertSimpleDialog(t *testing.T, dialog *Dialog) {
	assert.NotNil(t, dialog)
	// Test the main dialog fields
	assert.Equal(t, "ryde-46e2b0", dialog.CallbackID)
	assert.Equal(t, "Request a Ride", dialog.Title)
	assert.Equal(t, "Request", dialog.SubmitLabel)
	assert.Equal(t, true, dialog.NotifyOnCancel)
	// Test the text element is correctly parsed
	textElement := dialog.Elements[0].(*TextInputElement)
	assert.Equal(t, "testing label", textElement.Label)
	assert.Equal(t, "testing name", textElement.Name)
	assert.Equal(t, InputTypeText, textElement.Type)
	assert.Equal(t, "testing placeholder", textElement.Placeholder)
	assert.Equal(t, true, textElement.Optional)
	assert.Equal(t, "testing value", textElement.Value)
	assert.Equal(t, 1000, textElement.MaxLength)
	assert.Equal(t, 10, textElement.MinLength)
	assert.Equal(t, "testing hint", textElement.Hint)
	assert.Equal(t, InputSubtypeEmail, textElement.Subtype)
	// Test the select element is correctly parsed
	selectElement := dialog.Elements[1].(*DialogInputSelect)
	assert.Equal(t, "testing label", selectElement.Label)
	assert.Equal(t, "testing name", selectElement.Name)
	assert.Equal(t, InputTypeSelect, selectElement.Type)
	assert.Equal(t, "testing placeholder", selectElement.Placeholder)
	assert.Equal(t, true, selectElement.Optional)
	assert.Equal(t, "testing value", selectElement.Value)
	assert.Equal(t, DialogDataSourceUsers, selectElement.DataSource)
	assert.Equal(t, []DialogSelectOption{}, selectElement.SelectedOptions)
	assert.Equal(t, "option 1", selectElement.Options[0].Label)
	assert.Equal(t, "1", selectElement.Options[0].Value)
	assert.Equal(t, 0, len(selectElement.OptionGroups))
}
// Callbacks
var simpleCallback = `{
"type": "dialog_submission",
"submission": {
"name": "Sigourney Dreamweaver",
"email": "sigdre@example.com",
"phone": "+1 800-555-1212",
"meal": "burrito",
"comment": "No sour cream please",
"team_channel": "C0LFFBKPB",
"who_should_sing": "U0MJRG1AL"
},
"callback_id": "employee_offsite_1138b",
"team": {
"id": "T1ABCD2E12",
"domain": "coverbands"
},
"user": {
"id": "W12A3BCDEF",
"name": "dreamweaver"
},
"channel": {
"id": "C1AB2C3DE",
"name": "coverthon-1999"
},
"action_ts": "936893340.702759",
"token": "M1AqUUw3FqayAbqNtsGMch72",
"response_url": "https://hooks.slack.com/app/T012AB0A1/123456789/JpmK0yzoZDeRiqfeduTBYXWQ"
}`
// unmarshalCallback decodes the given JSON document into a DialogCallback.
func unmarshalCallback(j string) (*DialogCallback, error) {
	var cb DialogCallback
	if err := json.Unmarshal([]byte(j), &cb); err != nil {
		return nil, err
	}
	return &cb, nil
}
// TestSimpleCallback checks that a dialog submission callback decodes into
// the expected field values.
func TestSimpleCallback(t *testing.T) {
	callback, err := unmarshalCallback(simpleCallback)
	assert.Nil(t, err)
	assertSimpleCallback(t, callback)
}
// assertSimpleCallback verifies every field of the dialog submission
// callback fixture, including the submission key/value map.
func assertSimpleCallback(t *testing.T, callback *DialogCallback) {
	assert.NotNil(t, callback)
	assert.Equal(t, InteractionTypeDialogSubmission, callback.Type)
	assert.Equal(t, "employee_offsite_1138b", callback.CallbackID)
	assert.Equal(t, "T1ABCD2E12", callback.Team.ID)
	assert.Equal(t, "coverbands", callback.Team.Domain)
	assert.Equal(t, "C1AB2C3DE", callback.Channel.ID)
	assert.Equal(t, "coverthon-1999", callback.Channel.Name)
	assert.Equal(t, "W12A3BCDEF", callback.User.ID)
	assert.Equal(t, "dreamweaver", callback.User.Name)
	assert.Equal(t, "936893340.702759", callback.ActionTs)
	assert.Equal(t, "M1AqUUw3FqayAbqNtsGMch72", callback.Token)
	assert.Equal(t, "https://hooks.slack.com/app/T012AB0A1/123456789/JpmK0yzoZDeRiqfeduTBYXWQ", callback.ResponseURL)
	assert.Equal(t, "Sigourney Dreamweaver", callback.Submission["name"])
	assert.Equal(t, "sigdre@example.com", callback.Submission["email"])
	assert.Equal(t, "+1 800-555-1212", callback.Submission["phone"])
	assert.Equal(t, "burrito", callback.Submission["meal"])
	assert.Equal(t, "No sour cream please", callback.Submission["comment"])
	assert.Equal(t, "C0LFFBKPB", callback.Submission["team_channel"])
	assert.Equal(t, "U0MJRG1AL", callback.Submission["who_should_sing"])
}
// Suggestion Callbacks
var simpleSuggestionCallback = `{
"type": "dialog_suggestion",
"token": "W3VDvuzi2nRLsiaDOsmJranO",
"action_ts": "1528203589.238335",
"team": {
"id": "T24BK35ML",
"domain": "hooli-hq"
},
"user": {
"id": "U900MV5U7",
"name": "gbelson"
},
"channel": {
"id": "C012AB3CD",
"name": "triage-platform"
},
"name": "external_data",
"value": "test",
"callback_id": "bugs"
}`
// unmarshalSuggestionCallback decodes the given JSON document into an
// InteractionCallback.
func unmarshalSuggestionCallback(j string) (*InteractionCallback, error) {
	var cb InteractionCallback
	if err := json.Unmarshal([]byte(j), &cb); err != nil {
		return nil, err
	}
	return &cb, nil
}
// TestSimpleSuggestionCallback checks that a dialog suggestion callback
// decodes into the expected field values.
func TestSimpleSuggestionCallback(t *testing.T) {
	callback, err := unmarshalSuggestionCallback(simpleSuggestionCallback)
	assert.Nil(t, err)
	assertSimpleSuggestionCallback(t, callback)
}
// assertSimpleSuggestionCallback verifies every field of the dialog
// suggestion callback fixture.
func assertSimpleSuggestionCallback(t *testing.T, callback *InteractionCallback) {
	assert.NotNil(t, callback)
	assert.Equal(t, InteractionTypeDialogSuggestion, callback.Type)
	assert.Equal(t, "W3VDvuzi2nRLsiaDOsmJranO", callback.Token)
	assert.Equal(t, "1528203589.238335", callback.ActionTs)
	assert.Equal(t, "T24BK35ML", callback.Team.ID)
	assert.Equal(t, "hooli-hq", callback.Team.Domain)
	assert.Equal(t, "U900MV5U7", callback.User.ID)
	assert.Equal(t, "gbelson", callback.User.Name)
	assert.Equal(t, "C012AB3CD", callback.Channel.ID)
	assert.Equal(t, "triage-platform", callback.Channel.Name)
	assert.Equal(t, "external_data", callback.Name)
	assert.Equal(t, "test", callback.Value)
	assert.Equal(t, "bugs", callback.CallbackID)
}
// openDialogHandler is a stub HTTP handler for the dialog.open endpoint
// that always answers with a successful SlackResponse.
func openDialogHandler(rw http.ResponseWriter, r *http.Request) {
	rw.Header().Set("Content-Type", "application/json")
	// The previous anonymous struct merely embedded SlackResponse and
	// produced identical JSON; marshal the response type directly.
	response, _ := json.Marshal(SlackResponse{Ok: true})
	rw.Write(response)
}
// TestOpenDialog checks OpenDialog against a stubbed dialog.open endpoint,
// both with a valid trigger ID and with an empty one (which must fail).
func TestOpenDialog(t *testing.T) {
	http.HandleFunc("/dialog.open", openDialogHandler)
	once.Do(startServer)
	api := New("testing-token", OptionAPIURL("http://"+serverAddr+"/"))
	dialog, err := unmarshalDialog()
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
		return
	}
	err = api.OpenDialog("TXXXXXXXX", *dialog)
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
		return
	}
	err = api.OpenDialog("", *dialog)
	if err == nil {
		// err is necessarily nil on this branch; the old message
		// formatted it into the output, which always printed "%!s(<nil>)".
		t.Errorf("expected an error when opening a dialog with an empty trigger ID")
		return
	}
}
// Fixture values shared by the dialog construction tests below.
const (
	triggerID      = "trigger_xyz"
	callbackID     = "callback_xyz"
	notifyOnCancel = false
	title          = "Dialog_title"
	submitLabel    = "Send"
	token          = "xoxa-123-123-123-213"
)
// _mocDialog builds a minimal Dialog fixture from the package-level test
// constants.
func _mocDialog() *Dialog {
	// The previous version copied each constant into an identically named
	// local before use; the shadowing added nothing, so the constants are
	// used directly.
	return &Dialog{
		TriggerID:      triggerID,
		CallbackID:     callbackID,
		NotifyOnCancel: notifyOnCancel,
		Title:          title,
		SubmitLabel:    submitLabel,
	}
}
// TestDialogCreate ensures the dialog fixture can be constructed.
func TestDialogCreate(t *testing.T) {
	if dialog := _mocDialog(); dialog == nil {
		// t.Errorf already marks the test failed; the extra t.Fail()
		// call the old code made here was redundant.
		t.Errorf("Should be able to construct a dialog")
	}
}
// ExampleDialog demonstrates (and pins, via the Output comment) the
// printed form of the dialog fixture.
func ExampleDialog() {
	dialog := _mocDialog()
	fmt.Println(*dialog)
	// Output:
	// {trigger_xyz callback_xyz Dialog_title Send false []}
}
|
package check
import (
"fmt"
"github.com/MintegralTech/juno/debug"
"github.com/MintegralTech/juno/document"
"github.com/MintegralTech/juno/index"
"github.com/MintegralTech/juno/marshal"
)
// OrChecker combines several Checkers with OR semantics: a document id
// passes when at least one sub-checker accepts it.
type OrChecker struct {
	c      []Checker    // sub-checkers, evaluated in order
	aDebug *debug.Debug // optional debug recorder, installed by SetDebug
}
// NewOrChecker wraps the given sub-checkers into an OrChecker.
// A nil slice yields a nil checker.
func NewOrChecker(c []Checker) *OrChecker {
	if c == nil {
		return nil
	}
	checker := &OrChecker{c: c}
	return checker
}
// Check reports whether any sub-checker accepts id. A nil receiver accepts
// everything (vacuous true); nil sub-checkers are skipped.
func (o *OrChecker) Check(id document.DocId) bool {
	if o == nil {
		return true
	}
	for _, cValue := range o.c {
		if cValue == nil {
			continue
		}
		if cValue.Check(id) {
			// Short-circuit on the first accepting checker.
			return true
		}
	}
	// Only the negative outcome is recorded for debugging.
	if o.aDebug != nil {
		o.aDebug.AddDebugMsg(fmt.Sprintf("%d in orChecker check result: false", id))
	}
	return false
}
// Marshal serializes the checker into a generic map under the "or_check"
// key, the format consumed by Unmarshal.
func (o *OrChecker) Marshal() map[string]interface{} {
	// Nil-receiver guard added for consistency with MarshalV2, which
	// already tolerated a nil checker; previously this method panicked.
	if o == nil {
		return nil
	}
	res := make(map[string]interface{}, 1)
	var tmp []map[string]interface{}
	for _, v := range o.c {
		tmp = append(tmp, v.Marshal())
	}
	res["or_check"] = tmp
	return res
}
// MarshalV2 serializes the checker into a MarshalInfo tree tagged with the
// "or_check" operation; sub-checkers that marshal to nil are dropped.
func (o *OrChecker) MarshalV2() *marshal.MarshalInfo {
	if o == nil {
		return nil
	}
	nodes := make([]*marshal.MarshalInfo, 0)
	for _, child := range o.c {
		if childInfo := child.MarshalV2(); childInfo != nil {
			nodes = append(nodes, childInfo)
		}
	}
	return &marshal.MarshalInfo{
		Operation: "or_check",
		Nodes:     nodes,
	}
}
// UnmarshalV2 rebuilds an OrChecker from a MarshalInfo tree produced by
// MarshalV2. It returns nil for a nil payload; child nodes that fail to
// unmarshal are silently skipped.
func (o *OrChecker) UnmarshalV2(idx index.Index, info *marshal.MarshalInfo) Checker {
	if info == nil {
		return nil
	}
	var c []Checker
	uq := &unmarshalV2{}
	for _, v := range info.Nodes {
		m := uq.UnmarshalV2(idx, v)
		if m != nil {
			// NOTE(review): the unchecked m.(Checker) assumes every
			// unmarshaled node implements Checker — confirm, else this
			// can panic on foreign node types.
			c = append(c, m.(Checker))
		}
	}
	return NewOrChecker(c)
}
// Unmarshal rebuilds an OrChecker from the generic map produced by
// Marshal. It returns nil when the payload is absent or malformed.
func (o *OrChecker) Unmarshal(idx index.Index, res map[string]interface{}) Checker {
	value, ok := res["or_check"]
	if !ok {
		return nil
	}
	// A malformed payload used to panic on the unchecked type assertion;
	// degrade to a nil checker instead.
	items, ok := value.([]map[string]interface{})
	if !ok {
		return nil
	}
	var checks []Checker
	uq := &unmarshal{}
	for _, v := range items {
		if c := uq.Unmarshal(idx, v); c != nil {
			checks = append(checks, c)
		}
	}
	return NewOrChecker(checks)
}
// DebugInfo collects the debug trees of all sub-checkers under this
// checker's own debug node. It returns nil when SetDebug was never called.
func (o *OrChecker) DebugInfo() *debug.Debug {
	if o.aDebug != nil {
		for _, v := range o.c {
			if v.DebugInfo() != nil {
				o.aDebug.AddDebug(v.DebugInfo())
			}
		}
		return o.aDebug
	}
	return nil
}
// SetDebug enables debug recording at the given level for this checker and
// recursively for every sub-checker.
func (o *OrChecker) SetDebug(level int) {
	o.aDebug = debug.NewDebug(level, "OrCheck")
	for _, v := range o.c {
		v.SetDebug(level)
	}
}
|
package boltdb
import (
"bytes"
bolt "go.etcd.io/bbolt"
)
// Iterator walks the key/value pairs of a boltDB bucket, optionally
// restricted to a key prefix or to a [start, end) range.
type Iterator struct {
	cursor *bolt.Cursor // boltDB cursor driving the traversal
	prefix []byte       // restrict iteration to keys with this prefix (prefix scan)
	start  []byte       // inclusive lower bound of the scan (range scan)
	end    []byte       // exclusive upper bound of the scan (range scan)
	valid  bool         // whether iteration may continue
	key    []byte       // key at the current position
	val    []byte       // value at the current position
}
// Seek positions the cursor at k, clamped to the iterator's constraints.
//
// Engines implement this differently; boltDB, being built on a B+tree,
// binary-searches from the root. If k exists the cursor lands on it and
// seek is done; otherwise boltDB positions the cursor at the first key
// greater than or equal to k.
func (i *Iterator) Seek(k []byte) {
	// Never seek below the configured lower bound.
	if i.start != nil && bytes.Compare(k, i.start) < 0 {
		k = i.start
	}
	if i.prefix != nil && !bytes.HasPrefix(k, i.prefix) {
		if bytes.Compare(k, i.prefix) < 0 {
			// k sorts before the prefix region: jump to its beginning.
			k = i.prefix
		} else {
			// k sorts after every key carrying the prefix: nothing left.
			i.valid = false
			return
		}
	}
	i.key, i.val = i.cursor.Seek(k)
	i.updateValid()
}
// Next advances the cursor one entry and refreshes the validity flag.
func (i *Iterator) Next() {
	i.key, i.val = i.cursor.Next()
	i.updateValid()
}
// updateValid recomputes whether the current position is still inside the
// iterator's constraints. Note the precedence: when a prefix is set, the
// end bound is not consulted.
func (i *Iterator) updateValid() {
	i.valid = i.key != nil
	if i.valid {
		if i.prefix != nil {
			i.valid = bytes.HasPrefix(i.key, i.prefix)
		} else if i.end != nil {
			i.valid = bytes.Compare(i.key, i.end) < 0
		}
	}
}
// Current returns the key, the value and the validity of the current entry.
func (i *Iterator) Current() ([]byte, []byte, bool) {
	return i.key, i.val, i.valid
}

// Key returns the key at the current position.
func (i *Iterator) Key() []byte {
	return i.key
}

// Value returns the value at the current position.
func (i *Iterator) Value() []byte {
	return i.val
}

// Valid reports whether iteration may continue.
func (i *Iterator) Valid() bool {
	return i.valid
}
// Close is a no-op: boltDB iterators need no standalone teardown, cleanup
// is handled uniformly by the owning transaction's close.
func (i *Iterator) Close() error {
	return nil
}
|
package options
// Options defines the command-line flags of this program (go-flags style
// struct tags: short/long flag names, help description and default value).
type Options struct {
	Width             int     `short:"w" long:"width" description:"Field width" default:"160"`
	Height            int     `short:"h" long:"height" description:"Field height" default:"90"`
	AliveRate         float64 `short:"a" long:"alive-rate" description:"The rate of alive cells in initialization" default:"50"`
	MutRate           float64 `short:"m" long:"mut-rate" description:"The rate of mutation" default:"1.0"`
	Generation        int     `short:"g" long:"generation" description:"Regulation of generations"`
	Keyword           string  `short:"k" long:"keyword" description:"Keyword for twitter"`
	Location          string  `short:"l" long:"location" description:"Tweet location(Default: Japan)" default:"132.2,29.9,146.2,39.0,138.4,33.5,146.1,46.20"`
	Debug             bool    `short:"d" long:"debug" description:"Debug mode"`
	Table             bool    `short:"t" long:"table" description:"View type"`
	Speed             bool    `short:"s" long:"speed" description:"Speed mode"`
	Chaos             bool    `short:"c" long:"chaos" description:"Chaos mode"`
	Fujiwara          bool    `short:"f" long:"fujiwara" description:"Fujiwara mode"`
	TNOK              bool    `short:"i" long:"tnok" description:"TNOK mode"`
	Nyanpass          bool    `short:"n" long:"nyanpass" description:"Nyanpass mode"`
	Bot               bool    `short:"b" long:"bot" description:"Bot mode skip user confirmation"`
	ConsumerKey       string  `long:"consumer-key" description:"Twitter consumer key"`
	ConsumerSecret    string  `long:"consumer-secret" description:"Twitter consumer secret"`
	AccessToken       string  `long:"access-token" description:"Twitter access token"`
	AccessTokenSecret string  `long:"access-token-secret" description:"Twitter access token secret"`
}
|
package handler
import (
"database/sql"
"encoding/json"
"net/http"
"time"
"github.com/seongminnpark/nooler-server/internal/pkg/model"
"github.com/seongminnpark/nooler-server/internal/pkg/util"
)
type DeviceHandler struct {
DB *sql.DB
}
// GetDevice responds with the device record belonging to the UUID encoded
// in the access_token request header.
func (handler *DeviceHandler) GetDevice(w http.ResponseWriter, r *http.Request) {
	tokenHeader := r.Header.Get("access_token")
	// Extract uuid from token.
	var token model.Token
	if err := token.Decode(tokenHeader); err != nil {
		util.RespondWithError(w, http.StatusUnauthorized, "Invalid token")
		return
	}
	// Fetch device information.
	device := model.Device{UUID: token.UUID}
	if err := device.GetDevice(handler.DB); err != nil {
		switch err {
		case sql.ErrNoRows:
			// This lookup is for a device, not a user; the previous
			// "User not found" message was a copy-paste slip.
			util.RespondWithError(w, http.StatusNotFound, "Device not found")
		default:
			util.RespondWithError(w, http.StatusInternalServerError, err.Error())
		}
		return
	}
	responseJSON := make(map[string]string)
	responseJSON["token"] = tokenHeader
	responseJSON["uuid"] = device.UUID
	responseJSON["owner"] = device.Owner
	responseJSON["location"] = device.Location
	util.RespondWithJSON(w, http.StatusOK, responseJSON)
}
// CreateDevice registers a new device for an existing user: it validates
// the owner UUID, mints a device UUID and an access token, persists the
// device and returns the token with status 201.
func (handler *DeviceHandler) CreateDevice(w http.ResponseWriter, r *http.Request) {
	// Cast user info from request to form object.
	var form model.AddDeviceForm
	decoder := json.NewDecoder(r.Body)
	if err := decoder.Decode(&form); err != nil {
		util.RespondWithError(w, http.StatusBadRequest, "Invalid request payload")
		return
	}
	defer r.Body.Close()
	// Check if new info is valid.
	if !util.ValidUUID(form.Owner) {
		util.RespondWithError(w, http.StatusBadRequest, "Invalid uuid")
		return
	}
	// Check if user by uuid exists.
	var existingUser model.User
	existingUser.UUID = form.Owner
	if err := existingUser.GetUser(handler.DB); err != nil {
		util.RespondWithError(w, http.StatusUnauthorized, "Owner ID does not exist")
		return
	}
	// Create new uuid for device.
	uuid, uuidErr := util.CreateUUID()
	if uuidErr != nil {
		util.RespondWithError(w, http.StatusInternalServerError, uuidErr.Error())
		return
	}
	// Create new device instance.
	device := model.Device{UUID: uuid, Owner: form.Owner, Location: form.Location}
	// Generate new token, valid for 24 hours.
	token := model.Token{
		UUID: device.UUID,
		Exp:  time.Now().Add(time.Hour * 24).Unix()}
	// Encode into string.
	tokenString, encodeErr := token.Encode()
	if encodeErr != nil {
		util.RespondWithError(w, http.StatusInternalServerError, encodeErr.Error())
		return
	}
	// Create new device.
	if err := device.CreateDevice(handler.DB); err != nil {
		util.RespondWithError(w, http.StatusInternalServerError, err.Error())
		return
	}
	// // Create new uuid for action.
	// actionUUID, uuidErr := util.CreateUUID()
	// if uuidErr != nil {
	// 	util.RespondWithError(w, http.StatusInternalServerError, uuidErr.Error())
	// 	return
	// }
	// // Create new action.
	// action := model.Action{UUID: actionUUID, User: device.Owner, Device: uuid, Active: true}
	// // Store action.
	// if err := action.CreateAction(handler.DB); err != nil {
	// 	util.RespondWithError(w, http.StatusInternalServerError, err.Error())
	// 	return
	// }
	responseJSON := make(map[string]string)
	responseJSON["token"] = tokenString
	util.RespondWithJSON(w, http.StatusCreated, responseJSON)
}
|
package chain
import (
"strings"
abci "github.com/hashrs/blockchain/core/consensus/dpos-pbft/abci/types"
"github.com/hashrs/blockchain/core/consensus/dpos-pbft/libs/log"
sdk "github.com/hashrs/blockchain/framework/chain-app/types"
starter "github.com/hashrs/blockchain/framework/chain-starter"
dbm "github.com/hashrs/blockchain/libs/state-db/tm-db"
//import greeter types
main_net "github.com/hashrs/blockchain/chain/x/greeter"
cli "github.com/hashrs/blockchain/framework/chain-cli"
)
// ChainName is the human-readable name of this chain.
const ChainName = "HashRs"

var (
	// ModuleBasics holds the AppModuleBasic struct of all modules included in the app
	ModuleBasics = starter.ModuleBasics
)
// ChainApp extends the AppStarter scaffolding with the greeter module's
// store key and keeper.
type ChainApp struct {
	*starter.AppStarter                 // ChainApp extends starter.AppStarter
	greeterKey          *sdk.KVStoreKey // the store key for the greeter module
	greeterKeeper       main_net.Keeper // the keeper for the greeter module
}
// NewChain returns a fully constructed SDK application.
// (The comment previously referred to NewHelloChainApp, a stale name.)
func NewChain(logger log.Logger, db dbm.DB) abci.Application {
	// pass greeter's AppModuleBasic to be included in the ModuleBasicsManager
	appStarter := starter.NewAppStarter(ChainName, logger, db, main_net.AppModuleBasic{})
	// create the key for greeter's store
	greeterKey := sdk.NewKVStoreKey(main_net.StoreKey)
	// construct the keeper
	greeterKeeper := main_net.NewKeeper(greeterKey, appStarter.Cdc)
	// compose our app with greeter
	var app = &ChainApp{
		appStarter,
		greeterKey,
		greeterKeeper,
	}
	// Add greeters' complete AppModule to the ModuleManager
	greeterMod := main_net.NewAppModule(greeterKeeper)
	app.Mm.Modules[greeterMod.Name()] = greeterMod
	// create a subspace for greeter's data in the main store.
	app.MountStore(greeterKey, sdk.StoreTypeDB)
	// do some final configuration...
	app.InitializeStarter()
	return app
}
// NewNode builds and runs the chain's node (server) command, wiring the
// NewChain constructor into the starter framework. It panics if the
// command fails to execute.
func NewNode() {
	params := starter.NewServerCommandParams(
		strings.ToLower(ChainName)+"-node",
		ChainName+"Chain Node",
		starter.NewAppCreator(NewChain),
		starter.NewAppExporter(NewChain),
	)
	serverCmd := starter.NewServerCommand(params)
	// prepare and add flags
	executor := cli.PrepareBaseCmd(serverCmd, "HR", starter.DefaultNodeHome)
	err := executor.Execute()
	if err != nil {
		panic(err)
	}
}
// NewCli builds and runs the chain's CLI command, registering the tx and
// query subcommands of every module. It panics if the command fails to
// execute.
func NewCli() {
	starter.BuildModuleBasics(ChainName, main_net.AppModuleBasic{})
	rootCmd := starter.NewCLICommand(ChainName)
	txCmd := starter.TxCmd(starter.Cdc)
	queryCmd := starter.QueryCmd(starter.Cdc)
	// add more Tx and Query commands
	ModuleBasics.AddTxCommands(txCmd, starter.Cdc)
	ModuleBasics.AddQueryCommands(queryCmd, starter.Cdc)
	rootCmd.AddCommand(txCmd, queryCmd)
	executor := cli.PrepareMainCmd(rootCmd, "HR", starter.DefaultCLIHome)
	err := executor.Execute()
	if err != nil {
		panic(err)
	}
}
|
// -------------------------------------------------------------------
//
// salter: Tool for bootstrap salt clusters in EC2
//
// Copyright (c) 2013-2014 Orchestrate, Inc. All Rights Reserved.
//
// This file is provided to you under the Apache License,
// Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// -------------------------------------------------------------------
package main
// teardown terminates every node in G_TARGETS, running at most
// ARG_PARALLEL concurrent teardown workers to stay friendly to AWS.
func teardown() error {
	work := make(chan *Node)
	done := make(chan bool)

	// Workers consume nodes until the work channel is closed, then signal
	// completion on the done channel.
	for w := 0; w < ARG_PARALLEL; w++ {
		go func() {
			for n := range work {
				teardownNode(n)
			}
			done <- true
		}()
	}

	// Feed every target node to the workers, then close to release them.
	for _, n := range G_TARGETS {
		work <- n
	}
	close(work)

	// Wait until every worker has drained and exited.
	for w := 0; w < ARG_PARALLEL; w++ {
		<-done
	}
	return nil
}
// teardownNode refreshes a node's state and then terminates it, logging
// (rather than propagating) any failure.
func teardownNode(node *Node) {
	if err := node.Update(); err != nil {
		printf("%s: not terminated; %+v\n", node.Name, err)
		return
	}
	if err := node.Terminate(); err != nil {
		printf("%s: not terminated; %+v\n", node.Name, err)
		return
	}
	// TODO: Revoke key from master
}
|
package atcoder
import (
"fmt"
"github.com/PuerkitoBio/goquery"
"io"
"strings"
)
// ParseTasksPage extracts the task page hrefs from a contest's task-list
// HTML document.
func ParseTasksPage(r io.Reader) ([]string, error) {
	doc, err := goquery.NewDocumentFromReader(r)
	if err != nil {
		return nil, err
	}
	links := doc.Find("tbody > tr > td:first-child > a")
	paths := make([]string, links.Length())
	links.Each(func(i int, sel *goquery.Selection) {
		href, _ := sel.Attr("href")
		paths[i] = href
	})
	return paths, nil
}
// ParseTaskPage extracts a Task (title plus paired input/output examples)
// from a task's HTML document. It scans the Japanese-language section for
// headings starting with 入力例 (sample input) / 出力例 (sample output) and
// pairs them by order.
func ParseTaskPage(r io.Reader) (*Task, error) {
	doc, err := goquery.NewDocumentFromReader(r)
	if err != nil {
		return nil, err
	}
	selector := doc.Find("span.lang-ja .part > section > h3")
	if selector.Length() == 0 {
		return nil, fmt.Errorf("examples not found")
	}
	var inputSelections []*goquery.Selection
	var outputSelections []*goquery.Selection
	selector.Each(func(_ int, selection *goquery.Selection) {
		// 入力例 = "sample input"
		if strings.HasPrefix(selection.Text(), "入力例") {
			inputSelections = append(inputSelections, selection.Parent().Find("pre"))
		}
		// 出力例 = "sample output"
		if strings.HasPrefix(selection.Text(), "出力例") {
			outputSelections = append(outputSelections, selection.Parent().Find("pre"))
		}
	})
	// Inputs and outputs are paired positionally, so their counts must match.
	if len(inputSelections) != len(outputSelections) {
		return nil, fmt.Errorf("input & output count mismatch")
	}
	examples := make([]*Example, len(inputSelections))
	for i := range inputSelections {
		examples[i] = &Example{
			In:  strings.TrimSpace(inputSelections[i].Text()),
			Exp: strings.TrimSpace(outputSelections[i].Text()),
		}
	}
	name := doc.Find("title").Text()
	return &Task{
		Name:     name,
		Examples: examples,
	}, nil
}
func ParseCSRFToken(r io.Reader) (string, error) {
doc, err := goquery.NewDocumentFromReader(r)
if err != nil {
return "", err
}
tokenEl := doc.Find("input[name=csrf_token]")
token, exists := tokenEl.Attr("value")
if !exists {
return "", fmt.Errorf("value not found")
}
return token, nil
} |
package main
import (
"bufio"
"fmt"
"log"
"os"
"strconv"
"strings"
)
const ()
// checkErr aborts the program with a fatal log message when err is non-nil.
func checkErr(err error) {
	if err == nil {
		return
	}
	log.Fatalf("Error: %v", err)
}
// cyclePos folds a position on a forward-and-back track onto its forward
// segment: positions past the pivot are reflected back towards it.
func cyclePos(movePos int, pivot int) int {
	if movePos <= pivot {
		return movePos
	}
	return movePos - 2*(movePos-pivot)
}
// collision reports whether a scanner sweeping a column of the given
// height is at position 0 when the packet passes at time pos. The scanner
// traverses a full cycle in 2*(height-1) steps, and it is at the top
// exactly at multiples of that cycle.
func collision(pos int, height int) bool {
	// A height-1 scanner never leaves position 0 and always catches the
	// packet; the previous code computed pos % 0 here and panicked.
	if height <= 1 {
		return true
	}
	// Equivalent to the old cyclePos round-trip: within one cycle the
	// folded position is 0 only at offset 0.
	return pos%(2*(height-1)) == 0
}
// main reads colon-separated integer pairs ("pos: height") from input.txt
// and prints the sum of pos*height over every pair where collision reports
// a hit. (This matches the severity computation of Advent of Code 2017
// day 13 — presumably; confirm against the puzzle input format.)
func main() {
	f, err := os.Open("input.txt")
	checkErr(err)
	defer f.Close()
	scanner := bufio.NewScanner(f)
	var cost int
	for scanner.Scan() {
		// Each line has the form "<pos>: <height>".
		line := strings.Split(scanner.Text(), ": ")
		pos, err := strconv.Atoi(line[0])
		checkErr(err)
		height, err := strconv.Atoi(line[1])
		checkErr(err)
		if collision(pos, height) {
			cost += pos * height
		}
	}
	checkErr(scanner.Err())
	fmt.Println(cost)
}
|
// Package tlit transliterates Russian and Ukrainian letters into Latin ones.
package tlit // import "github.com/blackalegator/tlit"
|
package main
import (
"github.com/fatih/color"
"os"
)
// mlCLI holds the CLI's output configuration: a palette of per-category
// colors plus verbosity switches.
type mlCLI struct {
	printColor [6]*color.Color // indexed by the col* constants below
	server     string
	verbose    bool // also print colInfo messages
	silence    bool // suppress all output
	debug      bool // also print colDebug messages
}
// currentColorTheme names the active palette.
// NOTE(review): setColors reads config.colorTheme rather than this
// variable — confirm it is still used anywhere.
var currentColorTheme = "default"

// Indexes into mlCLI.printColor, one per message category.
var (
	colRegular = 0
	colInfo    = 1
	colWarn    = 2
	colError   = 3
	colSuccess = 4
	colDebug   = 5
)
// init prepares the CLI for use by installing the color palette.
// It currently always returns nil.
func (m *mlCLI) init() error {
	m.setColors()
	return nil
}
// printf writes a formatted message in the color of category col,
// honouring the silence/verbose/debug switches.
func (m *mlCLI) printf(col int, format string, args ...interface{}) {
	if m.silence {
		return
	}
	// Info and debug output are opt-in.
	if (col == colInfo && !m.verbose) || (col == colDebug && !m.debug) {
		return
	}
	// Fall back to the regular color for out-of-range categories.
	painter := m.printColor[0]
	if col > 0 && col < len(m.printColor) {
		painter = m.printColor[col]
	}
	painter.Printf(format, args...)
}
// Fatal prints an error-colored message and exits with status 1.
func (m *mlCLI) Fatal(format string, args ...interface{}) {
	m.printf(colError, format, args...)
	os.Exit(1)
}

// pError prints an error-colored message.
func (m *mlCLI) pError(format string, args ...interface{}) {
	m.printf(colError, format, args...)
}

// pWarn prints a warning-colored message.
func (m *mlCLI) pWarn(format string, args ...interface{}) {
	m.printf(colWarn, format, args...)
}

// pInfo prints an info-colored message (only shown in verbose mode).
func (m *mlCLI) pInfo(format string, args ...interface{}) {
	m.printf(colInfo, format, args...)
}

// pSuccess prints a success-colored message.
func (m *mlCLI) pSuccess(format string, args ...interface{}) {
	m.printf(colSuccess, format, args...)
}

// pRegular prints a message in the regular color.
func (m *mlCLI) pRegular(format string, args ...interface{}) {
	m.printf(colRegular, format, args...)
}

// pDebug prints a debug-colored message (only shown in debug mode).
func (m *mlCLI) pDebug(format string, args ...interface{}) {
	m.printf(colDebug, format, args...)
}
// setColors installs the palette for the configured color theme. The two
// built-in themes differed only in the regular-text color, so the shared
// categories are assigned once instead of being duplicated per branch.
func (m *mlCLI) setColors() {
	theme := config.colorTheme
	// Category colors common to all themes.
	m.printColor[1] = color.New(color.FgHiBlack)
	m.printColor[2] = color.New(color.FgYellow)
	m.printColor[3] = color.New(color.FgRed)
	m.printColor[4] = color.New(color.FgGreen)
	m.printColor[5] = color.New(color.FgHiBlack)
	// Only the regular-text color depends on the theme.
	if theme == "dark" {
		m.printColor[0] = color.New(color.FgHiWhite)
	} else {
		m.printColor[0] = color.New(color.FgMagenta)
	}
	//add theme as you want.
}
|
package main
//暴力dfs,会超时
//func canJump(nums []int) bool {
// if len(nums) == 0 {
// return false
// }
// return dfs(nums, 0)
//}
//
//func dfs(nums []int, now int) bool {
// if now >= len(nums) - 1 {
// return true
// }
// if nums[now] == 0 {
// return false
// }
// for i := nums[now]; i >= 1; i-- {
// if dfs(nums, now + i) {
// return true
// }
// }
// return false
//}
// 贪心 从左向右
// func canJump(nums []int) bool {
// if len(nums) <= 1 {
// return true
// }
// reach := nums[0]
// for i := 0; i < len(nums) && i <= reach; i++ {
// if i + nums[i] > reach {
// reach = i + nums[i]
// }
// }
// return reach >= len(nums) - 1
//}
// 贪心 从右向左
// canJump reports whether the last index is reachable from index 0 when
// each element is the maximum jump length from that position (greedy,
// scanning right to left: an index that can reach the current target
// becomes the new target).
func canJump(nums []int) bool {
	target := len(nums) - 1
	for idx := target; idx >= 0; idx-- {
		if idx+nums[idx] >= target {
			target = idx
		}
	}
	return target == 0
}
|
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
// +build go1.7
package shutdown
import (
"context"
"fmt"
"testing"
"time"
xcontext "golang.org/x/net/context"
)
// otherContext is a Context that's not one of the types defined in context.go.
// This lets us test code paths that differ based on the underlying type of the
// Context.
type otherContext struct {
	context.Context
}
// TestCancelCtx verifies that a context from CancelCtx (plus a wrapped copy
// and a derived child) behaves like a live cancelable context before
// Shutdown(), and that all three are canceled afterwards.
func TestCancelCtx(t *testing.T) {
	reset()
	SetTimeout(time.Second)
	defer close(startTimer(t))
	c1, cc := CancelCtx(context.Background())
	defer cc()
	if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want {
		t.Errorf("c1.String() = %q want %q", got, want)
	}
	// Wrap c1 in otherContext to exercise the non-standard-type code path,
	// then derive a plain child from the wrapper. Note: each `defer cc()`
	// captures the function value cc held at the defer statement, so both
	// cancel funcs run at test exit even though cc is reassigned below.
	o := otherContext{c1}
	c2, cc := context.WithCancel(o)
	defer cc()
	contexts := []context.Context{c1, o, c2}
	// Pre-shutdown: Done is non-nil, Err is nil, and Done has not fired.
	for i, c := range contexts {
		if d := c.Done(); d == nil {
			t.Errorf("c[%d].Done() == %v want non-nil", i, d)
		}
		if e := c.Err(); e != nil {
			t.Errorf("c[%d].Err() == %v want nil", i, e)
		}
		select {
		case x := <-c.Done():
			t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
		default:
		}
	}
	Shutdown()
	time.Sleep(100 * time.Millisecond) // let cancellation propagate
	// Post-shutdown: every context must be canceled.
	for i, c := range contexts {
		select {
		case <-c.Done():
		default:
			t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i)
		}
		if e := c.Err(); e != context.Canceled {
			t.Errorf("c[%d].Err() == %v want %v", i, e, context.Canceled)
		}
	}
}
// TestCancelCtxN is the multi-stage variant of TestCancelCtx: one context is
// registered per shutdown stage via CancelCtxN, and all of them (plus wrapped
// and derived children) must be canceled by Shutdown().
func TestCancelCtxN(t *testing.T) {
	reset()
	SetTimeout(time.Second)
	defer close(startTimer(t))
	stages := []Stage{StagePS, Stage1, Stage2, Stage3}
	contexts := []context.Context{}
	for _, stage := range stages {
		c1, cc := CancelCtxN(context.Background(), stage)
		defer cc()
		if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want {
			t.Errorf("c1.String() = %q want %q", got, want)
		}
		// As in TestCancelCtx: wrapper + derived child per stage.
		o := otherContext{c1}
		c2, cc := context.WithCancel(o)
		defer cc()
		contexts = append(contexts, c1, o, c2)
	}
	// Pre-shutdown: all contexts are live.
	for i, c := range contexts {
		if d := c.Done(); d == nil {
			t.Errorf("c[%d].Done() == %v want non-nil", i, d)
		}
		if e := c.Err(); e != nil {
			t.Errorf("c[%d].Err() == %v want nil", i, e)
		}
		select {
		case x := <-c.Done():
			t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
		default:
		}
	}
	Shutdown()
	time.Sleep(100 * time.Millisecond) // let cancellation propagate
	// Post-shutdown: all contexts canceled regardless of stage.
	for i, c := range contexts {
		select {
		case <-c.Done():
		default:
			t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i)
		}
		if e := c.Err(); e != context.Canceled {
			t.Errorf("c[%d].Err() == %v want %v", i, e, context.Canceled)
		}
	}
}
// TestCancelCtxNShutdown cancels each stage-registered context manually
// (before any shutdown) and then verifies that Shutdown() does not block on
// the already-canceled registrations.
func TestCancelCtxNShutdown(t *testing.T) {
	reset()
	SetTimeout(time.Second)
	defer close(startTimer(t))
	stages := []Stage{StagePS, Stage1, Stage2, Stage3}
	contexts := []context.Context{}
	for _, stage := range stages {
		c1, cancel1 := CancelCtxN(context.Background(), stage)
		o := otherContext{c1}
		c2, cc := context.WithCancel(o)
		defer cc()
		contexts = append(contexts, c1, o, c2)
		// Cancel immediately; children must observe the cancellation.
		cancel1()
	}
	time.Sleep(100 * time.Millisecond) // let cancellation propagate
	for i, c := range contexts {
		select {
		case <-c.Done():
		default:
			t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i)
		}
		if e := c.Err(); e != context.Canceled {
			t.Errorf("c[%d].Err() == %v want %v", i, e, context.Canceled)
		}
	}
	// Ensure shutdown is not blocking
	Shutdown()
}
// TestCancelCtxX mirrors TestCancelCtx but drives CancelCtx with a
// golang.org/x/net/context context, checking interoperability.
//
// Fixed: the cancel function returned by CancelCtx was discarded
// (`c1, _ :=`), which go vet's lostcancel check flags. It is now captured
// and deferred; calling it after Shutdown() has already canceled the
// context is a harmless no-op.
func TestCancelCtxX(t *testing.T) {
	reset()
	SetTimeout(time.Second)
	defer close(startTimer(t))
	c1, cancel := CancelCtx(xcontext.Background())
	defer cancel()
	if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want {
		t.Errorf("c1.String() = %q want %q", got, want)
	}
	// Wrapper + derived child, as in the non-x variant.
	o := otherContext{c1}
	c2, cc := xcontext.WithCancel(o)
	defer cc()
	contexts := []xcontext.Context{c1, o, c2}
	// Pre-shutdown: all contexts are live.
	for i, c := range contexts {
		if d := c.Done(); d == nil {
			t.Errorf("c[%d].Done() == %v want non-nil", i, d)
		}
		if e := c.Err(); e != nil {
			t.Errorf("c[%d].Err() == %v want nil", i, e)
		}
		select {
		case x := <-c.Done():
			t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
		default:
		}
	}
	Shutdown()
	time.Sleep(100 * time.Millisecond) // let cancellation propagate
	// Post-shutdown: all contexts canceled.
	for i, c := range contexts {
		select {
		case <-c.Done():
		default:
			t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i)
		}
		if e := c.Err(); e != context.Canceled {
			t.Errorf("c[%d].Err() == %#v want %#v", i, e, context.Canceled)
		}
	}
}
|
package kube
import (
"testing"
"github.com/google/go-cmp/cmp"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
sfv1alpha1 "github.com/openshift/splunk-forwarder-operator/api/v1alpha1"
)
// Fixture values shared by the generation tests in this package.
const (
	instanceName = "test"
	instanceNamespace = "openshift-test"
	image = "test-image"
	hfImage = "test-hf-image"
	imageTag = "0.0.1"
	// Arbitrary but well-formed sha256 image references.
	imageDigest = "sha256:2452a3f01e840661ee1194777ed5a9185ceaaa9ec7329ed364fa2f02be22a701"
	heavyDigest = "sha256:49b40c2c5d79913efb7eff9f3bf9c7348e322f619df10173e551b2596913d52a"
)
// splunkForwarderInstance builds (a pointer to) a SplunkForwarder CR as input
// to GenerateDaemonSet. The flags select which optional Spec fields are set:
//   - useHeavy:    value assigned to Spec.UseHeavyForwarder.
//   - useTag:      when true, Spec.ImageTag is populated.
//   - useDigest:   when true, Spec.ImageDigest is populated.
//   - useHFDigest: when true, Spec.HeavyForwarderDigest is populated.
func splunkForwarderInstance(useHeavy, useTag, useDigest, useHFDigest bool) *sfv1alpha1.SplunkForwarder {
	cr := &sfv1alpha1.SplunkForwarder{
		ObjectMeta: metav1.ObjectMeta{
			Name:       instanceName,
			Namespace:  instanceNamespace,
			Generation: 10,
		},
		Spec: sfv1alpha1.SplunkForwarderSpec{
			UseHeavyForwarder:      useHeavy,
			HeavyForwarderReplicas: 0,
			SplunkLicenseAccepted:  true,
			HeavyForwarderSelector: "infra",
			Image:                  image,
			HeavyForwarderImage:    hfImage,
		},
	}
	if useTag {
		cr.Spec.ImageTag = imageTag
	}
	if useDigest {
		cr.Spec.ImageDigest = imageDigest
	}
	if useHFDigest {
		cr.Spec.HeavyForwarderDigest = heavyDigest
	}
	return cr
}
// DeepEqualWithDiff deep-compares two `runtime.Object`s and fails the `t`est
// with a useful message (showing the diff) if they differ.
func DeepEqualWithDiff(t *testing.T, expected, actual runtime.Object) {
	diff := cmp.Diff(expected, actual)
	if diff != "" {
		t.Fatal("Objects differ: -expected, +actual\n", diff)
	}
}
|
package main
import (
"bitbucket.org/tebeka/selenium"
"flag"
"fmt"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"log"
"net/http"
"strconv"
)
// Screenshot is the Mongo document caching one captured page image.
type Screenshot struct {
	Url string // page URL the screenshot was taken of
	Data []byte // raw image bytes (served with Content-Type image/png)
}
// main wires up the screenshot service: a remote Selenium WebDriver for page
// capture, a MongoDB session for caching, and a /screenshot HTTP endpoint.
func main() {
	mongoHost := flag.String("mongo-host", "127.0.0.1", "mongo host")
	httpPort := flag.Int("port", 8080, "http port")
	webDriverUrl := flag.String("web-driver-url", "http://127.0.0.1:4444", "web driver url")
	flag.Parse()
	wd, err := webDriver(*webDriverUrl)
	if err != nil {
		log.Fatal(err)
	}
	defer wd.Quit()
	session, err := mgo.Dial(*mongoHost)
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()
	http.HandleFunc("/screenshot", screenshotHandler(wd, session))
	port := strconv.Itoa(*httpPort)
	fmt.Println("Listening on port", port)
	// ListenAndServe only returns on failure; log.Fatal exits the process,
	// so the wd.Quit/session.Close defers above do not run on this path.
	log.Fatal(http.ListenAndServe(":"+port, nil))
}
// webDriver connects to the remote Selenium WebDriver at url using default
// (empty) capabilities.
func webDriver(url string) (selenium.WebDriver, error) {
	driver, err := selenium.NewRemote(selenium.Capabilities{}, url)
	if err != nil {
		return nil, err
	}
	return driver, nil
}
// screenshotHandler returns an HTTP handler serving a PNG screenshot of the
// page named by the required "url" query parameter. Screenshots are cached in
// the "screener.screenshots" Mongo collection; passing refresh=true forces a
// fresh capture.
//
// Bug fixed: the original called getOrRefresh unconditionally, so a
// refresh=true request captured a fresh screenshot and then immediately
// discarded it in favor of whatever document the cache lookup returned.
func screenshotHandler(wd selenium.WebDriver, session *mgo.Session) http.HandlerFunc {
	c := session.DB("screener").C("screenshots")
	return func(w http.ResponseWriter, r *http.Request) {
		url := r.URL.Query().Get("url")
		if url == "" {
			http.Error(w, "parameter 'url' is required", 400)
			return
		}
		var data []byte
		var err error
		if r.URL.Query().Get("refresh") == "true" {
			data, err = refresh(wd, c, url)
		} else {
			data, err = getOrRefresh(wd, c, url)
		}
		if err != nil {
			http.Error(w, err.Error(), 500)
			return
		}
		writeData(w, data)
	}
}
// loadUrl navigates the driver to url and returns a screenshot of the page.
func loadUrl(wd selenium.WebDriver, url string) ([]byte, error) {
	if err := wd.Get(url); err != nil {
		return nil, err
	}
	shot, err := wd.Screenshot()
	if err != nil {
		return nil, err
	}
	return shot, nil
}
func writeData(w http.ResponseWriter, data []byte) {
w.Header().Set("Content-Type", "image/png")
w.Write(data)
}
// refresh captures a fresh screenshot of url and stores it in the cache.
//
// Fixed: the original used Insert, which created a new duplicate cache
// document on every refresh; getOrRefresh's Find(...).One() would then keep
// returning whichever (possibly stale) duplicate matched first. Upsert keyed
// by URL keeps exactly one document per page.
func refresh(wd selenium.WebDriver, c *mgo.Collection, url string) ([]byte, error) {
	data, err := loadUrl(wd, url)
	if err != nil {
		return nil, err
	}
	if _, err := c.Upsert(bson.M{"url": url}, &Screenshot{url, data}); err != nil {
		return nil, err
	}
	return data, nil
}
// getOrRefresh returns the cached screenshot for url, capturing and caching a
// fresh one only when no cache entry exists. Any other lookup error is
// returned unchanged.
func getOrRefresh(wd selenium.WebDriver, c *mgo.Collection, url string) ([]byte, error) {
	existing := Screenshot{}
	if err := c.Find(bson.M{"url": url}).One(&existing); err != nil {
		if err == mgo.ErrNotFound {
			return refresh(wd, c, url)
		}
		return nil, err
	}
	return existing.Data, nil
}
|
package s3
import (
"context"
"io"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/sirupsen/logrus"
"github.com/yunify/qscamel/constants"
"github.com/yunify/qscamel/model"
"github.com/yunify/qscamel/utils"
)
// Deletable implement destination.Deletable: this endpoint supports Delete.
func (c *Client) Deletable() bool {
	return true
}
// Fetchable implement destination.Fetchable: fetch-by-URL is not supported
// (Fetch below returns ErrEndpointFuncNotImplemented).
func (c *Client) Fetchable() bool {
	return false
}
// Writable implement destination.Writable: this endpoint supports Write.
func (c *Client) Writable() bool {
	return true
}
// Delete implement destination.Delete: removes the object at p (joined under
// the client's base Path) from the bucket.
func (c *Client) Delete(ctx context.Context, p string) (err error) {
	cp := utils.Join(c.Path, p)
	_, err = c.client.DeleteObject(&s3.DeleteObjectInput{
		Bucket: aws.String(c.BucketName),
		Key:    aws.String(cp),
	})
	if err != nil {
		return
	}
	logrus.Debugf("s3 delete object %s.", cp)
	return
}
// Write implement destination.Write: uploads exactly `size` bytes from r to
// the object at p (joined under the client's base Path).
// NOTE(review): ctx is accepted but not propagated into the SDK call.
func (c *Client) Write(ctx context.Context, p string, size int64, r io.Reader) (err error) {
	cp := utils.Join(c.Path, p)
	_, err = c.client.PutObject(&s3.PutObjectInput{
		Bucket: aws.String(c.BucketName),
		Key:    aws.String(cp),
		// wrap by limitReader to keep body consistent with size
		Body:          aws.ReadSeekCloser(io.LimitReader(r, size)),
		ContentLength: aws.Int64(size),
	})
	if err != nil {
		return
	}
	logrus.Debugf("s3 wrote object %s.", cp)
	return
}
// Fetch implement destination.Fetch: not supported by this endpoint.
func (c *Client) Fetch(ctx context.Context, p, url string) (err error) {
	return constants.ErrEndpointFuncNotImplemented
}
// Partable implement destination.Partable: multipart upload is supported.
func (c *Client) Partable() bool {
	return true
}
// InitPart implement destination.InitPart: starts a multipart upload for p
// and derives the part size and (rounded-up) part count for an object of
// `size` bytes.
func (c *Client) InitPart(ctx context.Context, p string, size int64) (uploadID string, partSize int64, partNumbers int, err error) {
	cp := utils.Join(c.Path, p)
	resp, err := c.client.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket: aws.String(c.BucketName),
		Key:    aws.String(cp),
	})
	if err != nil {
		return
	}
	uploadID = *resp.UploadId
	// calculatePartSize errors when no valid part size can hold the object
	// (see the "too large" log below).
	partSize, err = calculatePartSize(size)
	if err != nil {
		logrus.Errorf("Object %s is too large", p)
		return
	}
	// Round up so a trailing partial part gets its own part number.
	partNumbers = int(size / partSize)
	if size%partSize != 0 {
		partNumbers++
	}
	return
}
// UploadPart implement destination.UploadPart: uploads one part of a
// multipart upload, and when no further pending part exists, completes the
// whole upload.
//
// NOTE(review): parts here are numbered from 0 (consistent with the
// "check from -1" trick below and the completion loop), but the real S3 API
// only accepts part numbers in [1, 10000] — confirm how model.PartialObject
// numbers its parts before relying on this against AWS.
func (c *Client) UploadPart(ctx context.Context, o *model.PartialObject, r io.Reader) (err error) {
	cp := utils.Join(c.Path, o.Key)
	_, err = c.client.UploadPart(&s3.UploadPartInput{
		Bucket:        aws.String(c.BucketName),
		Key:           aws.String(cp),
		UploadId:      aws.String(o.UploadID),
		ContentLength: aws.Int64(o.Size),
		PartNumber:    aws.Int64(int64(o.PartNumber)),
		// wrap by limitReader to keep body consistent with size
		Body: aws.ReadSeekCloser(io.LimitReader(r, o.Size)),
	})
	if err != nil {
		return
	}
	// Trick: We need to check from current part number here.
	// if we check from -1, then complete will be skipped, because next will never be nil.
	next, err := model.NextPartialObject(ctx, o.Key, o.PartNumber)
	if err != nil {
		return
	}
	if next != nil {
		// More parts remain; defer completion to a later call.
		logrus.Debugf("s3 wrote partial object %s at %d.", o.Key, o.Offset)
		return nil
	}
	// No pending part left: declare the full part list and complete the upload.
	parts := make([]*s3.CompletedPart, o.TotalNumber)
	for i := 0; i < o.TotalNumber; i++ {
		parts[i] = &s3.CompletedPart{
			PartNumber: aws.Int64(int64(i)),
		}
	}
	_, err = c.client.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:   aws.String(c.BucketName),
		Key:      aws.String(cp),
		UploadId: aws.String(o.UploadID),
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: parts,
		},
	})
	if err != nil {
		return err
	}
	return nil
}
|
package models
import (
"errors"
"time"
"github.com/badoux/checkmail"
"github.com/jinzhu/gorm"
)
// User is the gorm model for an application account.
type User struct {
	ID uint32 `gorm:"primary_key;auto_increment" json:"id"`
	Username string `gorm:"size:255;not null;unique" json:"username"`
	Email string `gorm:"size:100;not null;unique" json:"email"`
	// Password is stored as given; see NOTE on UpdateAUser about hashing.
	Password string `gorm:"size:100;not null;" json:"password"`
	CreatedAt time.Time `gorm:"default:CURRENT_TIMESTAMP" json:"created_at"`
	UpdatedAt time.Time `gorm:"default:CURRENT_TIMESTAMP" json:"updated_at"`
}
// func (u *User) Validate(action string) error {
// switch strings.ToLower(action) {
// case "update":
// if u.Username == "" {
// return errors.New("Required Username")
// }
// if u.Password == "" {
// return errors.New("Required Password")
// }
// if u.Email == "" {
// return errors.New("Required Email")
// }
// if err := checkmail.ValidateFormat(u.Email); err != nil {
// return errors.New("Invalid Email")
// }
// return nil
// case "login":
// if u.Password == "" {
// return errors.New("Required Password")
// }
// if u.Email == "" {
// return errors.New("Required Email")
// }
// if err := checkmail.ValidateFormat(u.Email); err != nil {
// return errors.New("Invalid Email")
// }
// return nil
// default:
// if u.Username == "" {
// return errors.New("Required username")
// }
// if u.Password == "" {
// return errors.New("Required password")
// }
// if u.Email == "" {
// return errors.New("Required email")
// }
// if err := checkmail.ValidateFormat(u.Email); err != nil {
// return errors.New("Invalid Email")
// }
// return nil
// }
// }
// Validate checks that the user carries a username, a password and a
// syntactically valid email address.
func (u *User) Validate() error {
	switch {
	case u.Username == "":
		return errors.New("Required username")
	case u.Password == "":
		return errors.New("Required password")
	case u.Email == "":
		return errors.New("Required email")
	}
	if err := checkmail.ValidateFormat(u.Email); err != nil {
		return errors.New("Invalid Email")
	}
	return nil
}
// SaveUser inserts the receiver as a new row and returns it.
func (u *User) SaveUser(db *gorm.DB) (*User, error) {
	if err := db.Debug().Create(&u).Error; err != nil {
		return &User{}, err
	}
	return u, nil
}
// SelectSaveUser inserts the receiver writing only the Username, Email and
// Password columns.
func (u *User) SelectSaveUser(db *gorm.DB) (*User, error) {
	if err := db.Select("Username", "Email", "Password").Create(&u).Error; err != nil {
		return &User{}, err
	}
	return u, nil
}
// func (u *User) BatchInsert(db *gorm.DB) (*User, error) {
// var err error
// err = db.Create(&u).Error
// if err != nil {
// return &User{}, err
// }
// return u, nil
// }
// Get user
// func (u *User) SingleObject(db *gorm.DB) (*User, error) {
// var err error
// err = db.First(&u).Error
// if err != nil {
// return &User{}, err
// }
// return u, nil
// }
// FindAllUsers returns up to 100 users.
func (u *User) FindAllUsers(db *gorm.DB) (*[]User, error) {
	var err error
	users := []User{}
	err = db.Debug().Model(&User{}).Limit(100).Find(&users).Error
	if err != nil {
		return &[]User{}, err
	}
	// err is nil on this path; returned for symmetry with the error branch.
	return &users, err
}
// FindUserByID loads the user with the given id into the receiver.
//
// Bug fixed: the original tested gorm.IsRecordNotFoundError(err) only AFTER
// an `err != nil` early return, making the "User Not Found" branch
// unreachable — a missing record surfaced as a raw gorm error instead.
func (u *User) FindUserByID(db *gorm.DB, uid uint32) (*User, error) {
	err := db.Debug().Model(User{}).Where("id = ?", uid).Take(&u).Error
	if gorm.IsRecordNotFoundError(err) {
		return &User{}, errors.New("User Not Found")
	}
	if err != nil {
		return &User{}, err
	}
	return u, nil
}
// UpdateAUser overwrites the username, email and password of the row with the
// given id using the receiver's values.
// NOTE(review): u.Password is written as-is — confirm hashing happens before
// this is called.
func (u *User) UpdateAUser(db *gorm.DB, uid uint32) (*User, error) {
	var err error
	err = db.Debug().Model(&User{}).Where("id = ?", uid).Updates(User{Username: u.Username, Email: u.Email, Password: u.Password}).Error
	if err != nil {
		return &User{}, err
	}
	return u, nil
}
// DeleteAUser deletes the user with the given id and reports the number of
// rows affected.
func (u *User) DeleteAUser(db *gorm.DB, uid uint32) (int64, error) {
	db = db.Debug().Model(&User{}).Where("id = ?", uid).Take(&User{}).Delete(&User{})
	if db.Error != nil {
		return 0, db.Error
	}
	return db.RowsAffected, nil
}
|
/*
Copyright © 2023 SUSE LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package port
import (
"encoding/json"
"fmt"
"net"
"github.com/Microsoft/go-winio"
"golang.org/x/sys/windows"
"golang.org/x/sys/windows/svc/debug"
"github.com/rancher-sandbox/rancher-desktop-agent/pkg/types"
)
const (
	// npipeEndpoint is the full named-pipe URI of the privileged service.
	npipeEndpoint = "npipe:////./pipe/rancher_desktop/privileged_service"
	// protocol is the URI scheme prefix stripped off before listening.
	protocol = "npipe://"
)
// Server is a port server listening for port events from
// RD Guest Agent over vtunnel.
type Server struct {
	proxy *proxy // applies/removes requested port mappings
	eventLogger debug.Log // Windows service event log sink
	quit chan interface{} // closed by Stop to mark shutdown as intentional
	listener net.Listener
	stopped bool
}
// NewServer creates and returns a new Port Server that logs to elog. The
// server starts in the stopped state until Start is called.
func NewServer(elog debug.Log) *Server {
	srv := &Server{
		proxy:       newProxy(),
		eventLogger: elog,
		stopped:     true,
	}
	return srv
}
// Start initiates the port server on the privileged-service named pipe and
// blocks, serving one goroutine per accepted connection, until Stop is called
// or the accept loop fails.
// NOTE(review): s.stopped is never reset to false here — confirm whether
// restarting a stopped Server is supported.
func (s *Server) Start() error {
	s.quit = make(chan interface{})
	c := winio.PipeConfig{
		//
		// SDDL encoded.
		//
		// (system = SECURITY_NT_AUTHORITY | SECURITY_LOCAL_SYSTEM_RID)
		// owner: system
		// ACE Type: (A) Access Allowed
		// grant: (GA) GENERIC_ALL to (WD) Everyone
		//
		SecurityDescriptor: "O:SYD:(A;;GA;;;WD)",
	}
	l, err := winio.ListenPipe(npipeEndpoint[len(protocol):], &c)
	if err != nil {
		return fmt.Errorf("port server listen error: %w", err)
	}
	s.listener = l
	for {
		conn, err := s.listener.Accept()
		if err != nil {
			// Accept also fails when Stop closes the listener; the closed
			// quit channel distinguishes that graceful path from real errors.
			select {
			case <-s.quit:
				s.eventLogger.Info(uint32(windows.NO_ERROR), "port server received a stop signal")
				return nil
			default:
				return fmt.Errorf("port server connection accept error: %w", err)
			}
		} else {
			go s.handleEvent(conn)
		}
	}
}
// handleEvent decodes a single PortMapping JSON payload from conn and applies
// it via the proxy, reporting failures to the Windows event log.
func (s *Server) handleEvent(conn net.Conn) {
	defer conn.Close()
	var pm types.PortMapping
	err := json.NewDecoder(conn).Decode(&pm)
	if err != nil {
		s.eventLogger.Error(uint32(windows.ERROR_EXCEPTION_IN_SERVICE), fmt.Sprintf("port server decoding received payload error: %v", err))
		return
	}
	s.eventLogger.Info(uint32(windows.NO_ERROR), fmt.Sprintf("handleEvent for %+v", pm))
	if err = s.proxy.exec(pm); err != nil {
		s.eventLogger.Error(uint32(windows.ERROR_EXCEPTION_IN_SERVICE), fmt.Sprintf("port proxy [%+v] failed: %v", pm, err))
	}
}
// Stop shuts down the server gracefully: it signals Start's accept loop via
// the quit channel, closes the listener, and removes every active port
// mapping held by the proxy.
func (s *Server) Stop() {
	close(s.quit)
	s.listener.Close()
	s.eventLogger.Info(uint32(windows.NO_ERROR), fmt.Sprintf("remove all %+v", s.proxy.portMappings))
	if err := s.proxy.removeAll(); err != nil {
		s.eventLogger.Warning(uint32(windows.ERROR_EXCEPTION_IN_SERVICE), err.Error())
	}
	s.stopped = true
}
|
package main
import (
"flag"
"fmt"
"os"
"sync"
"text/tabwriter"
"github.com/mayflower/docker-ls/cli/docker-ls/response"
"github.com/mayflower/docker-ls/cli/util"
"github.com/mayflower/docker-ls/lib"
)
// repositoriesCmd implements the "repositories" CLI command: list the
// registry's repositories, optionally with their tags.
type repositoriesCmd struct {
	flags *flag.FlagSet
	cfg *Config
}
// execute parses argv, queries the registry at the configured recursion level
// (0 = repository names only, >=1 = repositories with tags) and renders the
// sorted result as a table or serialized output, optionally with statistics.
func (r *repositoriesCmd) execute(argv []string) (err error) {
	libCfg := lib.NewConfig()
	libCfg.BindToFlags(r.flags)
	r.cfg = newConfig()
	r.cfg.bindToFlags(r.flags,
		OPTION_JSON_OUTPUT|OPTION_PROGRESS|OPTION_RECURSION_LEVEL|OPTION_STATISTICS|OPTION_INTERACTIVE_PASSWORD|OPTION_TABLE_OUTPUT)
	err = r.flags.Parse(argv)
	if err != nil {
		return
	}
	if r.cfg.interactivePassword {
		err = util.PromptPassword(&libCfg)
		if err != nil {
			return
		}
	}
	// The command accepts no positional arguments.
	if len(r.flags.Args()) != 0 {
		r.flags.Usage()
		os.Exit(1)
	}
	registryApi, err := lib.NewRegistryApi(libCfg)
	if err != nil {
		return
	}
	var resp sortable
	// NOTE(review): a negative recursionLevel matches neither case, leaving
	// resp nil so resp.Sort() below would panic — confirm the flag is
	// validated upstream.
	switch {
	case r.cfg.recursionLevel >= 1:
		resp, err = r.listLevel1(registryApi)
	case r.cfg.recursionLevel == 0:
		resp, err = r.listLevel0(registryApi)
	}
	if err != nil {
		return
	}
	resp.Sort()
	if r.cfg.tableOutput {
		w := tabwriter.NewWriter(os.Stdout, 50, 1, 3, ' ', 0)
		switch repositories := resp.(type) {
		case *response.RepositoriesL0:
			fmt.Fprintln(w, "REPOSITORY")
			for _, repository := range repositories.Repositories {
				fmt.Fprintf(w, "%s\n", repository)
			}
			w.Flush()
		case *response.RepositoriesL1:
			fmt.Fprintln(w, "REPOSITORY\tTAG")
			for _, repository := range repositories.Repositories {
				for _, tag := range repository.Tags {
					fmt.Fprintf(w, "%s\t%s\n", repository.RepositoryName, tag)
				}
			}
			w.Flush()
		}
	} else {
		err = serializeToStdout(resp, r.cfg)
	}
	if r.cfg.statistics {
		dumpStatistics(registryApi.GetStatistics())
	}
	return
}
// listLevel0 fetches the plain repository list (no tags), reporting progress
// while it streams.
func (r *repositoriesCmd) listLevel0(api lib.RegistryApi) (resp *response.RepositoriesL0, err error) {
	progress := NewProgressIndicator(r.cfg)
	progress.Start(fmt.Sprintf("requesting list from %s", api.GetRegistryUrl()))
	result := api.ListRepositories()
	resp = response.NewRepositoriesL0()
	progress.Progress()
	for repository := range result.Repositories() {
		resp.AddRepository(repository)
	}
	// LastError is only meaningful after the channel has drained.
	err = result.LastError()
	progress.Finish("done")
	return
}
// listLevel1 fetches the repository list and, concurrently (one goroutine per
// repository), each repository's tags. The first error seen on the errors
// channel is returned.
// NOTE(review): resp.AddTags is called from multiple goroutines concurrently
// — verify it is internally synchronized.
func (r *repositoriesCmd) listLevel1(api lib.RegistryApi) (resp *response.RepositoriesL1, err error) {
	progress := NewProgressIndicator(r.cfg)
	progress.Start(fmt.Sprintf("requesting list from %s", api.GetRegistryUrl()))
	repositoriesResult := api.ListRepositories()
	resp = response.NewRepositoriesL1()
	progress.Progress()
	errors := make(chan error)
	go func() {
		var wait sync.WaitGroup
		for repository := range repositoriesResult.Repositories() {
			wait.Add(1)
			go func(repository lib.Repository) {
				tagsResult := api.ListTags(repository.Name())
				tagsL0 := response.NewTagsL0(repository.Name())
				for tag := range tagsResult.Tags() {
					tagsL0.AddTag(tag)
				}
				progress.Progress()
				resp.AddTags(tagsL0)
				if err := tagsResult.LastError(); err != nil {
					errors <- err
				}
				wait.Done()
			}(repository)
		}
		if err := repositoriesResult.LastError(); err != nil {
			errors <- err
		}
		wait.Wait()
		// Closing errors ends the collection loop below.
		close(errors)
	}()
	// Keep only the first error; later ones are drained and discarded.
	for nextError := range errors {
		if err == nil {
			err = nextError
		}
	}
	progress.Finish("done")
	return
}
// newRepositoriesCmd constructs the "repositories" command with its flag set
// and usage text.
func newRepositoriesCmd(name string) (cmd *repositoriesCmd) {
	cmd = &repositoriesCmd{flags: flag.NewFlagSet(name, flag.ExitOnError)}
	cmd.flags.Usage = commandUsage(name, "", "List all repositories.", cmd.flags)
	return cmd
}
|
package postal
import (
"encoding/json"
"github.com/cloudfoundry-incubator/notifications/cf"
"github.com/cloudfoundry-incubator/notifications/models"
"github.com/pivotal-cf/uaa-sso-golang/uaa"
)
// Field names used when trimming serialized responses, and the placeholder
// user GUID used for email-only (non-UAA) recipients.
const (
	EmailFieldName = "email"
	RecipientsFieldName = "recipient"
	EmptyIDForNonUser = ""
)
// RecipeInterface is implemented by notification recipes: Dispatch delivers a
// notification; Trim post-processes the serialized responses.
type RecipeInterface interface {
	Dispatch(clientID string, guid TypedGUID, options Options, conn models.ConnectionInterface) ([]Response, error)
	Trim([]byte) []byte
}
// EmailRecipe delivers notifications straight to an email address (no UAA
// user lookup), using templates resolved by templatesLoader.
type EmailRecipe struct {
	mailer MailerInterface
	templatesLoader TemplatesLoaderInterface
}
// NewEmailRecipe builds an EmailRecipe from its two collaborators.
func NewEmailRecipe(mailer MailerInterface, templatesLoader TemplatesLoaderInterface) EmailRecipe {
	return EmailRecipe{
		mailer: mailer,
		templatesLoader: templatesLoader,
	}
}
// Dispatch delivers an email notification directly to options.To. The
// recipient map uses the empty-GUID placeholder since there is no UAA user;
// space/organization context is likewise empty.
func (recipe EmailRecipe) Dispatch(clientID string, guid TypedGUID,
	options Options, conn models.ConnectionInterface) ([]Response, error) {
	users := map[string]uaa.User{EmptyIDForNonUser: uaa.User{Emails: []string{options.To}}}
	subjectSuffix := recipe.subjectSuffix(options.Subject)
	templates, err := recipe.templatesLoader.LoadTemplates(subjectSuffix, models.EmailBodyTemplateName, clientID, options.KindID)
	if err != nil {
		return []Response{}, TemplateLoadError("An email template could not be loaded")
	}
	return recipe.mailer.Deliver(conn, templates, users, options, cf.CloudControllerSpace{}, cf.CloudControllerOrganization{}, clientID), nil
}
// Trim strips the recipient field from the serialized responses.
func (recipe EmailRecipe) Trim(responses []byte) []byte {
	return Trimmer{}.TrimFields(responses, RecipientsFieldName)
}
// determineSubjectTemplate picks the subject template name based on whether a
// subject was supplied.
// NOTE(review): duplicates subjectSuffix below but returns the package-local
// Subject*TemplateName constants instead of the models.* ones — confirm which
// is the live code path (Dispatch calls subjectSuffix).
func (recipe EmailRecipe) determineSubjectTemplate(subject string) string {
	if subject == "" {
		return SubjectMissingTemplateName
	}
	return SubjectProvidedTemplateName
}
// Trimmer removes named fields from JSON-encoded lists of objects.
type Trimmer struct{}
// TrimFields deletes the named field from every object in a JSON array and
// returns the re-encoded array. It panics when responses is not a valid JSON
// array of objects (matching the original contract).
//
// Generalized: values decode as interface{} rather than string, so objects
// containing numbers, booleans, nulls or nested values no longer cause an
// unmarshal panic; output for all-string objects is unchanged.
func (t Trimmer) TrimFields(responses []byte, field string) []byte {
	var results []map[string]interface{}
	if err := json.Unmarshal(responses, &results); err != nil {
		panic(err)
	}
	for _, value := range results {
		delete(value, field)
	}
	trimmed, err := json.Marshal(results)
	if err != nil {
		panic(err)
	}
	return trimmed
}
// subjectSuffix returns the models-level subject template name: the
// "missing" template when no subject was supplied, otherwise the "provided"
// one. Used by Dispatch when loading templates.
func (recipe EmailRecipe) subjectSuffix(subject string) string {
	if subject == "" {
		return models.SubjectMissingTemplateName
	}
	return models.SubjectProvidedTemplateName
}
|
package main
import (
"fmt"
"net"
"strings"
)
// main runs a TCP echo-uppercase server: it accepts connections and serves
// each one on its own goroutine via HandlerConnect.
// NOTE(review): the listen address is hard-coded to a LAN IP — consider
// making it configurable.
func main() {
	// Create the listening socket.
	//listener,err:=net.Listen("tcp","127.0.0.1:8000")
	listener,err:=net.Listen("tcp","172.18.2.34:8000")
	if err!=nil{
		fmt.Println("Net.Listen err",err)
		return
	}
	defer listener.Close()
	// Accept client connections in a loop.
	for{
		fmt.Println("正在监听客户端连接...")
		conn,err:=listener.Accept()
		if err!=nil{
			fmt.Println("listener.Accept err:",err)
			return
		}
		// Serve this client's traffic in its own goroutine.
		go HandlerConnect(conn)
	}
}
func HandlerConnect(conn net.Conn) {
defer conn.Close()
//读取客户端网络地址
addr:=conn.RemoteAddr()
fmt.Println(addr,"客户端成功连接")
//循环读取客户端数据
buf:=make([]byte,4096)
for{
n,err:=conn.Read(buf)
if "exit\r\n"==string(buf[:n])||"exit\n"==string(buf[:n]){
fmt.Println(addr,"服务器接收到客户端退出请求 退出")
return
}
if n==0{
fmt.Println(addr,"服务器检测到客户端关闭...")
return
}
if err!=nil{
fmt.Println("conn.Read err:",err)
return
}
fmt.Println("服务器读到数据:",string(buf[:n]))
//小写转大写
str:=strings.ToUpper(string(buf[:n]))
conn.Write([]byte(str))
}
} |
package first
import (
"go/ast"
"github.com/bunniesandbeatings/go-flavor-parser/architecture"
"github.com/davecgh/go-spew/spew"
)
// TypeSpecVisitor walks a single type declaration within its parse Context
// and records what it finds into the package model (see Visit).
type TypeSpecVisitor struct {
	Context
	TypeSpec *ast.TypeSpec
}
// NewTypeSpecVisitor builds a visitor for the given type declaration within
// the given parse context.
func NewTypeSpecVisitor(context Context, typeSpec *ast.TypeSpec) *TypeSpecVisitor {
	return &TypeSpecVisitor{
		Context:  context,
		TypeSpec: typeSpec,
	}
}
// parseMethods flattens a function type's parameter and result types into a
// single list.
// NOTE(review): the list is seeded with the literal values "bunnies" and
// "beatings", which look like leftover debug placeholders — they are
// preserved here for identical behavior, but confirm before relying on them.
func parseMethods(funcType *ast.FuncType) []architecture.Type {
	result := []architecture.Type{"bunnies", "beatings"}
	result = append(result, fieldListTypes(funcType.Params)...)
	result = append(result, fieldListTypes(funcType.Results)...)
	return result
}
// Visit records the visited type into the package model: interfaces are
// captured with their method signatures, structs by name only. It always
// returns nil so the walk does not descend further.
func (visitor TypeSpecVisitor) Visit(node ast.Node) ast.Visitor {
	// TODO: Can types be private?
	switch t := node.(type) {
	case *ast.InterfaceType:
		methods := []*architecture.Method{}
		for i, field := range t.Methods.List {
			// Each interface entry must carry exactly one name; anything else
			// (e.g. an embedded interface has zero names) is rejected here.
			if len(field.Names) != 1 {
				panic(spew.Sprintf("Method %d of interface %s does not have one name: %#v", i, visitor.TypeSpec.Name.Name, field.Names))
			}
			var parmTypes, returnTypes []architecture.Type
			if t, ok := field.Type.(*ast.FuncType); ok {
				parmTypes = fieldListTypes(t.Params)
				returnTypes = fieldListTypes(t.Results)
			} else {
				panic(spew.Sprintf("Cannot determine type of interface field %#v", field))
			}
			method := &architecture.Method{
				Func: architecture.Func{
					Name: field.Names[0].Name,
					Package: visitor.Package.Name,
					Filename: visitor.Filename,
					ParmTypes: parmTypes,
					ReturnTypes: returnTypes,
				},
				// Interface methods carry no concrete receiver type.
				ReceiverType: "",
			}
			methods = append(methods, method)
		}
		visitor.Package.AddInterface(visitor.TypeSpec.Name.Name, visitor.Filename, methods)
	case *ast.StructType:
		visitor.Package.AddStruct(visitor.TypeSpec.Name.Name, visitor.Filename)
	}
	return nil
}
|
package main
import (
"fmt"
"syscall"
)
func getSecrets() (string, string, string) {
var accessKey string
var secretKey string
var warn string
if a, ok := syscall.Getenv("AWS_ACCESS"); ok {
accessKey = a
} else {
warn += fmt.Sprintf("AWS_ACCESS env var is nil in operator deployment. Please set up AWS_ACCESS as an environment variable in operator deployment\n")
}
if s, ok := syscall.Getenv("AWS_SECRET"); ok {
secretKey = s
} else {
warn += fmt.Sprintf("AWS_SECRET env var is nil in operator deployment. Please set up AWS_SECRET as an environment variable in operator deployment\n")
}
return accessKey, secretKey, warn
}
|
package rpcd
import (
"github.com/Cloud-Foundations/Dominator/lib/errors"
"github.com/Cloud-Foundations/Dominator/lib/srpc"
"github.com/Cloud-Foundations/Dominator/proto/fleetmanager"
)
// PowerOnMachine powers on the machine named in the request via the
// hypervisors manager, using the caller's auth information. Any failure is
// reported as a string inside the reply (SRPC-friendly error transport) and
// the method itself returns nil.
func (t *srpcType) PowerOnMachine(conn *srpc.Conn,
	request fleetmanager.PowerOnMachineRequest,
	reply *fleetmanager.PowerOnMachineResponse) error {
	// NOTE(review): unkeyed composite literal — naming the field would make
	// this robust against struct changes; confirm the field name before
	// changing.
	*reply = fleetmanager.PowerOnMachineResponse{
		errors.ErrorToString(t.hypervisorsManager.PowerOnMachine(
			request.Hostname, conn.GetAuthInformation()))}
	return nil
}
|
package testhandler
import (
"bytes"
"context"
"fmt"
"log"
"os"
"strconv"
"time"
"github.com/modeckrus/firebase/firebasestorage"
"cloud.google.com/go/firestore"
cstorage "cloud.google.com/go/storage"
gstorage "cloud.google.com/go/storage"
firebase "firebase.google.com/go"
Auth "firebase.google.com/go/auth"
"firebase.google.com/go/db"
firestorage "firebase.google.com/go/storage"
"github.com/disintegration/imaging"
)
// Size is one requested thumbnail dimension (in pixels).
type Size struct {
	Width int
	Height int
}
// FirestoreEvent is the payload of a Firestore event: the document state
// before (OldValue) and after (Value) the triggering change, plus the list of
// changed field paths.
type FirestoreEvent struct {
	OldValue FirestoreValue `json:"oldValue"`
	Value FirestoreValue `json:"value"`
	UpdateMask struct {
		FieldPaths []string `json:"fieldPaths"`
	} `json:"updateMask"`
}
// FirestoreValue holds Firestore fields.
type FirestoreValue struct {
	CreateTime time.Time `json:"createTime"`
	// Fields is the data for this value. The type depends on the format of your
	// database. Log an interface{} value and inspect the result to see a JSON
	// representation of your database fields.
	Fields ThubHandStruct `json:"fields"`
	Name string `json:"name"`
	UpdateTime time.Time `json:"updateTime"`
}
// ThumbVal is currently an empty placeholder type.
type ThumbVal struct {
}
// Package-level Firebase/GCP handles, initialized once in init().
var client *db.Client
var fstore *firestore.Client
// NOTE(review): storage is declared but never assigned in the visible code.
var storage *firestorage.Client
var bucket *gstorage.BucketHandle
var auth *Auth.Client
var cstor *cstorage.Client
var projectID = os.Getenv("GOOGLE_CLOUD_PROJECT")
// init creates the Firebase app and the realtime-database, Firestore, auth
// and Cloud Storage clients; any failure aborts the process.
func init() {
	ctx := context.Background()
	conf := &firebase.Config{
		ProjectID: projectID,
		DatabaseURL: fmt.Sprintf("https://%v.firebaseio.com/", projectID),
		StorageBucket: fmt.Sprintf("%v.appspot.com", projectID),
	}
	//opt := option.WithCredentialsFile("./secured/adminsdk.json") //Specify this file by ur adminsdk, u can find it in settigns of ur firebase project
	app, err := firebase.NewApp(ctx, conf)
	if err != nil {
		log.Fatal(err)
	}
	client, err = app.Database(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fstore, err = app.Firestore(ctx)
	if err != nil {
		log.Fatal(err)
	}
	auth, err = app.Auth(ctx)
	if err != nil {
		log.Fatal(err)
	}
	cstor, err = cstorage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// NOTE(review): bucket name is hard-coded while conf.StorageBucket above
	// is derived from projectID — these disagree unless projectID is
	// "gvisionmodeck"; confirm which is intended.
	bucket = cstor.Bucket("gvisionmodeck.appspot.com")
}
// ThubHandStruct mirrors the raw Firestore field encoding of a thumbnail
// request document (Path plus an array of sizes).
// NOTE(review): Height is declared as a stringValue while Width is an
// integerValue — confirm the two fields really arrive in different Firestore
// encodings.
type ThubHandStruct struct {
	Path struct {
		StringValue string `json:"stringValue"`
	} `json:"Path"`
	Sizes struct {
		ArrayValue struct {
			Values []struct {
				MapValue struct {
					Fields struct {
						Height struct {
							StringValue string `json:"stringValue"`
						} `json:"Height"`
						Width struct {
							IntegerValue string `json:"integerValue"`
						} `json:"Width"`
					} `json:"fields"`
				} `json:"mapValue"`
			} `json:"values"`
		} `json:"arrayValue"`
	} `json:"Sizes"`
}
// Thubnail is the flattened form of a thumbnail request: a storage path and
// the list of integer pixel sizes to generate.
type Thubnail struct {
	Path string `json:"path"`
	Sizes []struct {
		Height int `json:"height"`
		Width int `json:"width"`
	} `json:"sizes"`
}
// convertToThubStrcut flattens the raw Firestore payload into a Thubnail with
// integer dimensions, returning the first conversion error encountered.
func convertToThubStrcut(hand ThubHandStruct) (Thubnail, error) {
	var thub Thubnail
	thub.Path = hand.Path.StringValue
	for _, size := range hand.Sizes.ArrayValue.Values {
		// Height arrives as a stringValue, Width as an integerValue — see the
		// NOTE on ThubHandStruct.
		h, err := strconv.Atoi(size.MapValue.Fields.Height.StringValue)
		if err != nil {
			return thub, err
		}
		w, err := strconv.Atoi(size.MapValue.Fields.Width.IntegerValue)
		if err != nil {
			return thub, err
		}
		thub.Sizes = append(thub.Sizes, struct {
			Height int "json:\"height\""
			Width int "json:\"width\""
		}{
			Height: h,
			Width: w,
		})
	}
	return thub, nil
}
// Thubnails generates thumbnails for an image stored in Firebase Storage: it
// downloads filename from bucket, decodes it, writes the decoded source to a
// local file (preserving the original behavior), and for each requested size
// uploads a Lanczos-resampled thumbnail named "@thub_<W>X<H><filename>".
//
// Fixed: the errors from firebasestorage.Read, imaging.Decode and
// imaging.Encode were ignored, so a failed download surfaced later as a
// confusing nil-image crash instead of the real cause. The loop-invariant
// FormatFromFilename call is also hoisted out of the loop.
func Thubnails(cstor *gstorage.Client, bucket *gstorage.BucketHandle, filename string, sizes []Size) {
	raw, err := firebasestorage.Read(cstor, bucket, filename)
	if err != nil {
		log.Fatal(err)
	}
	src, err := imaging.Decode(bytes.NewReader(raw))
	if err != nil {
		log.Fatal(err)
	}
	if err := imaging.Save(src, filename); err != nil {
		log.Fatal(err)
	}
	// The output format depends only on the filename, not on the size.
	filetype, err := imaging.FormatFromFilename(filename)
	if err != nil {
		log.Fatal(err)
	}
	for _, size := range sizes {
		img := imaging.Thumbnail(src, size.Width, size.Height, imaging.Lanczos)
		var buf bytes.Buffer
		if err := imaging.Encode(&buf, img, filetype); err != nil {
			log.Println(err)
			continue
		}
		if err := firebasestorage.Write(cstor, bucket, fmt.Sprintf("@thub_%vX%v%v", size.Width, size.Height, filename), &buf); err != nil {
			log.Println(err)
		}
	}
}
|
package main
import (
"bufio"
"os"
"strconv"
"fmt"
"strings"
)
// main reads a range [p, q] from stdin and prints all modified Kaprekar
// numbers in it (numbers i whose square, split after its first
// len(sq)-len(i) digits, has parts summing back to i), in ascending order,
// or "INVALID RANGE" when none exist.
func main() {
	reader := bufio.NewReader(os.Stdin)
	p_string, _ := ReadLine(reader)
	p, err := strconv.Atoi(p_string)
	check(err)
	q_string, _ := ReadLine(reader)
	q, err := strconv.Atoi(q_string)
	check(err)
	var kaprekars []int
	// Iterate downward; results are printed in reverse below.
	for i := q; i >= p; i-- {
		//get the length of i
		// NOTE(review): this Contains check can never be true — i strictly
		// decreases and each value is appended at most once — and Contains
		// rebuilds a lookup map on every call.
		if Contains(kaprekars,i) {
			continue
		}
		d := len(strconv.Itoa(i))
		// NOTE(review): i*i is computed in int before the int64 conversion,
		// so it can overflow on 32-bit platforms for large q.
		sq := int64(i * i)
		sqAr := strings.SplitAfter(strconv.FormatInt(sq,10),"")
		sqAr0 := sqAr[:len(sqAr)-d]
		sq0 := 0
		sq1 := 0
		// NOTE(review): the Atoi errors below are ignored; when the upper
		// split is empty, Atoi("") errors and returns 0, which happens to
		// give the intended sum.
		sq0,err = strconv.Atoi(strings.Join(sqAr0,""))
		sqAr1 := sqAr[len(sqAr)-d:]
		sq1,err = strconv.Atoi(strings.Join(sqAr1,""))
		if sq0 + sq1 == i {
			kaprekars = append(kaprekars,i)
		}
	}
	if len(kaprekars) == 0 {
		fmt.Println("INVALID RANGE")
	} else {
		// kaprekars was filled from high to low; print back-to-front for
		// ascending, space-separated output.
		for j := len(kaprekars)-1; j >= 0; j-- {
			fmt.Print(kaprekars[j])
			if j > 0 {
				fmt.Print(" ")
			}
		}
	}
}
// Contains reports whether item occurs anywhere in slice.
func Contains(slice []int, item int) bool {
	for _, v := range slice {
		if v == item {
			return true
		}
	}
	return false
}
func ReadLine(r *bufio.Reader) (string, error) {
var (
isPrefix bool = true
err error = nil
line, ln []byte
)
for isPrefix && err == nil {
line, isPrefix, err = r.ReadLine()
ln = append(ln, line...)
}
return string(ln), err
}
func check(err error) {
if err != nil {
panic(err)
}
}
|
package rsi
import (
"github.com/charlesfan/go-api/repository/user"
"github.com/charlesfan/go-api/utils/log"
)
// EmailLoginBody is the JSON payload of an e-mail login request; both fields
// are mandatory (enforced via the gin "binding" tags).
type EmailLoginBody struct {
	Email string `json:"email" binding:"required"`
	Password string `json:"password" binding:"required"`
}
// loginService implements the login use-cases on top of the user repository.
type loginService struct {
	// ---Repository---
	user user.Repository
	// ---Other---
}
// EmailChecking validates an e-mail login request. It currently only logs the
// attempt; actual credential verification against the repository is still
// TODO.
//
// SECURITY fix: the previous version logged the raw password. Credentials
// must never reach log output, so only the fact that a password was supplied
// is recorded now.
func (s *loginService) EmailChecking(b *EmailLoginBody) error {
	log.Info("Here in EmailChecking function: ")
	log.Info("Got Email: ", b.Email)
	log.Info("Password supplied: ", len(b.Password) > 0)
	return nil
}
// NewLoginService wires a user repository into a LoginServicer implementation.
func NewLoginService(u user.Repository) LoginServicer {
	return &loginService{user: u}
}
|
package transform
import "testing"
// TestInstall is a smoke test: it only verifies that install() runs without
// panicking; no assertions are made on its effects.
func TestInstall(t *testing.T) {
	install()
}
|
/*
Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package knativeeventing
import (
"context"
"fmt"
mf "github.com/manifestival/manifestival"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
clientset "knative.dev/operator/pkg/client/clientset/versioned"
eventingv1alpha1 "knative.dev/operator/pkg/apis/operator/v1alpha1"
knereconciler "knative.dev/operator/pkg/client/injection/reconciler/operator/v1alpha1/knativeeventing"
"knative.dev/operator/pkg/reconciler/common"
kec "knative.dev/operator/pkg/reconciler/knativeeventing/common"
"knative.dev/operator/version"
"knative.dev/pkg/logging"
pkgreconciler "knative.dev/pkg/reconciler"
)
const (
	// oldFinalizerName is the legacy finalizer that older operator versions
	// attached to KnativeEventing resources; ensureFinalizerRemoval patches
	// it away.
	oldFinalizerName = "delete-knative-eventing-manifest"
)
// Reconciler implements controller.Reconciler for KnativeEventing resources.
type Reconciler struct {
	// kubeClientSet allows us to talk to the k8s for core APIs
	kubeClientSet kubernetes.Interface
	// operatorClientSet allows us to talk to the k8s for operator APIs
	operatorClientSet clientset.Interface
	// config is the manifest of KnativeEventing
	config mf.Manifest
	// Platform-specific behavior to affect the transform
	platform common.Platforms
}
// Compile-time checks that Reconciler satisfies the interfaces expected by
// the generated knativeeventing reconciler framework.
var _ knereconciler.Interface = (*Reconciler)(nil)
var _ knereconciler.Finalizer = (*Reconciler)(nil)
// FinalizeKind removes all resources after deletion of a KnativeEventing.
// Cluster-scoped resources are only uninstalled when every KnativeEventing
// in the cluster is being deleted; if any instance remains, the shared
// resources are still needed and nothing is removed.
func (r *Reconciler) FinalizeKind(ctx context.Context, original *eventingv1alpha1.KnativeEventing) pkgreconciler.Event {
	logger := logging.FromContext(ctx)
	// List all KnativeEventings to determine if cluster-scoped resources should be deleted.
	kes, err := r.operatorClientSet.OperatorV1alpha1().KnativeEventings("").List(metav1.ListOptions{})
	if err != nil {
		return fmt.Errorf("failed to list all KnativeEventings: %w", err)
	}
	for _, ke := range kes.Items {
		if ke.GetDeletionTimestamp().IsZero() {
			// Not deleting all KnativeEventings. Nothing to do here.
			return nil
		}
	}
	// Re-render the manifest for this instance so Uninstall removes exactly
	// what was installed.
	manifest, err := r.transform(ctx, original)
	if err != nil {
		return fmt.Errorf("failed to transform manifest: %w", err)
	}
	logger.Info("Deleting cluster-scoped resources")
	return common.Uninstall(&manifest)
}
// ReconcileKind compares the actual state with the desired, and attempts to
// converge the two.
func (r *Reconciler) ReconcileKind(ctx context.Context, ke *eventingv1alpha1.KnativeEventing) pkgreconciler.Event {
	logger := logging.FromContext(ctx)
	ke.Status.InitializeConditions()
	ke.Status.ObservedGeneration = ke.Generation
	logger.Infow("Reconciling KnativeEventing", "status", ke.Status)
	// Stage order matters: the legacy finalizer is removed first, then the
	// manifest is installed and its deployments verified, and only then are
	// resources from older versions cleaned up.
	stages := []func(context.Context, *mf.Manifest, *eventingv1alpha1.KnativeEventing) error{
		r.ensureFinalizerRemoval,
		r.install,
		r.checkDeployments,
		r.deleteObsoleteResources,
	}
	manifest, err := r.transform(ctx, ke)
	if err != nil {
		ke.Status.MarkInstallFailed(err.Error())
		return err
	}
	for _, stage := range stages {
		if err := stage(ctx, &manifest, ke); err != nil {
			return err
		}
	}
	logger.Infow("Reconcile stages complete", "status", ke.Status)
	return nil
}
// transform renders the final manifest for instance by composing the common
// transformers, the default-broker ConfigMap transform, and any
// platform-specific transformers, then applying them to the base config.
func (r *Reconciler) transform(ctx context.Context, instance *eventingv1alpha1.KnativeEventing) (mf.Manifest, error) {
	logger := logging.FromContext(ctx)
	logger.Debug("Transforming manifest")
	platform, err := r.platform.Transformers(r.kubeClientSet, logger)
	if err != nil {
		return mf.Manifest{}, err
	}
	transformers := common.Transformers(ctx, instance)
	transformers = append(transformers, kec.DefaultBrokerConfigMapTransform(instance, logger))
	transformers = append(transformers, platform...)
	return r.config.Transform(transformers...)
}
// ensureFinalizerRemoval ensures that the obsolete "delete-knative-eventing-manifest" is removed from the resource.
// A nil patch from FinalizerRemovalPatch means the finalizer is not present
// and no API call is needed.
func (r *Reconciler) ensureFinalizerRemoval(_ context.Context, _ *mf.Manifest, instance *eventingv1alpha1.KnativeEventing) error {
	patch, err := common.FinalizerRemovalPatch(instance, oldFinalizerName)
	if err != nil {
		return fmt.Errorf("failed to construct the patch: %w", err)
	}
	if patch == nil {
		// Nothing to do here.
		return nil
	}
	patcher := r.operatorClientSet.OperatorV1alpha1().KnativeEventings(instance.Namespace)
	if _, err := patcher.Patch(instance.Name, types.MergePatchType, patch); err != nil {
		return fmt.Errorf("failed to patch finalizer away: %w", err)
	}
	return nil
}
// install applies the transformed manifest and records the installed
// EventingVersion in the KnativeEventing status.
func (r *Reconciler) install(ctx context.Context, manifest *mf.Manifest, ke *eventingv1alpha1.KnativeEventing) error {
	logger := logging.FromContext(ctx)
	logger.Debug("Installing manifest")
	return common.Install(manifest, version.EventingVersion, &ke.Status)
}
// checkDeployments verifies the availability of the deployments declared in
// the manifest, updating the KnativeEventing status accordingly.
func (r *Reconciler) checkDeployments(ctx context.Context, manifest *mf.Manifest, ke *eventingv1alpha1.KnativeEventing) error {
	logger := logging.FromContext(ctx)
	logger.Debug("Checking deployments")
	return common.CheckDeployments(r.kubeClientSet, manifest, &ke.Status)
}
// deleteObsoleteResources deletes resources installed by previous operator
// versions (0.12–0.14) that no longer exist in the current manifest.
// Fix: the loop variable was named `r`, shadowing the method receiver `r`;
// it is now `res` so the receiver stays accessible and the code reads clearly.
func (r *Reconciler) deleteObsoleteResources(ctx context.Context, manifest *mf.Manifest, instance *eventingv1alpha1.KnativeEventing) error {
	resources := []*unstructured.Unstructured{
		// Remove old resources from 0.12
		// https://github.com/knative/eventing-operator/issues/90
		// sources and controller are merged.
		// delete removed or renamed resources.
		common.NamespacedResource("v1", "ServiceAccount", instance.GetNamespace(), "eventing-source-controller"),
		common.ClusterScopedResource("rbac.authorization.k8s.io/v1", "ClusterRole", "knative-eventing-source-controller"),
		common.ClusterScopedResource("rbac.authorization.k8s.io/v1", "ClusterRoleBinding", "knative-eventing-source-controller"),
		common.ClusterScopedResource("rbac.authorization.k8s.io/v1", "ClusterRoleBinding", "eventing-source-controller"),
		common.ClusterScopedResource("rbac.authorization.k8s.io/v1", "ClusterRoleBinding", "eventing-source-controller-resolver"),
		// Remove the legacysinkbindings webhook at 0.13
		common.ClusterScopedResource("admissionregistration.k8s.io/v1beta1", "MutatingWebhookConfiguration", "legacysinkbindings.webhook.sources.knative.dev"),
		// Remove the knative-eventing-sources-namespaced-admin ClusterRole at 0.13
		common.ClusterScopedResource("rbac.authorization.k8s.io/v1", "ClusterRole", "knative-eventing-sources-namespaced-admin"),
		// Remove the apiserversources.sources.eventing.knative.dev CRD at 0.13
		common.ClusterScopedResource("apiextensions.k8s.io/v1beta1", "CustomResourceDefinition", "apiserversources.sources.eventing.knative.dev"),
		// Remove the containersources.sources.eventing.knative.dev CRD at 0.13
		common.ClusterScopedResource("apiextensions.k8s.io/v1beta1", "CustomResourceDefinition", "containersources.sources.eventing.knative.dev"),
		// Remove the cronjobsources.sources.eventing.knative.dev CRD at 0.13
		common.ClusterScopedResource("apiextensions.k8s.io/v1beta1", "CustomResourceDefinition", "cronjobsources.sources.eventing.knative.dev"),
		// Remove the sinkbindings.sources.eventing.knative.dev CRD at 0.13
		common.ClusterScopedResource("apiextensions.k8s.io/v1beta1", "CustomResourceDefinition", "sinkbindings.sources.eventing.knative.dev"),
		// Remove the deployment sources-controller at 0.13
		common.NamespacedResource("apps/v1", "Deployment", instance.GetNamespace(), "sources-controller"),
		// Remove the resources at at 0.14
		common.NamespacedResource("v1", "ServiceAccount", instance.GetNamespace(), "pingsource-jobrunner"),
		common.ClusterScopedResource("rbac.authorization.k8s.io/v1", "ClusterRole", "knative-eventing-jobrunner"),
		common.ClusterScopedResource("rbac.authorization.k8s.io/v1", "ClusterRoleBinding", "pingsource-jobrunner"),
	}
	for _, res := range resources {
		if err := manifest.Client.Delete(res); err != nil {
			return err
		}
	}
	return nil
}
|
package main
import (
"fmt"
"time"
)
// main prints a Portuguese greeting appropriate for the current time of day:
// "Bom dia" before noon, "Boa tarde" before 18:00, "Boa noite" otherwise.
func main() {
	greeting := "Boa noite"
	if h := time.Now().Hour(); h < 12 {
		greeting = "Bom dia"
	} else if h < 18 {
		greeting = "Boa tarde"
	}
	fmt.Println(greeting)
}
|
package git
import (
"path"
"runtime"
"strings"
"testing"
)
// TestDescribeCommit walks `git describe` through its main modes against a
// freshly seeded repository: no-refs failure, commit-oid fallback,
// abbreviated output, exact-tag match, pattern (mis)match, tag-distance
// output, DescribeAll strategy, and workdir description on a new branch.
// The expected hashes are stable because seedTestRepo creates deterministic
// commits.
func TestDescribeCommit(t *testing.T) {
	t.Parallel()
	repo := createTestRepo(t)
	defer cleanupTestRepo(t, repo)
	describeOpts, err := DefaultDescribeOptions()
	checkFatal(t, err)
	formatOpts, err := DefaultDescribeFormatOptions()
	checkFatal(t, err)
	commitID, _ := seedTestRepo(t, repo)
	commit, err := repo.LookupCommit(commitID)
	checkFatal(t, err)
	// No annotated tags can be used to describe master
	_, err = commit.Describe(&describeOpts)
	checkDescribeNoRefsFound(t, err)
	// Fallback
	fallback := describeOpts
	fallback.ShowCommitOidAsFallback = true
	result, err := commit.Describe(&fallback)
	checkFatal(t, err)
	resultStr, err := result.Format(&formatOpts)
	checkFatal(t, err)
	compareStrings(t, "473bf77", resultStr)
	// Abbreviated — describe with the fallback options, format with a
	// shorter abbreviation size.
	abbreviated := formatOpts
	abbreviated.AbbreviatedSize = 2
	result, err = commit.Describe(&fallback)
	checkFatal(t, err)
	resultStr, err = result.Format(&abbreviated)
	checkFatal(t, err)
	compareStrings(t, "473b", resultStr)
	createTestTag(t, repo, commit)
	// Exact tag
	patternOpts := describeOpts
	patternOpts.Pattern = "v[0-9]*"
	result, err = commit.Describe(&patternOpts)
	checkFatal(t, err)
	resultStr, err = result.Format(&formatOpts)
	checkFatal(t, err)
	compareStrings(t, "v0.0.0", resultStr)
	// Pattern no match
	patternOpts.Pattern = "v[1-9]*"
	result, err = commit.Describe(&patternOpts)
	checkDescribeNoRefsFound(t, err)
	commitID, _ = updateReadme(t, repo, "update1")
	commit, err = repo.LookupCommit(commitID)
	checkFatal(t, err)
	// Tag-1
	result, err = commit.Describe(&describeOpts)
	checkFatal(t, err)
	resultStr, err = result.Format(&formatOpts)
	checkFatal(t, err)
	compareStrings(t, "v0.0.0-1-gd88ef8d", resultStr)
	// Strategy: All
	describeOpts.Strategy = DescribeAll
	result, err = commit.Describe(&describeOpts)
	checkFatal(t, err)
	resultStr, err = result.Format(&formatOpts)
	checkFatal(t, err)
	compareStrings(t, "heads/master", resultStr)
	repo.CreateBranch("hotfix", commit, false)
	// Workdir (branch)
	result, err = repo.DescribeWorkdir(&describeOpts)
	checkFatal(t, err)
	resultStr, err = result.Format(&formatOpts)
	checkFatal(t, err)
	compareStrings(t, "heads/hotfix", resultStr)
}
func checkDescribeNoRefsFound(t *testing.T, err error) {
// The failure happens at wherever we were called, not here
_, file, line, ok := runtime.Caller(1)
expectedString := "no reference found, cannot describe anything"
if !ok {
t.Fatalf("Unable to get caller")
}
if err == nil || !strings.Contains(err.Error(), expectedString) {
t.Fatalf(
"%s:%v: was expecting error %v, got %v",
path.Base(file),
line,
expectedString,
err,
)
}
}
|
package util
import (
"fmt"
"github.com/mndrix/tap-go"
rspec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/cgroups"
)
// ValidateLinuxResourcesNetwork validates linux.resources.network.
// It compares the net_cls classid and the per-interface net_prio priorities
// configured in the spec against the values read from the container's
// cgroup, reporting each comparison through the TAP writer.
//
// Robustness fix: ClassID is a *uint32 on both sides and Network itself is a
// pointer; the previous version dereferenced them unconditionally and would
// panic when either was absent.
func ValidateLinuxResourcesNetwork(config *rspec.Spec, t *tap.T, state *rspec.State) error {
	cg, err := cgroups.FindCgroup()
	t.Ok((err == nil), "find network cgroup")
	if err != nil {
		t.Diagnostic(err.Error())
		return nil
	}
	lnd, err := cg.GetNetworkData(state.Pid, config.Linux.CgroupsPath)
	t.Ok((err == nil), "get network cgroup data")
	if err != nil {
		t.Diagnostic(err.Error())
		return nil
	}
	network := config.Linux.Resources.Network
	if network == nil {
		// Nothing requested in the spec; nothing to validate.
		return nil
	}
	if network.ClassID != nil && lnd.ClassID != nil {
		t.Ok(*lnd.ClassID == *network.ClassID, "network ID set correctly")
		t.Diagnosticf("expect: %d, actual: %d", *network.ClassID, *lnd.ClassID)
	}
	for _, priority := range network.Priorities {
		found := false
		for _, lip := range lnd.Priorities {
			if lip.Name == priority.Name {
				found = true
				t.Ok(lip.Priority == priority.Priority, fmt.Sprintf("network priority for %s is set correctly", priority.Name))
				t.Diagnosticf("expect: %d, actual: %d", priority.Priority, lip.Priority)
			}
		}
		t.Ok(found, fmt.Sprintf("network priority for %s found", priority.Name))
	}
	return nil
}
|
package valexa
import (
"net/http"
"io"
"fmt"
"net/url"
"path"
"strings"
"bytes"
"io/ioutil"
"os"
"encoding/pem"
"encoding/base64"
"encoding/json"
"crypto"
"crypto/sha1"
"crypto/x509"
"crypto/rsa"
"time"
)
//https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/developing-an-alexa-skill-as-a-web-service#hosting-a-custom-skill-as-a-web-service
// checkRequestBody wraps an inbound Alexa HTTP request so that its body and
// signature headers can be verified before the skill handles it.
type checkRequestBody struct {
	R *http.Request // the inbound request
}
// echoRequest decodes the request body into an EchoRequest and rejects
// requests whose timestamp falls outside echoApp.ValidReqTimestamp seconds
// (replay protection). Error messages are returned in the package's existing
// (Chinese) wording.
func (T *checkRequestBody) echoRequest(echoApp *EchoApplication) (echoReq *EchoRequest, err error) {
	err = json.NewDecoder(T.R.Body).Decode(&echoReq)
	if err != nil {
		return nil, fmt.Errorf("valexa: Body 数据结构无法解析, 错误的是(%s)", err)
	}
	// Reject stale requests.
	if !echoReq.VerifyTimestamp(echoApp.ValidReqTimestamp) {
		return nil, fmt.Errorf("valexa: 请求时间超出(>%ds),已经过时了。", echoApp.ValidReqTimestamp)
	}
	return echoReq, nil
}
// verifyBody authenticates an incoming Alexa request per Amazon's
// requirements for a custom skill hosted as a web service: it validates the
// SignatureCertChainUrl (https, s3.amazonaws.com host, /echo.api/ path),
// fetches and locally caches the signing certificate chain, verifies each
// link of the chain, checks the leaf certificate's validity window and its
// echo-api.amazon.com subject, and finally verifies the RSA/SHA-1 Signature
// header against the raw request body. On success it returns a reader that
// replays the verified body bytes; T.R.Body is also replaced with the same
// buffered copy so it can be read again downstream.
func (T *checkRequestBody) verifyBody(echoApp *EchoApplication) (body io.Reader, err error) {
	if T.R.Method != "POST" {
		return nil, fmt.Errorf("valexa: 请求仅支持 POST 方法, 错误的是(%s)", T.R.Method)
	}
	certURL := T.R.Header.Get("SignatureCertChainUrl")
	link, err := url.Parse(certURL)
	if err != nil{
		return nil, fmt.Errorf("valexa: 解析SignatureCertChainUrl地址路径失败, 错误的是(%s)", err)
	}
	// Amazon requires the certificate URL to be https://s3.amazonaws.com/echo.api/...
	if !strings.EqualFold(link.Scheme, "https") {
		return nil, fmt.Errorf("valexa: 网址协议仅支持https, 错误的是(%s)", link.Scheme)
	}
	if !strings.EqualFold(link.Host, "s3.amazonaws.com") && !strings.EqualFold(link.Host, "s3.amazonaws.com:443") {
		return nil, fmt.Errorf("valexa: 网址host仅支持s3.amazonaws.com, 错误的是(%s)", link.Host)
	}
	if !strings.HasPrefix(path.Clean(link.Path) , "/echo.api/") {
		return nil, fmt.Errorf("valexa: 网址Path前缀仅支持/echo.api/, 错误的是(%s)", link.Path)
	}
	// Load the certificate from the local cache; on a miss, download it from
	// the certificate URL and cache it under echoApp.CertFolder.
	name := path.Base(link.Path)
	filePath := path.Join(echoApp.CertFolder, name)
	certBody, err := ioutil.ReadFile(filePath)
	if err != nil {
		resp, err := http.Get(certURL)
		if err != nil {
			return nil, fmt.Errorf("valexa: 下载证书文件失败, 错误的是(%s)", err)
		}
		certBody, err = ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			return nil, fmt.Errorf("valexa: 读取文件失败, 错误的是(%s)", err)
		}
		osFile, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE, 0755)
		if err != nil {
			return nil, fmt.Errorf("valexa: 创建文件失败, 错误的是(%s)", err)
		}
		n, err := osFile.Write(certBody)
		osFile.Close()
		if err != nil {
			return nil, fmt.Errorf("valexa: 写入文件失败, 错误的是(%s)", err)
		}
		if len(certBody) != n {
			os.Rename(filePath, filePath+".temp")
			return nil, fmt.Errorf("valexa: 证书文件保存到本地不完整!")
		}
	}
	if len(certBody) == 0 {
		return nil, fmt.Errorf("valexa: 证书文件大小为 0")
	}
	// If the certificate fails validation below, move the cached file out of
	// the way so it will be re-downloaded next time.
	var rename bool = true
	defer func(){
		if err != nil && rename {
			os.Rename(filePath, filePath+".temp")
		}
	}()
	var (
		cCert *x509.Certificate
		rCert *x509.Certificate
	)
	pemBlock, certBody := pem.Decode(certBody)
	if pemBlock == nil {
		return nil, fmt.Errorf("valexa: 无法解析证书PEM文件!")
	}
	// The first PEM block is the leaf certificate used for signature checks.
	x509Certificate, err := x509.ParseCertificate(pemBlock.Bytes)
	if err != nil {
		return nil, fmt.Errorf("valexa: 无法解析证书PEM, 错误的是(%s)", err)
	}
	cCert = x509Certificate
	// Walk the remaining PEM blocks, verifying that each certificate signs
	// the previous one in the chain.
	for len(certBody)>0 {
		pemBlock, certBody = pem.Decode(certBody)
		if pemBlock == nil {
			return nil, fmt.Errorf("valexa: 无法解析证书PEM文件!")
		}
		rCert, err = x509.ParseCertificate(pemBlock.Bytes)
		if err != nil {
			return nil, fmt.Errorf("valexa: 无法解析证书PEM, 错误的是(%s)", err)
		}
		if err := cCert.CheckSignatureFrom(rCert); err != nil {
			return nil, fmt.Errorf("valexa: 无法验证证书链签名, 错误的是(%s)", err)
		}
		cCert = rCert
	}
	// The chain Amazon serves is incomplete: the final certificate cannot be
	// verified against itself, so this self-check stays commented out.
	// if err := cCert.CheckSignatureFrom(cCert); err != nil {
	// 	return nil, fmt.Errorf("根证书无法验证自身签名, 错误的是(%s)", err)
	// }
	if time.Now().Unix() < x509Certificate.NotBefore.Unix() || time.Now().Unix() > x509Certificate.NotAfter.Unix() {
		return nil, fmt.Errorf("valexa: Amazon 证书已经过期!")
	}
	// Check that the leaf certificate was issued for the echo-api.amazon.com domain.
	foundName := false
	for _, altName := range x509Certificate.Subject.Names {
		if altName.Value.(string) == "echo-api.amazon.com" {
			foundName = true
			break
		}
	}
	if !foundName {
		return nil, fmt.Errorf("valexa: Amazon 证书 Subject.names[].Value 没有检测到包含 echo-api.amazon.com 域名。")
	}
	// Certificate recognized: disable the rename-on-error cleanup.
	rename = false
	// Verify the Signature header with the certificate's public key.
	publicKey := x509Certificate.PublicKey
	encryptedSig, err := base64.StdEncoding.DecodeString(T.R.Header.Get("Signature"))
	if err != nil {
		return nil, fmt.Errorf("valexa: 请求标头 Signature 无法识别, 错误的是(%s)", T.R.Header.Get("Signature"))
	}
	// Read the body while tee-ing it into a buffer, hashing with SHA-1
	// (SHA-1 is what Amazon's signing scheme mandates here).
	var bodyBuf bytes.Buffer
	hash := sha1.New()
	ioReader := io.TeeReader(T.R.Body, &bodyBuf)
	_, err = io.Copy(hash, ioReader)
	T.R.Body.Close()
	T.R.Body = ioutil.NopCloser(&bodyBuf)
	if err != nil && err != io.ErrUnexpectedEOF {
		return nil, fmt.Errorf("valexa: 读取 Body 数据转化成 sha1 HASH 出了问题, 错误的是(%s)", err)
	}
	if err := rsa.VerifyPKCS1v15(publicKey.(*rsa.PublicKey), crypto.SHA1, hash.Sum(nil), encryptedSig); err != nil {
		return nil, fmt.Errorf("valexa: 证书无法验证 Body 数据, 错误的是(%s)", err)
	}
	return &bodyBuf, nil
}
|
package api
import (
"InkaTry/warehouse-storage-be/internal/http/admin/dtos"
"InkaTry/warehouse-storage-be/internal/pkg/http/responder"
"InkaTry/warehouse-storage-be/internal/pkg/stores"
"context"
"encoding/json"
"errors"
"github.com/gorilla/mux"
"github.com/stretchr/testify/assert"
"net/http"
"net/http/httptest"
"testing"
)
// TestListBrands is a table-driven test of the ListBrands HTTP handler: each
// case stubs the service-layer handler func and asserts on the HTTP status of
// the recorded response (200 for results or an empty result, 500 on error).
// NOTE(review): the decoded responseBody is never asserted on — only the
// status code is checked.
func TestListBrands(t *testing.T) {
	tts := []struct {
		caseName string
		handlerFunc func(ctx context.Context) (*dtos.ListBrandsResponse, error)
		request func() *http.Request
		result func(resp *http.Response)
	}{
		{
			caseName: "when return result is ok",
			request: func() *http.Request {
				req, _ := http.NewRequest(http.MethodGet, "/list/brands", nil)
				return req
			},
			handlerFunc: func(ctx context.Context) (*dtos.ListBrandsResponse, error) {
				return &dtos.ListBrandsResponse{
					Brands: stores.Results{},
				}, nil
			},
			result: func(resp *http.Response) {
				var responseBody *responder.AdvanceCommonResponse
				json.NewDecoder(resp.Body).Decode(&responseBody)
				assert.Equal(t, resp.StatusCode, http.StatusOK)
			},
		},
		{
			caseName: "when no result found, is ok",
			request: func() *http.Request {
				req, _ := http.NewRequest(http.MethodGet, "/list/brands", nil)
				return req
			},
			handlerFunc: func(ctx context.Context) (*dtos.ListBrandsResponse, error) {
				return nil, nil
			},
			result: func(resp *http.Response) {
				var responseBody *responder.AdvanceCommonResponse
				json.NewDecoder(resp.Body).Decode(&responseBody)
				assert.Equal(t, resp.StatusCode, http.StatusOK)
			},
		},
		{
			caseName: "when error occurs",
			request: func() *http.Request {
				req, _ := http.NewRequest(http.MethodGet, "/list/brands", nil)
				return req
			},
			handlerFunc: func(ctx context.Context) (*dtos.ListBrandsResponse, error) {
				return nil, errors.New("")
			},
			result: func(resp *http.Response) {
				var responseBody *responder.AdvanceCommonResponse
				json.NewDecoder(resp.Body).Decode(&responseBody)
				assert.Equal(t, resp.StatusCode, http.StatusInternalServerError)
			},
		},
	}
	// Each case gets a fresh router and recorder so cases stay independent.
	for _, tt := range tts {
		t.Log(tt.caseName)
		router := mux.NewRouter()
		router.Handle("/list/brands", ListBrands(tt.handlerFunc))
		rr := httptest.NewRecorder()
		req := tt.request()
		router.ServeHTTP(rr, req)
		tt.result(rr.Result())
	}
}
|
package mongo
import (
"encoding/binary"
"flag"
"fmt"
"time"
. "../../base"
"../../store"
"github.com/golang/glog"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
// mongo holds the connection URI, settable via the -mongo flag.
var mongo string

// init registers the -mongo flag and makes this backend available in the
// store registry under the name "mongo".
func init() {
	flag.StringVar(&mongo, "mongo", "mongodb://127.0.0.1/stock", "mongo uri")
	store.Register("mongo", &Mongo{})
}
// Open (re)establishes the mgo session against the configured URI, closing
// any previously open session first.
func (p *Mongo) Open() (err error) {
	if p.session != nil {
		p.Close()
	}
	var s *mgo.Session
	if s, err = mgo.Dial(mongo); err != nil {
		return err
	}
	p.session = s
	return nil
}
// Mongo is a MongoDB-backed store: it embeds the in-memory store.Mem and
// persists tdata/tick collections through an mgo session.
type Mongo struct {
	store.Mem
	session *mgo.Session
}
// Close tears down the active session, if any; it is safe to call repeatedly.
func (p *Mongo) Close() {
	if p.session == nil {
		return
	}
	p.session.Close()
	p.session = nil
}
// LoadTDatas reads every Tdata document from the given collection, ordered
// by _id (which encodes the timestamp — see Time2ObjectId). Iterator errors
// are logged, not returned.
func (p *Mongo) LoadTDatas(table string) (res []Tdata, err error) {
	c := p.session.DB("").C(table)
	d := Tdata{}
	iter := c.Find(nil).Sort("_id").Iter()
	for iter.Next(&d) {
		res = append(res, d)
	}
	if err := iter.Close(); err != nil {
		glog.Warningln(err)
	}
	return
}
// data2BsonM round-trips data through BSON marshalling to obtain a generic
// bson.M document keyed by the struct's marshalled field names.
func data2BsonM(data interface{}) (m bson.M, err error) {
	m = bson.M{}
	buf, err := bson.Marshal(data)
	if err != nil {
		return m, err
	}
	err = bson.Unmarshal(buf, m)
	return m, err
}
// SaveTDatas bulk-upserts the given Tdata records into the collection, using
// the record's timestamp (via Time2ObjectId) as a deterministic _id so
// re-saving the same time point overwrites rather than duplicates.
// Per-record conversion failures are logged and skipped; the bulk run error,
// if any, is logged and returned.
// Idiom fix: `for i, _ := range` is now `for i := range` (gofmt/vet style);
// indexing is kept to avoid copying each element.
func (p *Mongo) SaveTDatas(table string, datas []Tdata) (err error) {
	c := p.session.DB("").C(table)
	b := c.Bulk()
	for i := range datas {
		data := &datas[i]
		id := Time2ObjectId(data.Time)
		m, err := data2BsonM(*data)
		if err != nil {
			glog.Warningln("convert tdata error", err, *data)
			continue
		}
		m["_id"] = id
		b.Upsert(bson.M{"_id": id}, m)
	}
	_, err = b.Run()
	if err != nil {
		glog.Warningln("insert tdatas error", err)
	}
	return
}
// LoadTicks reads every Tick document from the given collection, ordered by
// _id (timestamp-encoded — see Time2ObjectId). Mirrors LoadTDatas; iterator
// errors are logged, not returned.
func (p *Mongo) LoadTicks(table string) (res []Tick, err error) {
	c := p.session.DB("").C(table)
	d := Tick{}
	iter := c.Find(nil).Sort("_id").Iter()
	for iter.Next(&d) {
		res = append(res, d)
	}
	if err := iter.Close(); err != nil {
		glog.Warningln(err)
	}
	return
}
// SaveTicks bulk-upserts the given Tick records into the collection, keyed by
// timestamp-derived _id (Time2ObjectId), mirroring SaveTDatas. Per-record
// conversion failures are logged and skipped; the bulk run error, if any, is
// logged and returned.
// Idiom fix: `for i, _ := range` is now `for i := range` (gofmt/vet style);
// indexing is kept to avoid copying each element.
func (p *Mongo) SaveTicks(table string, ticks []Tick) (err error) {
	c := p.session.DB("").C(table)
	b := c.Bulk()
	for i := range ticks {
		tick := &ticks[i]
		id := Time2ObjectId(tick.Time)
		m, err := data2BsonM(*tick)
		if err != nil {
			glog.Warningln("convert tick error", err, *tick)
			continue
		}
		m["_id"] = id
		b.Upsert(bson.M{"_id": id}, m)
	}
	_, err = b.Run()
	if err != nil {
		glog.Warningln("insert ticks error", err)
	}
	return
}
// Time2ObjectId builds a deterministic 12-byte ObjectId from t: bytes 0-3
// hold the Unix seconds (big-endian) and bytes 4-5 the millisecond fraction;
// the remaining 6 bytes stay zero. Two instants in the same millisecond thus
// map to the same id — which is what the upsert-by-_id callers rely on.
// ObjectId2Time is its inverse.
func Time2ObjectId(t time.Time) bson.ObjectId {
	var b [12]byte
	binary.BigEndian.PutUint32(b[:4], uint32(t.Unix()))
	binary.BigEndian.PutUint16(b[4:6], uint16(t.Nanosecond()/int(time.Millisecond)))
	return bson.ObjectId(string(b[:]))
}
// ObjectId2Time is the inverse of Time2ObjectId: it recovers the UTC
// timestamp (second + millisecond precision) encoded in the id's first six
// bytes. It panics when the id is not exactly 12 bytes long.
func ObjectId2Time(oid bson.ObjectId) time.Time {
	id := string(oid)
	if len(id) != 12 {
		panic(fmt.Sprintf("Invalid ObjectId: %q", id))
	}
	raw := []byte(id[:6])
	secs := int64(binary.BigEndian.Uint32(raw[:4]))
	nsec := int64(binary.BigEndian.Uint16(raw[4:])) * int64(time.Millisecond)
	return time.Unix(secs, nsec).UTC()
}
|
package bot
import (
"fmt"
"log"
"time"
"github.com/anihouse/bot/config"
"github.com/bwmarrin/discordgo"
"github.com/robfig/cron"
"github.com/sirupsen/logrus"
)
// modules maps a module name to its implementation.
type modules map[string]Module

// build variables — NOTE(review): presumably injected at build time via
// -ldflags; confirm against the build scripts.
var (
	Version string
)

// logger is the package-wide structured logger, configured in auth().
var (
	logger *logrus.Logger
)

// bot instances
var (
	Session *discordgo.Session
	Cron = new(cron.Cron)
	Modules = make(modules)
)
// Init starts the bot by authorizing against Discord and opening the
// gateway session (see auth).
func Init() {
	auth()
}
// auth creates the Discord session, builds the logrus logger, mirrors the
// logger's verbosity onto discordgo, bridges discordgo's internal logging
// into logrus, and opens the gateway connection. Any failure is fatal.
func auth() {
	fmt.Println("Authorization...")
	var err error
	Session, err = discordgo.New(config.Session.Token)
	if err != nil {
		log.Fatal(err)
	}
	logger, err = Logger(config.Session.Log)
	if err != nil {
		log.Fatal(err)
	}
	// Mirror the logrus verbosity onto the discordgo session.
	switch logger.Level {
	case logrus.ErrorLevel:
		Session.LogLevel = discordgo.LogError
	case logrus.WarnLevel:
		Session.LogLevel = discordgo.LogWarning
	case logrus.InfoLevel:
		Session.LogLevel = discordgo.LogInformational
	case logrus.DebugLevel:
		Session.LogLevel = discordgo.LogDebug
	}
	// Route discordgo's internal log messages through logrus at the
	// corresponding level.
	discordgo.Logger = func(msgL, caller int, format string, a ...interface{}) {
		switch msgL {
		case discordgo.LogError:
			logger.Errorf(format, a...)
		case discordgo.LogWarning:
			logger.Warnf(format, a...)
		case discordgo.LogInformational:
			logger.Infof(format, a...)
		case discordgo.LogDebug:
			logger.Debugf(format, a...)
		}
	}
	err = Session.Open()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("Running as", Session.State.User, "at", time.Now().Format("02-01-2006 15:04:05.999999999"))
}
|
// +build remoteclient
package adapter
import (
"context"
"encoding/json"
"fmt"
"io"
"strings"
"time"
"github.com/containers/image/types"
"github.com/containers/libpod/cmd/podman/varlink"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/image"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/varlink/go/varlink"
)
// RemoteImageRuntime is wrapper for image runtime
type RemoteImageRuntime struct{}

// RemoteRuntime describes a wrapper runtime struct: a varlink connection plus
// a flag marking it as remote.
type RemoteRuntime struct {
	Conn *varlink.Connection
	Remote bool
}

// LocalRuntime describes a typical libpod runtime; in the remoteclient build
// it simply embeds the varlink-backed RemoteRuntime.
type LocalRuntime struct {
	*RemoteRuntime
}
// GetRuntime returns a LocalRuntime struct with the actual runtime embedded in it
func GetRuntime(c *cli.Context) (*LocalRuntime, error) {
	// A throwaway RemoteRuntime is only needed to establish the connection.
	var dialer RemoteRuntime
	conn, err := dialer.Connect()
	if err != nil {
		return nil, err
	}
	return &LocalRuntime{
		&RemoteRuntime{
			Conn:   conn,
			Remote: true,
		},
	}, nil
}
// Shutdown is a bogus wrapper for compat with the libpod runtime; there is
// nothing to tear down on the remote client side.
func (r RemoteRuntime) Shutdown(force bool) error {
	return nil
}
// ContainerImage is the remote-client view of an image; it embeds the
// varlink-sourced remoteImage data.
type ContainerImage struct {
	remoteImage
}

// remoteImage mirrors the image fields transported over varlink, plus a
// back-reference to the runtime that produced it.
type remoteImage struct {
	ID string
	Labels map[string]string
	RepoTags []string
	RepoDigests []string
	Parent string
	Size int64
	Created time.Time
	InputName string
	Names []string
	Digest digest.Digest
	isParent bool
	Runtime *LocalRuntime
}

// Container is the remote-client view of a container.
type Container struct {
	remoteContainer
}

// remoteContainer bundles a container's config and state as fetched over
// varlink, with the owning runtime.
type remoteContainer struct {
	Runtime *LocalRuntime
	config *libpod.ContainerConfig
	state *libpod.ContainerState
}
// GetImages returns a slice of containerimages over a varlink connection.
// Each image is named by its first repo tag when available, falling back to
// the image ID.
// NOTE(review): the condition is `len(i.RepoTags) > 1`, so an image with
// exactly one tag keeps its ID as the name — confirm whether `> 0` was
// intended.
func (r *LocalRuntime) GetImages() ([]*ContainerImage, error) {
	var newImages []*ContainerImage
	images, err := iopodman.ListImages().Call(r.Conn)
	if err != nil {
		return nil, err
	}
	for _, i := range images {
		name := i.Id
		if len(i.RepoTags) > 1 {
			name = i.RepoTags[0]
		}
		newImage, err := imageInListToContainerImage(i, name, r)
		if err != nil {
			return nil, err
		}
		newImages = append(newImages, newImage)
	}
	return newImages, nil
}
// imageInListToContainerImage converts a varlink ImageInList record into a
// ContainerImage, parsing the string-transported creation date.
// NOTE(review): RepoDigests is populated from i.RepoTags — confirm whether
// the varlink payload actually lacks digests or this is a copy-paste slip.
func imageInListToContainerImage(i iopodman.ImageInList, name string, runtime *LocalRuntime) (*ContainerImage, error) {
	created, err := splitStringDate(i.Created)
	if err != nil {
		return nil, err
	}
	ri := remoteImage{
		InputName: name,
		ID: i.Id,
		Labels: i.Labels,
		RepoTags: i.RepoTags,
		RepoDigests: i.RepoTags,
		Parent: i.ParentId,
		Size: i.Size,
		Created: created,
		Names: i.RepoTags,
		isParent: i.IsParent,
		Runtime: runtime,
	}
	return &ContainerImage{ri}, nil
}
// NewImageFromLocal returns a container image representation of a image over varlink
func (r *LocalRuntime) NewImageFromLocal(name string) (*ContainerImage, error) {
	img, err := iopodman.GetImage().Call(r.Conn, name)
	if err != nil {
		return nil, err
	}
	return imageInListToContainerImage(img, name, r)
}
// LoadFromArchiveReference creates an image from a local archive by pulling
// it through the varlink PullImage endpoint and re-resolving the returned ID.
func (r *LocalRuntime) LoadFromArchiveReference(ctx context.Context, srcRef types.ImageReference, signaturePolicyPath string, writer io.Writer) ([]*ContainerImage, error) {
	// TODO We need to find a way to leak certDir, creds, and the tlsverify into this function, normally this would
	// come from cli options but we don't want want those in here either.
	imageID, err := iopodman.PullImage().Call(r.Conn, srcRef.DockerReference().String(), "", "", signaturePolicyPath, true)
	if err != nil {
		return nil, err
	}
	newImage, err := r.NewImageFromLocal(imageID)
	if err != nil {
		return nil, err
	}
	return []*ContainerImage{newImage}, nil
}
// New calls into local storage to look for an image in local storage or to pull it
func (r *LocalRuntime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *image.DockerRegistryOptions, signingoptions image.SigningOptions, forcePull bool) (*ContainerImage, error) {
	// TODO Creds needs to be figured out here too, like above
	tlsBool := dockeroptions.DockerInsecureSkipTLSVerify
	// Remember SkipTlsVerify is the opposite of tlsverify
	// If tlsBool is true or undefined, we do not skip
	SkipTlsVerify := false
	if tlsBool == types.OptionalBoolFalse {
		SkipTlsVerify = true
	}
	imageID, err := iopodman.PullImage().Call(r.Conn, name, dockeroptions.DockerCertPath, "", signaturePolicyPath, SkipTlsVerify)
	if err != nil {
		return nil, err
	}
	newImage, err := r.NewImageFromLocal(imageID)
	if err != nil {
		return nil, err
	}
	return newImage, nil
}
func splitStringDate(d string) (time.Time, error) {
fields := strings.Fields(d)
t := fmt.Sprintf("%sT%sZ", fields[0], fields[1])
return time.ParseInLocation(time.RFC3339Nano, t, time.UTC)
}
// IsParent reports whether this image is the parent of another image, as
// precomputed by the remote endpoint and cached in remoteImage.isParent
// (no store walk happens on the client side).
func (ci *ContainerImage) IsParent() (bool, error) {
	return ci.remoteImage.isParent, nil
}

// ID returns the image ID as a string
func (ci *ContainerImage) ID() string {
	return ci.remoteImage.ID
}

// Names returns a string array of names associated with the image
func (ci *ContainerImage) Names() []string {
	return ci.remoteImage.Names
}

// Created returns the time the image was created
func (ci *ContainerImage) Created() time.Time {
	return ci.remoteImage.Created
}

// Size returns the size of the image
func (ci *ContainerImage) Size(ctx context.Context) (*uint64, error) {
	usize := uint64(ci.remoteImage.Size)
	return &usize, nil
}

// Digest returns the image's digest
func (ci *ContainerImage) Digest() digest.Digest {
	return ci.remoteImage.Digest
}

// Labels returns a map of the image's labels
func (ci *ContainerImage) Labels(ctx context.Context) (map[string]string, error) {
	return ci.remoteImage.Labels, nil
}

// Dangling returns a bool if the image is "dangling"
func (ci *ContainerImage) Dangling() bool {
	return len(ci.Names()) == 0
}

// TagImage adds the given tag to the image via the varlink TagImage endpoint.
func (ci *ContainerImage) TagImage(tag string) error {
	_, err := iopodman.TagImage().Call(ci.Runtime.Conn, ci.ID(), tag)
	return err
}

// RemoveImage calls varlink to remove an image
func (r *LocalRuntime) RemoveImage(ctx context.Context, img *ContainerImage, force bool) (string, error) {
	return iopodman.RemoveImage().Call(r.Conn, img.InputName, force)
}
// History returns the history of an image and its layers
func (ci *ContainerImage) History(ctx context.Context) ([]*image.History, error) {
	var imageHistories []*image.History
	reply, err := iopodman.HistoryImage().Call(ci.Runtime.Conn, ci.InputName)
	if err != nil {
		return nil, err
	}
	for _, h := range reply {
		// Varlink transports dates as strings; convert back to time.Time.
		created, err := splitStringDate(h.Created)
		if err != nil {
			return nil, err
		}
		ih := image.History{
			ID: h.Id,
			Created: &created,
			CreatedBy: h.CreatedBy,
			Size: h.Size,
			Comment: h.Comment,
		}
		imageHistories = append(imageHistories, &ih)
	}
	return imageHistories, nil
}
// LookupContainer gets basic information about container over a varlink
// connection and then translates it to a *Container.
// Fix: the previous version re-checked the stale `err` from ContainerState
// after calling r.Config (which returns no error) — that branch was dead
// code and has been removed.
func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) {
	state, err := r.ContainerState(idOrName)
	if err != nil {
		return nil, err
	}
	// Config has no error return; failures are logged inside it.
	config := r.Config(idOrName)
	rc := remoteContainer{
		r,
		config,
		state,
	}
	c := Container{
		rc,
	}
	return &c, nil
}
// GetLatestContainer is not implemented for the varlink remote client and
// always returns libpod.ErrNotImplemented.
func (r *LocalRuntime) GetLatestContainer() (*Container, error) {
	return nil, libpod.ErrNotImplemented
}
// ContainerState returns the "state" of the container, fetched over
// varlink and decoded from its JSON representation.
func (r *LocalRuntime) ContainerState(name string) (*libpod.ContainerState, error) { //no-lint
	reply, err := iopodman.ContainerStateData().Call(r.Conn, name)
	if err != nil {
		return nil, err
	}
	data := libpod.ContainerState{}
	if err := json.Unmarshal([]byte(reply), &data); err != nil {
		return nil, err
	}
	// Fix: return an explicit nil instead of the stale outer err variable
	// (always nil at this point, but obscured intent).
	return &data, nil
}
// Config returns a container config fetched over varlink.
// TODO the Spec being returned is not populated. Matt and I could not figure out why. Will defer
// further looking into it for after devconf.
// The libpod function for this has no errors so we are kind of in a tough
// spot here. Logging the errors for now.
//
// NOTE(review): on varlink or unmarshal failure this returns a zero-value
// ContainerConfig with only a logged error — callers cannot distinguish
// failure from an empty config.
func (r *LocalRuntime) Config(name string) *libpod.ContainerConfig {
	reply, err := iopodman.ContainerConfig().Call(r.Conn, name)
	if err != nil {
		logrus.Error("call to container.config failed")
	}
	data := libpod.ContainerConfig{}
	if err := json.Unmarshal([]byte(reply), &data); err != nil {
		logrus.Error("failed to unmarshal container inspect data")
	}
	return &data
}
|
package main
import (
"crypto/rsa"
"encoding/json"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/signal"
"runtime/debug"
"syscall"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
kitprometheus "github.com/go-kit/kit/metrics/prometheus"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/prometheus/client_golang/prometheus"
stdjwt "github.com/dgrijalva/jwt-go"
"github.com/hathbanger/microsvc-base/pkg/microsvc"
"github.com/hathbanger/microsvc-base/pkg/microsvc/models"
)
var (
	// errChannel carries the first fatal error from any goroutine; main
	// blocks on a receive from it at the very end.
	// NOTE(review): the channel is unbuffered and nothing receives from it
	// until the bottom of main, so the early sends in main (config or log
	// setup failures) block forever instead of exiting — confirm intent.
	errChannel = make(chan error)
	// fconfig is the path to the JSON configuration file.
	fconfig = flag.String(
		"config",
		"config.json",
		"the path to the service configuration file",
	)
	// flogPath is the log file location; defaults from $LOG_PATH.
	flogPath = flag.String(
		"log-path",
		os.Getenv("LOG_PATH"),
		"the absolute path to the log file",
	)
	// faddress is the advertise address; defaults from $SERVICE_ADDRESS.
	faddress = flag.String(
		"address",
		os.Getenv("SERVICE_ADDRESS"),
		"the advertise address of the service (default env var \"SERVICE_ADDRESS\")",
	)
	// fport is the advertise port; defaults from $SERVICE_PORT.
	fport = flag.String(
		"port",
		os.Getenv("SERVICE_PORT"),
		"the service advertise port (default env variable \"SERVICE_PORT\")",
	)
	// fbindPort is the port the HTTP server binds; defaults from $PORT.
	fbindPort = flag.String(
		"bind-port",
		os.Getenv("PORT"),
		"the service port (default env variable \"PORT\")",
	)
	// fdebug enables verbose logging.
	fdebug = flag.Bool(
		"debug", false, "print debug information for the service",
	)
	// fversion prints build metadata and exits.
	fversion = flag.Bool(
		"version", false, "set to true for version info",
	)
)
// main wires together configuration, logging, Prometheus instrumentation,
// Consul service discovery, and the HTTP server, then blocks until the
// first error or OS signal arrives on errChannel.
func main() {
	// parse flags
	flag.Parse()
	// --version: print build metadata as JSON and exit.
	if *fversion {
		fmt.Printf(
			`{ "name": "%s", "version": "%s", "commit": "%s", "arch": "%s", "build_time": "%s", "api_version": "%s" }`,
			microsvc.Name,
			microsvc.Ver,
			microsvc.GitCommit,
			microsvc.Arch,
			microsvc.BuildTime,
			microsvc.APIVersion,
		)
		os.Exit(0)
	}
	// load configuration
	config, err := loadConfig()
	if err != nil {
		fmt.Println("WOO!!!", err)
		// NOTE(review): errChannel is unbuffered and no receiver exists
		// yet — this send deadlocks; should presumably log and exit.
		errChannel <- err
	}
	// set up logger: JSON logs to both stdout and the log file, annotated
	// with service name, UTC timestamp, and caller.
	var logger log.Logger
	{
		if len(*flogPath) <= 0 {
			*flogPath = fmt.Sprintf("/var/log/%s.log", microsvc.Name)
		}
		file, err := os.OpenFile(
			*flogPath,
			os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644,
		)
		if err != nil {
			fmt.Println("YO", err)
			// NOTE(review): same unbuffered-send deadlock as above.
			errChannel <- err
		}
		logger = log.NewJSONLogger(io.MultiWriter(os.Stdout, file))
		logger = log.With(logger, "service", microsvc.Name)
		logger = log.With(logger, "timestamp", log.DefaultTimestampUTC)
		logger = log.With(logger, "caller", log.DefaultCaller)
		if !*fdebug {
			// NOTE(review): two Allow options are passed; presumably the
			// last one (AllowInfo) determines the filter — confirm against
			// go-kit's level package.
			logger = level.NewFilter(logger, level.AllowError(), level.AllowInfo())
		}
	}
	// build the service and wrap it with Prometheus duration/count metrics.
	var svc microsvc.Service
	{
		svc = microsvc.New(config, logger)
		svc = microsvc.InstrumentingMiddleware(
			kitprometheus.NewSummaryFrom(
				prometheus.SummaryOpts{
					Namespace: "microservices",
					Subsystem: "microsvc",
					Name:      "request_duration_seconds",
					Help:      "Request duration in seconds.",
				},
				[]string{"result", "mtype", "unit", "method"},
			),
			kitprometheus.NewCounterFrom(
				prometheus.CounterOpts{
					Namespace: "microservices",
					Subsystem: "microsvc",
					Name:      "request_count",
					Help:      "Number of requests received",
				},
				[]string{"result", "mtype", "unit", "method"},
			),
			svc,
		)
	}
	// prepare service-discovery client and registration record.
	sd, reg, err := svc.ServiceDiscovery(config.ServiceAddr, config.ServicePort)
	if err != nil {
		errChannel <- err
	}
	logger.Log("registration", reg)
	// NOTE(review): err was already checked above; this re-check is dead
	// code.
	if err != nil {
		errChannel <- err
	}
	// trap any unmanaged panics and deregister the service from CONSUL
	defer func() {
		if r := recover(); r != nil {
			err = logger.Log(
				"trapped_panic", r,
				"stack_trace", debug.Stack(),
				"deregistering", reg.ID,
			)
			if err != nil {
				errChannel <- err
			}
			err := sd.Agent().ServiceDeregister(reg.ID)
			if err != nil {
				errChannel <- err
			}
		}
		// normal-path deregistration on main exit.
		logger.Log("deregistering", reg.ID)
		err = sd.Agent().ServiceDeregister(reg.ID)
		if err != nil {
			errChannel <- err
		}
	}()
	server := initializeServer(
		microsvc.MakeRoutes(svc, logger, config),
		*fbindPort,
		config,
	)
	// register with discovery and serve; ListenAndServe's error (always
	// non-nil on return) feeds errChannel and unblocks main.
	go func() {
		err := sd.Agent().ServiceRegister(reg)
		if err != nil {
			errChannel <- err
		}
		logger.Log("transport", "HTTP", "port", config.ServicePort)
		errChannel <- server.ListenAndServe()
	}()
	// trap interrupts.
	go func() {
		c := make(chan os.Signal)
		signal.Notify(
			c,
			syscall.SIGINT,
			syscall.SIGTERM,
			syscall.SIGHUP,
			syscall.SIGQUIT,
		)
		errChannel <- fmt.Errorf("%s", <-c)
	}()
	// block until the first error/signal, log it, and fall into the
	// deregistration defer above.
	logger.Log("exit", <-errChannel)
}
// initializeServer builds the HTTP server bound to the given port, with
// CORS middleware wrapped around the router and read/write timeouts taken
// from configuration.
// NOTE(review): ParseDuration errors are silently discarded — a malformed
// config value yields a zero (disabled) timeout; confirm intent.
// NOTE(review): AllowedOrigins("*") admits any origin; confirm this is
// acceptable for this service.
func initializeServer(router *mux.Router, port string, config *models.Config) http.Server {
	httpServerReadTimeout, _ := time.ParseDuration(config.HTTPServerReadTimeout)
	httpServerWriteTimeout, _ := time.ParseDuration(config.HTTPServerWriteTimeout)
	return http.Server{
		Addr: fmt.Sprintf(":%s", port),
		Handler: handlers.CORS(
			handlers.AllowedOrigins([]string{
				"*",
			}),
			handlers.AllowedMethods([]string{
				"GET", "HEAD", "POST", "PUT", "OPTIONS",
			}),
			handlers.AllowedHeaders([]string{
				"Origin", "X-Requested-With", "Content-Type", "Authorization",
			}),
		)(router),
		ReadTimeout:  httpServerReadTimeout,
		WriteTimeout: httpServerWriteTimeout,
	}
}
// loadConfig reads and unmarshals the JSON config file, applies flag/env
// overrides for log path, address, and ports, and validates the bind port.
func loadConfig() (*models.Config, error) {
	var c *models.Config
	_, err := os.Stat(*fconfig)
	if err != nil {
		return nil, err
	}
	f, err := ioutil.ReadFile(*fconfig)
	if err != nil {
		return nil, err
	}
	err = json.Unmarshal(f, &c)
	if err != nil {
		return nil, err
	}
	// flag/env overrides take precedence over file values.
	if len(*flogPath) <= 0 {
		*flogPath = c.LogPath
	}
	if len(*faddress) > 0 {
		c.ServiceAddr = *faddress
	}
	if len(*fport) > 0 {
		c.ServicePort = *fport
	}
	if len(*fbindPort) <= 0 {
		return nil, errors.New("bind-port not set. please set $PORT or pass the bind-port flag")
	}
	// NOTE(review): embedding a private key in source code is a security
	// risk; it should come from configuration or a secret store.
	var pubPEM = `-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCtnZ+HA5hmAcEW
SG/NrT9bddJHyF2w3SdEH/TYSvVl63Mxn9FcVkyL29AKdAUp65SmEOLpRK6hqoLq
8UdC3NXQ/X21OTcV05VAJuMm2KqKeSuEcsUIkhqpfLxNoIN9AIrGEMKNHJXUMNvf
QMr1hTIupZ6Jpy4AAlsKQ6D90kHp9UcN8lJ69gcmGejcvmI7+WlGv3HVbjulBcqK
UJdtFcWgZr0vFrihWjJgA0MPlknMgQf/jerMjGpQIKLZ2MxHdHngSc2NMma0dP4G
oh0GUsp5MIUg5hM/nD4v3iXM7tNPEQFj4V1o5fbLGSyPHqZYpw230ByxPw6x7Xn4
Gyl2crL1AgMBAAECggEAWn3Qu3+tPGXnrWSeGbcWUeaMbuvJobjzkXeSl/YiCDh7
tz7U0esNRMySmBA27M2kkhY1H160IwGL8UdHXFtceuzVS9MBmjfJEEH0nbfK1Bgq
DYQAnOICUZr5TwC96DaTHn932DMxCQNaZvgPkX8WU+fxRVBFEq4no6byT7n6ryVU
PCJsxs1kjXUJsCrE2MqZJCM19j+CNKa0JLoWAafPMb82znYMQ6f1q9Z4T+MZ1ajH
J+ljKa0WySUJexaFnTwSo/UXq3m+vSB9fDM87QPA/bqfhHnOp4RmT6pTfgC1iKUZ
dHVnUvi1Wao7KcnY00EyMbazmhg1fUbLUldQHcyE4QKBgQDZvHJRyqdyNP6Oab5g
X9brwCfriFTCf06K4Cy5ocaYNZowpuANsDNl5wikkHLFneJbQtJlhvUwtwTXfNQt
jbMuyj5zwieveAlPrheXGW/VjX4CmoVwv7HixenQscwKWfzS8Vk6GHCumqrLYyGs
OAMRHNXQ3pE8ZMgRK3rj239P3QKBgQDMIEeSH2UIQjyFtIUTvLzMwCsgGNpOrPHf
A5k+wC55Hv95/GhCkA2KTmzHl9JzrmqoYqVrTaryWtqtvLiXiGgxmPkPT1RLOH/W
Fz6qC9khrlBYyb3sc53mnwhAVEOPsp8eY+lN07bVMV8Any0vB4qyiCFTEGWBBXXd
SIDOn7hJ+QKBgQCAhyr8eSIK2pmBO45zmV9m3pEyCdHu1fNpKxd7pLF0W//exELy
EZblilGhwtrdKGvb7z//SoEl9oNXKIqfMUwaTKw87Nk8TSFB9cRbH1rStqkxpEEs
4xuAf8+br7iAS8pgQrOnBZJOn2I+mQ/hd1boHRtiJl+ZROyMphvusT0fyQKBgQCB
JldCK5zn2cijK/Ea6MpnnZprh23wY1nxKTy3SC7fMW6gxsNMggofHLmUmwlracpP
2YIh3xUum69KR2Jfdc2+u7OxLRb/NLMlSLW8LxzlQ33Qf2wsA4a/GJXG5cmNTI2C
U+KT/ETspH0gTpXu8I2foaO8A17FgCfvpuTgVovqOQKBgHVslwTHZzF1W86gghlQ
Pu/R6nmGCn3eoExdDTUb95UsaJARUI2Btah2kG4hb+n/tv7CgU72+ZLlkbGxPXnv
I4qNruA1uDyzzf1WRck72bRHezbJaiihQxPSg8gexS4KN2x6z0jp8U1D2Dv24+XI
brsn3pvJjLnMcQeLtDRe8t7z
-----END PRIVATE KEY-----`
	// NOTE(review): the parse error here is discarded with _, and the err
	// checked just below is the stale (nil) error from json.Unmarshal — a
	// parse failure is silently ignored. The PEM above is labeled PRIVATE
	// KEY but is parsed as a public key, so parsing presumably always
	// fails; confirm intent.
	k, _ := stdjwt.ParseRSAPublicKeyFromPEM([]byte(pubPEM))
	if err != nil {
		fmt.Println("READIN", k)
		return nil, err
	}
	// NOTE(review): the parsed key k is never used — c.PublicKey is set to
	// a zero-value rsa.PublicKey; confirm whether k was meant here.
	var tempKey = rsa.PublicKey{}
	c.PublicKey = &tempKey
	return c, nil
}
|
package config
import (
"context"
"sync"
"github.com/pomerium/pomerium/internal/log"
)
// The LogManager configures logging based on options.
type LogManager struct {
mu sync.Mutex
}
// NewLogManager creates a new LogManager.
func NewLogManager(ctx context.Context, src Source) *LogManager {
mgr := &LogManager{}
src.OnConfigChange(ctx, mgr.OnConfigChange)
mgr.OnConfigChange(ctx, src.GetConfig())
return mgr
}
// Close closes the log manager.
func (mgr *LogManager) Close() error {
return nil
}
// OnConfigChange is called whenever configuration changes.
func (mgr *LogManager) OnConfigChange(_ context.Context, cfg *Config) {
if cfg == nil || cfg.Options == nil {
return
}
mgr.mu.Lock()
defer mgr.mu.Unlock()
if cfg.Options.Debug {
log.EnableDebug()
} else {
log.DisableDebug()
}
if cfg.Options.LogLevel != "" {
log.SetLevel(cfg.Options.LogLevel.ToZerolog())
}
}
|
package dynamic_programming
import (
"fmt"
"testing"
)
// Test_isScramble checks the classic scramble-string examples (LeetCode
// 87): "rgeat" is a scramble of "great" and "caebd" is not a scramble of
// "abcde". The previous version only printed the results and asserted
// nothing, so it could never fail.
func Test_isScramble(t *testing.T) {
	res := isScramble("great", "rgeat")
	res2 := isScramble("abcde", "caebd")
	fmt.Println(res, res2)
	if !res {
		t.Errorf(`isScramble("great", "rgeat") = false, want true`)
	}
	if res2 {
		t.Errorf(`isScramble("abcde", "caebd") = true, want false`)
	}
}
|
package functions
import "math"
// ReLU For activation function
// f(x) = max(0,x) for All x in R.
func ReLU(x float64) float64 {
const (
Overflow = 1.0239999999999999e+03
Underflow = -1.0740e+03
NearZero = 1.0 / (1 << 28) // 2**-28
)
switch {
case math.IsNaN(x) || math.IsInf(x, 1):
return x
case math.IsInf(x, -1):
return 0
case x > Overflow:
return math.Inf(1)
case x < Underflow:
return 0
case -NearZero < x && x < NearZero:
return 1 + x
}
if x > 0 {
return x
} else {
return 0
}
}
// ReLUPrime is the derivative of ReLU, used during backpropagation:
// 1 for strictly positive inputs, 0 otherwise.
func ReLUPrime(x float64) float64 {
	switch {
	case x > 0:
		return 1.0
	default:
		return 0.0
	}
}
|
package main
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"strings"
log "github.com/sirupsen/logrus"
"github.com/Luzifer/rconfig/v2"
)
var (
cfg = struct {
CleanEnv bool `flag:"clean" default:"false" description:"Do not pass current environment to child process"`
EncryptionMethod string `flag:"encryption" default:"openssl-md5" description:"Encryption method used for encrypted env-file (Available: gpg-symmetric, openssl-md5, openssl-sha256)"`
EnvFile string `flag:"env-file" default:".env" description:"Location of the environment file"`
LogLevel string `flag:"log-level" default:"info" description:"Log level (debug, info, warn, error, fatal)"`
PasswordFile string `flag:"password-file" default:"" description:"Read encryption key from file"`
Password string `flag:"password,p" default:"" env:"PASSWORD" description:"Password to decrypt environment file"`
Silent bool `flag:"q" default:"false" description:"Suppress informational messages from envrun (DEPRECATED, use --log-level=warn)"`
VersionAndExit bool `flag:"version" default:"false" description:"Prints current version and exits"`
}{}
version = "dev"
)
// init parses and validates command-line flags, handles --version, folds
// the deprecated -q flag into the log level, and configures logging.
// NOTE(review): doing flag parsing and os.Exit in init() makes this
// package hard to test; kept as-is for review.
func init() {
	if err := rconfig.ParseAndValidate(&cfg); err != nil {
		log.Fatalf("Unable to parse commandline options: %s", err)
	}
	if cfg.VersionAndExit {
		fmt.Printf("envrun %s\n", version)
		os.Exit(0)
	}
	if cfg.Silent && cfg.LogLevel == "info" {
		// Migration of deprecated flag
		cfg.LogLevel = "warn"
	}
	if l, err := log.ParseLevel(cfg.LogLevel); err != nil {
		log.WithError(err).Fatal("Unable to parse log level")
	} else {
		log.SetLevel(l)
	}
}
// envListToMap converts KEY=VALUE entries into a map, skipping empty
// lines, comment lines starting with '#', and malformed entries (which
// are logged as warnings).
func envListToMap(list []string) map[string]string {
	result := make(map[string]string)
	for _, raw := range list {
		if len(raw) == 0 || raw[0] == '#' {
			continue
		}
		kv := strings.SplitN(raw, "=", 2)
		if len(kv) != 2 {
			log.WithField("entry", raw).Warn("Invalid env-file entry")
			continue
		}
		result[kv[0]] = kv[1]
	}
	return result
}
// envMapToList renders an environment map back into KEY=VALUE entries.
// Ordering is unspecified (map iteration order).
func envMapToList(envMap map[string]string) []string {
	result := make([]string, 0, len(envMap))
	for key, value := range envMap {
		result = append(result, key+"="+value)
	}
	return result
}
// main resolves the decryption password, loads and (optionally) decrypts
// the env-file, merges it into the child environment, and runs the given
// command, mapping its exit status onto envrun's own exit code.
func main() {
	// Password precedence: explicit flag/env wins; otherwise read it from
	// the password file if that file exists (best-effort: a missing file
	// is silently ignored).
	if cfg.Password == "" && cfg.PasswordFile != "" {
		if _, err := os.Stat(cfg.PasswordFile); err == nil {
			data, err := ioutil.ReadFile(cfg.PasswordFile)
			if err != nil {
				log.WithError(err).Fatal("Unable to read password from file")
			}
			cfg.Password = strings.TrimSpace(string(data))
		}
	}
	dec, err := decryptMethodFromName(cfg.EncryptionMethod)
	if err != nil {
		log.WithError(err).Fatal("Could not load decrypt method")
	}
	pairs, err := loadEnvFromFile(cfg.EnvFile, cfg.Password, dec)
	if err != nil {
		log.WithError(err).Fatal("Could not load env file")
	}
	// Start from the current environment unless --clean was given; the
	// env-file entries override inherited variables.
	var childenv = envListToMap(os.Environ())
	if cfg.CleanEnv {
		childenv = map[string]string{}
	}
	for k, v := range pairs {
		childenv[k] = v
	}
	// rconfig.Args()[0] is the program name; [1] is the command to run.
	if len(rconfig.Args()) < 2 {
		log.Fatal("No command specified")
	}
	c := exec.Command(rconfig.Args()[1], rconfig.Args()[2:]...)
	c.Env = envMapToList(childenv)
	c.Stdout = os.Stdout
	c.Stderr = os.Stderr
	c.Stdin = os.Stdin
	err = c.Run()
	// Map the child's outcome onto our own exit code: 0 on success, 1 on
	// a non-zero child exit, 2 on any other failure.
	// NOTE(review): "exitted" is a typo in the log message; left unchanged
	// here since it is runtime output.
	switch err.(type) {
	case nil:
		log.Info("Process exitted with code 0")
		os.Exit(0)
	case *exec.ExitError:
		log.Error("Unclean exit with exit-code != 0")
		os.Exit(1)
	default:
		log.WithError(err).Error("An unknown error occurred")
		os.Exit(2)
	}
}
// loadEnvFromFile reads the env-file, decrypts its contents when a
// passphrase is given, and returns the entries as a map.
func loadEnvFromFile(filename, passphrase string, decrypt decryptMethod) (map[string]string, error) {
	raw, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, fmt.Errorf("Could not read env-file: %s", err)
	}
	if passphrase == "" {
		return envListToMap(strings.Split(string(raw), "\n")), nil
	}
	plain, err := decrypt(raw, passphrase)
	if err != nil {
		return nil, fmt.Errorf("Could not decrypt env-file: %s", err)
	}
	return envListToMap(strings.Split(string(plain), "\n")), nil
}
|
package main
import (
"bytes"
"fmt"
"strconv"
)
// IP is an IPv4 address stored as a 32-bit unsigned integer in network
// (big-endian) byte order.
type IP uint32
// ParseIP packs the first four bytes of b into an IP. b must contain at
// least four bytes or ParseIP panics.
func ParseIP(b []byte) IP {
	var ip IP
	for _, octet := range b[:4] {
		ip = ip<<8 | IP(octet)
	}
	return ip
}
// String renders the address in dotted-quad form, e.g. "127.0.0.1".
func (ip IP) String() string {
	var sb bytes.Buffer
	for shift := 24; shift >= 0; shift -= 8 {
		sb.WriteString(strconv.Itoa(int((ip >> uint(shift)) & 0xff)))
		if shift > 0 {
			sb.WriteByte('.')
		}
	}
	return sb.String()
}
// BitShow formats a byte count with the largest unit it reaches, dividing
// by 1024 per step: "B", "Kb", "Mb", then "Gb".
func BitShow(n int) string {
	units := []string{"B", "Kb", "Mb", "Gb"}
	idx := 0
	for idx < len(units)-1 && n >= 1024 {
		n /= 1024
		idx++
	}
	return fmt.Sprintf("%v %v", n, units[idx])
}
|
package virtual_security
import (
"errors"
"reflect"
"testing"
)
// Test_marginPosition_exitable verifies that exitable reports whether the
// requested quantity can currently be exited from a margin position.
func Test_marginPosition_exitable(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name     string
		position *marginPosition
		arg      float64
		want     error
	}{
		{name: "保有数不足でエグジットできないなら、エラーを返す",
			position: &marginPosition{OwnedQuantity: 300, HoldQuantity: 300},
			arg:      500,
			want:     NotEnoughOwnedQuantityError},
		{name: "拘束数不足でエグジットできないなら、エラーを返す",
			position: &marginPosition{OwnedQuantity: 300, HoldQuantity: 200},
			arg:      300,
			want:     NotEnoughHoldQuantityError},
		{name: "exit可能ならnilを返す",
			position: &marginPosition{OwnedQuantity: 300, HoldQuantity: 300},
			arg:      300,
			want:     nil},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			got := test.position.exitable(test.arg)
			// Fix: compare sentinel errors with errors.Is, consistent with
			// the exit/hold/release tests in this file; reflect.DeepEqual
			// on errors breaks as soon as an error is wrapped.
			if !errors.Is(got, test.want) {
				t.Errorf("%s error\nwant: %+v\ngot: %+v\n", t.Name(), test.want, got)
			}
		})
	}
}
// Test_marginPosition_exit verifies that exit reduces both the owned and
// held quantities on success, and leaves them untouched while returning
// the matching sentinel error on failure.
func Test_marginPosition_exit(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name              string
		position          *marginPosition
		arg               float64
		wantOwnedQuantity float64
		wantHoldQuantity  float64
		want              error
	}{
		{name: "エグジットできるなら保有数と拘束数を減らす",
			position:          &marginPosition{OwnedQuantity: 300, HoldQuantity: 200},
			arg:               100,
			wantOwnedQuantity: 200,
			wantHoldQuantity:  100,
			want:              nil},
		{name: "保有数不足でエグジットできないなら、エラーを返す",
			position:          &marginPosition{OwnedQuantity: 300, HoldQuantity: 300},
			arg:               500,
			wantOwnedQuantity: 300,
			wantHoldQuantity:  300,
			want:              NotEnoughOwnedQuantityError},
		{name: "拘束数不足でエグジットできないなら、エラーを返す",
			position:          &marginPosition{OwnedQuantity: 300, HoldQuantity: 200},
			arg:               300,
			wantOwnedQuantity: 300,
			wantHoldQuantity:  200,
			want:              NotEnoughHoldQuantityError},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			got := test.position.exit(test.arg)
			// Both resulting quantities and the returned error must match.
			if !reflect.DeepEqual(test.wantOwnedQuantity, test.position.OwnedQuantity) ||
				!reflect.DeepEqual(test.wantHoldQuantity, test.position.HoldQuantity) ||
				!errors.Is(got, test.want) {
				t.Errorf("%s error\nwant: %+v, %+v, %+v\ngot: %+v, %+v, %+v\n", t.Name(),
					test.wantOwnedQuantity, test.wantHoldQuantity, test.want,
					test.position.OwnedQuantity, test.position.HoldQuantity, got)
			}
		})
	}
}
// Test_marginPosition_holdable verifies that holdable rejects a hold
// request exceeding the free (owned minus held) quantity.
func Test_marginPosition_holdable(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name     string
		position *marginPosition
		arg      float64
		want     error
	}{
		{name: "拘束できないなら、エラーを返す",
			position: &marginPosition{OwnedQuantity: 300, HoldQuantity: 100},
			arg:      300,
			want:     NotEnoughOwnedQuantityError},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			got := test.position.holdable(test.arg)
			// Fix: compare sentinel errors with errors.Is, consistent with
			// the exit/hold/release tests in this file; reflect.DeepEqual
			// on errors breaks as soon as an error is wrapped.
			if !errors.Is(got, test.want) {
				t.Errorf("%s error\nwant: %+v\ngot: %+v\n", t.Name(), test.want, got)
			}
		})
	}
}
// Test_marginPosition_hold verifies that hold increases the held quantity
// on success and leaves it unchanged (returning the sentinel error) when
// not enough free quantity is available.
func Test_marginPosition_hold(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name             string
		position         *marginPosition
		arg              float64
		wantHoldQuantity float64
		want             error
	}{
		{name: "拘束できるなら拘束数を増やす",
			position:         &marginPosition{OwnedQuantity: 300, HoldQuantity: 200},
			arg:              100,
			wantHoldQuantity: 300,
			want:             nil},
		{name: "拘束できないなら拘束数を増やさず、エラーを返す",
			position:         &marginPosition{OwnedQuantity: 300, HoldQuantity: 100},
			arg:              300,
			wantHoldQuantity: 100,
			want:             NotEnoughOwnedQuantityError},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			got := test.position.hold(test.arg)
			if !reflect.DeepEqual(test.wantHoldQuantity, test.position.HoldQuantity) || !errors.Is(got, test.want) {
				t.Errorf("%s error\nwant: %+v, %+v\ngot: %+v, %+v\n", t.Name(), test.wantHoldQuantity, test.want, test.position.HoldQuantity, got)
			}
		})
	}
}
// Test_marginPosition_release verifies that release decreases the held
// quantity on success and leaves it unchanged (returning the sentinel
// error) when the release exceeds the held amount.
func Test_marginPosition_release(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name             string
		position         *marginPosition
		arg              float64
		wantHoldQuantity float64
		want             error
	}{
		{name: "拘束を解放できるなら拘束数を減らす",
			position:         &marginPosition{HoldQuantity: 300},
			arg:              100,
			wantHoldQuantity: 200,
			want:             nil},
		{name: "拘束を解放できないなら拘束数を減らさず、エラーを返す",
			position:         &marginPosition{HoldQuantity: 100},
			arg:              200,
			wantHoldQuantity: 100,
			want:             NotEnoughHoldQuantityError},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			got := test.position.release(test.arg)
			if !reflect.DeepEqual(test.wantHoldQuantity, test.position.HoldQuantity) || !errors.Is(got, test.want) {
				t.Errorf("%s error\nwant: %+v, %+v\ngot: %+v, %+v\n", t.Name(), test.wantHoldQuantity, test.want, test.position.HoldQuantity, got)
			}
		})
	}
}
// Test_marginPosition_isDied verifies that a position with zero owned
// quantity is considered dead.
func Test_marginPosition_isDied(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name           string
		marginPosition *marginPosition
		want           bool
	}{
		{name: "保有数がなければ死んでいる", marginPosition: &marginPosition{OwnedQuantity: 0}, want: true},
		{name: "保有数があれば生きている", marginPosition: &marginPosition{OwnedQuantity: 100}, want: false},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			got := test.marginPosition.isDied()
			if !reflect.DeepEqual(test.want, got) {
				t.Errorf("%s error\nwant: %+v\ngot: %+v\n", t.Name(), test.want, got)
			}
		})
	}
}
// Test_marginPosition_orderableQuantity verifies that the orderable
// quantity equals owned minus held.
func Test_marginPosition_orderableQuantity(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name     string
		position *marginPosition
		want     float64
	}{
		{name: "保有数と拘束数が同じなら0", position: &marginPosition{OwnedQuantity: 300, HoldQuantity: 300}, want: 0},
		{name: "拘束されていなければ保有数がそのまま出る", position: &marginPosition{OwnedQuantity: 300, HoldQuantity: 0}, want: 300},
		{name: "部分的に拘束されているなら、保有数-拘束数の値が出る", position: &marginPosition{OwnedQuantity: 300, HoldQuantity: 200}, want: 100},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			got := test.position.orderableQuantity()
			if !reflect.DeepEqual(test.want, got) {
				t.Errorf("%s error\nwant: %+v\ngot: %+v\n", t.Name(), test.want, got)
			}
		})
	}
}
|
// Copyright 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wrap
import (
"github.com/apid/apid-core"
"github.com/mattn/go-sqlite3"
)
// wrapTx decorates a sqlite3 transaction with debug and error logging
// around Commit and Rollback; all other behavior comes from the embedded
// *sqlite3.SQLiteTx.
type wrapTx struct {
	*sqlite3.SQLiteTx
	log apid.LogService
}
// Commit delegates to the wrapped SQLiteTx, logging before and after; on
// failure it logs the error and returns it immediately.
func (tx *wrapTx) Commit() error {
	tx.log.Debug("begin commit")
	err := tx.SQLiteTx.Commit()
	if err != nil {
		tx.log.Errorf("failed commit: %s", err)
		return err
	}
	tx.log.Debug("end commit")
	return nil
}
// Rollback delegates to the wrapped SQLiteTx, logging before and after;
// a rollback failure is logged but still followed by the end-of-rollback
// debug line, and the error is returned.
func (tx *wrapTx) Rollback() error {
	tx.log.Debug("begin rollback")
	err := tx.SQLiteTx.Rollback()
	if err != nil {
		tx.log.Errorf("failed rollback: %s", err)
	}
	tx.log.Debug("end rollback")
	return err
}
|
// for, switch, break.
package main
import (
"fmt"
)
// canNotBreakLoop demonstrates that a plain break inside a switch that is
// nested in a for loop terminates only the switch, not the loop: all ten
// iterations still run.
func canNotBreakLoop() {
	fmt.Println("--- can not break the for loops ---")
	for i := 0; i < 10; i++ {
		switch i {
		case 3:
			fmt.Printf("%d hello\n", i)
		case 4:
			fmt.Printf("%d expected break the loop but can not\n", i)
			break // binds to the switch (a no-op here), so the loop continues
		default:
			fmt.Println(i)
		}
	}
}
// breakLoop demonstrates the idiomatic fix: labeling the for loop and
// using "break L" inside the switch exits the loop itself, so iteration
// stops at i == 4.
func breakLoop() {
	fmt.Println("--- label break ---")
L:
	for i := 0; i < 10; i++ {
		switch i {
		case 3:
			fmt.Printf("%d hello\n", i)
		case 4:
			fmt.Printf("%d break\n", i)
			break L // targets the labeled for loop, not the switch
		default:
			fmt.Println(i)
		}
	}
}
// main runs both demonstrations in sequence: the switch-bound break,
// then the labeled break.
func main() {
	canNotBreakLoop()
	breakLoop()
}
|
package utils
import (
"bytes"
"container/list"
"crypto/rand"
"encoding/base32"
"encoding/json"
"fmt"
"io"
"io/ioutil"
mrand "math/rand"
"net/http"
"net/url"
"regexp"
"strings"
"time"
_const "github.com/IcanFun/utils/const"
goi18n "github.com/nicksnyder/go-i18n/i18n"
"github.com/pborman/uuid"
)
const (
LOWERCASE_LETTERS = "abcdefghijklmnopqrstuvwxyz"
UPPERCASE_LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
NUMBERS = "0123456789"
SYMBOLS = " !\"\\#$%&'()*+,-./:;<=>?@[]^_`|~"
USER_AUTH_SERVICE_LDAP = "ldap"
LDAP_SYNC_TASK_NAME = "LDAP Syncronization"
IDSIZE = 10
)
const (
StatusBadRequest = http.StatusBadRequest
)
// StringInterface is a generic JSON-style object keyed by strings.
type StringInterface map[string]interface{}
// StringMap maps string keys to string values.
type StringMap map[string]string
// StringArray is a list of strings.
type StringArray []string
// EncryptStringMap is a StringMap whose values are stored encrypted.
// NOTE(review): semantics inferred from the name only — confirm.
type EncryptStringMap map[string]string
// AppOK is the standard success response body.
type AppOK struct {
	Status string `json:"status" example:"ok"`
}
// AppError carries structured error information returned to API clients.
// The unexported params feed the i18n translation of Message.
type AppError struct {
	Id            string `json:"id"`
	Message       string `json:"message"`        // Message to be display to the end user without debugging information
	DetailedError string `json:"detailed_error"` // Internal error string to help the developer
	RequestId     string `json:"request_id,omitempty"` // The RequestId that's also set in the header
	StatusCode    int    `json:"status_code,omitempty"` // The http status code
	Where         string `json:"-"` // The function where it happened in the form of Struct.Func
	Code          int    `json:"code"`
	params        map[string]interface{}
}
// Error implements the error interface, combining the location, the
// user-facing message, and the internal detail.
func (er *AppError) Error() string {
	return fmt.Sprintf("%s: %s, %s", er.Where, er.Message, er.DetailedError)
}
// Translate localizes the user-facing Message in place using the given
// translate function, forwarding params when present.
func (er *AppError) Translate(T goi18n.TranslateFunc) {
	if er.params == nil {
		er.Message = T(er.Id)
	} else {
		er.Message = T(er.Id, er.params)
	}
}
// SystemMessage returns the localized message without mutating the error.
func (er *AppError) SystemMessage(T goi18n.TranslateFunc) string {
	if er.params == nil {
		return T(er.Id)
	} else {
		return T(er.Id, er.params)
	}
}
// NewAppError builds an AppError with an explicit HTTP status code. The
// Message is initialized to the id and is typically localized later via
// Translate.
func NewAppError(where string, id string, params map[string]interface{}, details string, status int) *AppError {
	return &AppError{
		Id:            id,
		params:        params,
		Message:       id,
		Where:         where,
		DetailedError: details,
		StatusCode:    status,
	}
}
// ToJson serializes the AppError; it returns the empty string when
// marshalling fails.
func (er *AppError) ToJson() string {
	b, err := json.Marshal(er)
	if err != nil {
		return ""
	}
	return string(b)
}
// NewLocAppError builds an AppError with the default status code 500;
// otherwise identical to NewAppError.
func NewLocAppError(where string, id string, params map[string]interface{}, details string) *AppError {
	return &AppError{
		Id:            id,
		params:        params,
		Message:       id,
		Where:         where,
		DetailedError: details,
		StatusCode:    500,
	}
}
// AppErrorFromJson will decode the input and return an AppError. If the
// body cannot be read or decoded, a generic decode-error AppError carrying
// the (unparseable) body text is returned instead.
func AppErrorFromJson(data io.Reader) *AppError {
	var body string
	if raw, readErr := ioutil.ReadAll(data); readErr != nil {
		body = readErr.Error()
	} else {
		body = string(raw)
	}
	var er AppError
	if err := json.NewDecoder(strings.NewReader(body)).Decode(&er); err != nil {
		return NewLocAppError("AppErrorFromJson", "model.utils.decode_json.app_error", nil, "body: "+body)
	}
	return &er
}
// encoding is a z-base-32 style alphabet used for compact, URL-safe ids.
var encoding = base32.NewEncoding("ybndrfg8ejkmcpqxot1uwisza345h769")
// NewId returns a 26-character id: a random UUID encoded with the custom
// base32 alphabet, truncated to drop the trailing padding.
func NewId() string {
	var b bytes.Buffer
	encoder := base32.NewEncoder(encoding, &b)
	encoder.Write(uuid.NewRandom())
	encoder.Close()
	b.Truncate(26) // removes the '==' padding
	return b.String()
}
// NewRandomString returns a random string of the given length built from
// the custom base32 alphabet. Eight extra random bytes are drawn so the
// encoded output is always long enough before truncation.
func NewRandomString(length int) string {
	raw := make([]byte, length+8)
	rand.Read(raw)
	var buf bytes.Buffer
	enc := base32.NewEncoder(encoding, &buf)
	enc.Write(raw)
	enc.Close()
	buf.Truncate(length) // removes the '==' padding
	return buf.String()
}
func MapFromJson(data io.Reader) map[string]string {
decoder := json.NewDecoder(data)
var objmap map[string]string
if err := decoder.Decode(&objmap); err != nil {
return make(map[string]string)
} else {
return objmap
}
}
func ArrayFromJson(data io.Reader) []string {
decoder := json.NewDecoder(data)
var objmap []string
if err := decoder.Decode(&objmap); err != nil {
return make([]string, 0)
} else {
return objmap
}
}
// ArrayToJson serializes a string slice; it returns the empty string when
// marshalling fails. A nil slice serializes to "null".
func ArrayToJson(objmap []string) string {
	b, err := json.Marshal(objmap)
	if err != nil {
		return ""
	}
	return string(b)
}
// IsValidHttpUrl reports whether rawUrl is an absolute http:// or
// https:// URL that also parses as a request URI.
func IsValidHttpUrl(rawUrl string) bool {
	if !strings.HasPrefix(rawUrl, "http://") && !strings.HasPrefix(rawUrl, "https://") {
		return false
	}
	_, err := url.ParseRequestURI(rawUrl)
	return err == nil
}
// StringInterfaceToJson serializes a generic object; it returns the empty
// string when marshalling fails.
func StringInterfaceToJson(objmap map[string]interface{}) string {
	b, err := json.Marshal(objmap)
	if err != nil {
		return ""
	}
	return string(b)
}
func GetToday0hMills() int64 {
timeStr := time.Now().Format("2006-01-02")
t, _ := time.ParseInLocation("2006-01-02", timeStr, time.Local)
return t.UnixNano() / int64(time.Millisecond)
}
// reservedName lists identifiers that must not be taken by user-created
// resources because they collide with application routes.
var reservedName = []string{
	"signup",
	"login",
	"admin",
	"api",
	"oauth",
}
type Status struct {
Status string `json:"status"`
}
func StatusFromJson(data io.Reader) *Status {
decoder := json.NewDecoder(data)
var status Status
if err := decoder.Decode(&status); err != nil {
return nil
} else {
return &status
}
}
// GenerateOrderID builds an order id by packing the current local
// date-time down to seconds into decimal positions (YYYYMMDDHHMMSS-style)
// and appending a five-digit random suffix. Not guaranteed unique.
func GenerateOrderID() int64 {
	now := time.Now()
	rnd := mrand.New(mrand.NewSource(time.Now().UnixNano()))
	datePart := int64(now.Year())*10000000000 +
		int64(now.Month())*100000000 +
		int64(now.Day())*1000000 +
		int64(now.Hour())*10000 +
		int64(now.Minute())*100 +
		int64(now.Second())
	return datePart*100000 + int64(rnd.Intn(99999))
}
// GeneratePayCode returns a random numeric pay code of up to six digits
// (decimal, no zero padding).
func GeneratePayCode() string {
	rnd := mrand.New(mrand.NewSource(time.Now().UnixNano()))
	return fmt.Sprintf("%d", int64(rnd.Intn(999999)))
}
// baseStr is the 32-character alphabet (no I, O, 0, 8) used by Base34.
var baseStr string = "ABCDEFGHJKLMNPQRSTUVWXYZ12345679"
var base []byte = []byte(baseStr)
//1544804416
// Base34 encodes n in base 32 using the alphabet above (the name is a
// historical misnomer), left-padding the result with 'A' (the zero digit)
// to a minimum of five characters.
func Base34(n uint64) []byte {
	digits := list.New()
	for rem := n; rem != 0; rem /= 32 {
		digits.PushFront(base[rem%32])
	}
	count := digits.Len()
	if count >= 5 {
		out := make([]byte, 0, count)
		for e := digits.Front(); e != nil; e = e.Next() {
			out = append(out, e.Value.(byte))
		}
		return out
	}
	out := make([]byte, 0, 5)
	for i := 0; i < 5-count; i++ {
		out = append(out, base[0])
	}
	for e := digits.Front(); e != nil; e = e.Next() {
		out = append(out, e.Value.(byte))
	}
	return out
}
// source is the 31-character alphabet used by CreateCode/DecodeCode.
var source = "E5FCDG3HQ4BNPJ2RSTUV67MWX89KLYZ"
// CreateCode encodes user_id in base len(source), filling a five-character
// buffer from the right; unused leading positions stay 'A' (which is not
// in the alphabet and thus marks padding).
func CreateCode(user_id int64) string {
	buf := []byte("AAAAA")
	radix := int64(len(source))
	pos := 4
	for n := user_id; n > 0; pos-- {
		rem := n % radix
		n = (n - rem) / radix
		buf[pos] = source[rem]
	}
	return string(buf)
}
// DecodeCode reverses CreateCode, converting a code back into its numeric
// id. 'A' padding characters are skipped; any character not found in
// source makes the whole code invalid and yields 0.
//
// Fix: removed a leftover debug fmt.Println that wrote to stdout on every
// decoded character, and replaced the per-character sub-slice + byte
// search with bytes.IndexByte.
func DecodeCode(code []byte) int {
	sourceBytes := []byte(source)
	n := 0
	l := len(source)
	for _, c := range code {
		if c == 'A' {
			continue
		}
		p := bytes.IndexByte(sourceBytes, c)
		if p < 0 {
			return 0
		}
		n = l*n + p
	}
	return n
}
// IsFundPasswordValid checks that the fund password is exactly six ASCII
// digits (currently a 6-digit pure number). It returns nil when valid and
// an AppError with StatusBadRequest otherwise.
//
// Fix: the previous pattern "\\d+" was unanchored, so it matched any
// string containing at least one digit anywhere (e.g. "12ab3c" passed);
// the anchored pattern enforces digits-only as intended.
func IsFundPasswordValid(password string) *AppError {
	if len(password) != 6 {
		return NewAppError("User.IsValid",
			"api.user.is_valid.fund_pwd", nil, "", http.StatusBadRequest,
		)
	}
	if result, _ := regexp.MatchString("^\\d{6}$", password); !result {
		return NewAppError("User.IsValid",
			"api.user.is_valid.fund_pwd", nil, "", http.StatusBadRequest,
		)
	}
	return nil
}
// GetProtocol reports the request scheme ("https" or "http"), honoring
// the X-Forwarded-Proto header set by TLS-terminating proxies as well as
// a direct TLS connection.
func GetProtocol(r *http.Request) string {
	if r.TLS != nil || r.Header.Get(_const.HEADER_FORWARDED_PROTO) == "https" {
		return "https"
	}
	return "http"
}
|
package tax
import (
"errors"
"strconv"
)
// TableInput holds the fields decoded from a 16-character fixed-width tax
// record laid out as: table(4) period(1) type(1) salary(5) tax(5).
type TableInput struct {
	TableCount  int // 4 digits
	PeriodCount int // 1 digit
	TypeCount   int // 1 digit
	Salary      int // 5 digits
	Tax         int // 5 digits
}
// convert parses a 16-character fixed-width record into a TableInput.
// On any failure it returns a zero-value TableInput and an error.
//
// Fix: the five copy-pasted parse/error stanzas are folded into a single
// field table, removing the duplication. Error messages are kept
// byte-identical for compatibility, although "invalid length on string"
// is also returned for non-numeric fields — a pre-existing quirk.
func convert(str string) (*TableInput, error) {
	if len(str) != 16 {
		return &TableInput{}, errors.New("invalid length on string")
	}
	result := TableInput{}
	// Each entry maps a substring [start:end) onto its destination field.
	fields := []struct {
		dst        *int
		start, end int
	}{
		{&result.TableCount, 0, 4},
		{&result.PeriodCount, 4, 5},
		{&result.TypeCount, 5, 6},
		{&result.Salary, 6, 11},
		{&result.Tax, 11, 16},
	}
	for _, f := range fields {
		v, err := toInt(str[f.start:f.end])
		if err != nil {
			return &TableInput{}, errors.New("invalid length on string")
		}
		*f.dst = v
	}
	return &result, nil
}
// toInt parses a decimal substring into an int (32-bit range); -1 and an
// error are returned when the text is not a valid number.
func toInt(ts string) (int, error) {
	ti, err := strconv.ParseInt(ts, 10, 32)
	if err != nil {
		return -1, errors.New("invalid length on string")
	}
	return int(ti), nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.