text stringlengths 11 4.05M |
|---|
package main
import (
"fmt"
"math"
"math/rand"
)
var count int = 0
// addition returns the element-wise sum of the two m×m matrices a and b.
// Fixes: the original used the nonexistent `float` type, allocated an
// m²×m² result via math.Pow, and compared an int loop index to a float.
func addition(a [][]float64, b [][]float64) [][]float64 {
	m := len(a)
	c := make([][]float64, m)
	for i := range c {
		c[i] = make([]float64, m)
		for j := 0; j < m; j++ {
			c[i][j] = a[i][j] + b[i][j]
		}
	}
	return c
}
// print writes every element of the square matrix a to stdout, one value
// per line, with a blank separator after each row, and bumps the
// package-level counter.
// NOTE(review): the dimension is read from len(a[0]); assumes a is square.
func print(a [][]float64) {
	m := len(a[0])
	for i := 0; i < m; i++ {
		for j := 0; j < m; j++ {
			fmt.Println("", a[i][j])
		}
		// Println appends its own newline, so this prints two blank lines.
		fmt.Println("\n")
	}
	// NOTE(review): m+1 per call — intent of this counter is unclear,
	// confirm with the author.
	count += m+1
}
func main() {
var m int
fmt.Scanln("length: ", m)
r := rand.NewSource(100)
var a = [m][m]float64
var b = [m][m]float64
var c = [m][m]float64
for i := 0; i < m; i++ {
for j := 0; j < m; j++ {
a[i][j] = r.Int63()
b[i][j] = r.Int63()
}
}
print(a)
print(b)
c = addition(a, b)
print(c)
}
|
package main
import (
"bufio"
"fmt"
"log"
"net"
"os"
)
var PORT = "8080"
func main() {
conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%s", PORT))
if err != nil{
log.Println("Connect error: ", err.Error())
}
for {
in := bufio.NewReader(os.Stdin)
fmt.Printf("Write your text:\n")
msg,_ := in.ReadString('\n')
fmt.Fprintf(conn, msg + "\n")
response, _ := bufio.NewReader(conn).ReadString('\n')
fmt.Println(response)
}
} |
/*
Copyright 2022 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package view
import (
"context"
"fmt"
"github.com/gdamore/tcell/v2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/references/cli/top/component"
"github.com/oam-dev/kubevela/references/cli/top/model"
)
// ContainerView is a view which displays info of container of aime pod
type ContainerView struct {
	*CommonResourceView
	ctx context.Context // request-scoped context carrying the selected pod
}
// Init container view: initialize the embedded view, set the themed
// title, and install this view's key bindings.
func (v *ContainerView) Init() {
	v.CommonResourceView.Init()
	// set title of view
	v.SetTitle(fmt.Sprintf("[ %s ]", "Container")).SetTitleColor(v.app.config.Theme.Table.Title.Color())
	v.bindKeys()
}
// Name return pod container name
func (v *ContainerView) Name() string {
	return "Container"
}

// Start the container view: render once immediately, then keep the body
// auto-refreshing.
func (v *ContainerView) Start() {
	v.Clear()
	v.Update(func() {})
	v.CommonResourceView.AutoRefresh(v.Update)
}

// Stop the container view and its auto-refresh timer.
func (v *ContainerView) Stop() {
	v.CommonResourceView.Stop()
}

// Hint return key action menu hints of the container view
func (v *ContainerView) Hint() []model.MenuHint {
	return v.Actions().Hint()
}
// InitView init a new container view; the embedded CommonResourceView is
// created lazily on first use.
func (v *ContainerView) InitView(ctx context.Context, app *App) {
	v.ctx = ctx
	if v.CommonResourceView == nil {
		v.CommonResourceView = NewCommonView(app)
	}
}
// Refresh the view content in response to a key event; the event is
// consumed (nil is returned).
func (v *ContainerView) Refresh(_ *tcell.EventKey) *tcell.EventKey {
	v.CommonResourceView.Refresh(true, v.Update)
	return nil
}

// Update refresh the content of body of view, then cancels the pending
// refresh timeout via the supplied callback.
func (v *ContainerView) Update(timeoutCancel func()) {
	v.BuildHeader()
	v.BuildBody()
	timeoutCancel()
}
// BuildHeader render the header of table
func (v *ContainerView) BuildHeader() {
	header := []string{"Name", "Image", "Ready", "State", "CPU", "MEM", "CPU/R", "CPU/L", "MEM/R", "MEM/L", "TerminateMessage", "RestartCount"}
	v.CommonResourceView.BuildHeader(header)
}
// BuildBody render the body of table; a listing error leaves the previous
// body untouched.
func (v *ContainerView) BuildBody() {
	containerList, err := model.ListContainerOfPod(v.ctx, v.app.client, v.app.config.RestConfig)
	if err != nil {
		return
	}
	resourceInfos := containerList.ToTableBody()
	v.CommonResourceView.BuildBody(resourceInfos)
	// NOTE(review): row count comes from containerList rather than
	// resourceInfos — confirm the two always have equal length.
	rowNum := len(containerList)
	v.ColorizePhaseText(rowNum)
}
// bindKeys installs this view's key actions: Enter is disabled and "l"
// opens the log view for the selected container.
func (v *ContainerView) bindKeys() {
	v.Actions().Delete([]tcell.Key{tcell.KeyEnter})
	v.Actions().Add(model.KeyActions{
		component.KeyL: model.KeyAction{Description: "Log", Action: v.logView, Visible: true, Shared: true},
	})
}
// ColorizePhaseText applies a theme color to the "State" column (index 3)
// of every data row; row 0 is the header and is skipped.
func (v *ContainerView) ColorizePhaseText(rowNum int) {
	for row := 1; row <= rowNum; row++ {
		state := v.Table.GetCell(row, 3).Text
		// Default to the plain body color, then override per state.
		color := v.app.config.Theme.Table.Body.String()
		switch common.ContainerState(state) {
		case common.ContainerRunning:
			color = v.app.config.Theme.Status.Healthy.String()
		case common.ContainerWaiting:
			color = v.app.config.Theme.Status.Waiting.String()
		case common.ContainerTerminated:
			color = v.app.config.Theme.Status.UnHealthy.String()
		default:
		}
		v.Table.GetCell(row, 3).SetText(fmt.Sprintf("[%s::]%s", color, state))
	}
}
// logView opens the log view for the container on the selected row; the
// header row (row 0) is ignored and the event is passed through.
func (v *ContainerView) logView(event *tcell.EventKey) *tcell.EventKey {
	row, _ := v.GetSelection()
	if row == 0 {
		return event
	}
	name := v.GetCell(row, 0).Text
	ctx := context.WithValue(v.ctx, &model.CtxKeyContainer, name)
	v.app.command.run(ctx, "log")
	return nil
}
|
package pkg
// job指每个用户下载的任务作为一个job或者自己的上传任务作为job.
// 用于以Pkg为单位的上传或者下载状态的跟踪和记录
import (
"./../transfer/task"
"./models"
"cydex"
"cydex/transfer"
"fmt"
clog "github.com/cihub/seelog"
"sync"
"time"
)
const (
	// Interval at which cached job-detail (JD) data is synced to the DB.
	// DEFAULT_CACHE_SYNC_TIMEOUT = 30 * time.Second
	DEFAULT_CACHE_SYNC_TIMEOUT = 0
	// Delay before a finished job's cache/track entries are removed.
	DELAY_DEL_JOB_TIME = 20 * time.Second
)

// JobMgr is the process-wide job manager singleton.
var (
	JobMgr *JobManager
)

func init() {
	JobMgr = NewJobManager()
}
// HashJob builds the canonical job id for a user/package/direction
// triple: "uid_U_pid" for uploads, "uid_D_pid" for downloads, and an
// empty direction letter for any other typ.
func HashJob(uid, pid string, typ int) string {
	direction := ""
	switch typ {
	case cydex.UPLOAD:
		direction = "U"
	case cydex.DOWNLOAD:
		direction = "D"
	}
	return fmt.Sprintf("%s_%s_%s", uid, direction, pid)
}
// getSegRuntime returns the runtime Seg record for sid inside jd,
// creating the segs map and/or a fresh Seg entry on demand.
// (Original note: "size" in this context means bytes received so far,
// not the seg's total size.)
func getSegRuntime(jd *models.JobDetail, sid string) (s *models.Seg) {
	if jd.Segs == nil {
		clog.Trace("new segs map")
		jd.Segs = make(map[string]*models.Seg)
	}
	s, _ = jd.Segs[sid]
	if s == nil {
		s = new(models.Seg)
		s.Sid = sid
		jd.Segs[sid] = s
	}
	clog.Trace("seg num: ", len(jd.Segs))
	return
}
// updateJobDetail folds one seg transfer-state report into the job detail
// and reports whether the detail must be persisted immediately.
func updateJobDetail(jd *models.JobDetail, state *transfer.TaskState, seg_state int) (save bool) {
	jd.Bitrate = state.Bitrate
	if seg_state == cydex.TRANSFER_STATE_DONE {
		// NOTE: with f2tp downloads the totalbytes reported at the end of a
		// seg is too small, so use the seg size stored in the database.
		seg_m, _ := models.GetSeg(state.Sid)
		if seg_m != nil {
			state.RealTotalBytes = seg_m.Size
		}
		if jd.File == nil {
			jd.GetFile()
		}
		jd.CurSegSize = 0
		jd.FinishedSize += state.GetTotalBytes()
		jd.NumFinishedSegs++
		jd.State = cydex.TRANSFER_STATE_PAUSE // cdxs-14
		// old-cdxs-22: clamp so the counters never exceed the file totals
		if jd.FinishedSize > jd.File.Size {
			jd.FinishedSize = jd.File.Size
		}
		if jd.NumFinishedSegs > jd.File.NumSegs {
			jd.NumFinishedSegs = jd.File.NumSegs
		}
		save = true
	} else {
		jd.CurSegSize = state.TotalBytes
		jd.State = seg_state
	}
	// jd is finished?
	if jd.NumFinishedSegs == jd.File.NumSegs {
		clog.Infof("%s is finished", jd)
		jd.State = cydex.TRANSFER_STATE_DONE
		jd.FinishTime = time.Now()
		save = true
	}
	clog.Tracef("%s update: %d %d %d s:%d", jd, jd.NumFinishedSegs, jd.FinishedSize, jd.CurSegSize, jd.State)
	return
}
// Track records which ids are uploading/downloading. The int values are
// unused; the maps are used as sets.
type Track struct {
	Uploads   map[string]int
	Downloads map[string]int
}
// NewTrack returns a Track with both set-maps initialized and empty.
func NewTrack() *Track {
	return &Track{
		Uploads:   make(map[string]int),
		Downloads: make(map[string]int),
	}
}
// type JobRuntime struct {
// 	*models.Job
// 	NumFinishedDetails int
// }

// JobObserver receives lifecycle callbacks as jobs are created, started
// and finished.
type JobObserver interface {
	OnJobCreate(*models.Job)
	OnJobStart(*models.Job)
	OnJobFinish(*models.Job)
}
// JobManager owns the in-memory job cache and the upload/download/delete
// tracking tables; all shared state is guarded by lock.
type JobManager struct {
	lock               sync.Mutex
	cache_sync_timeout time.Duration          // interval for syncing the cache to the DB
	del_job_delay      time.Duration          // delay before a finished job is removed
	jobs               map[string]*models.Job // jobid->job, cache
	track_users        map[string]*Track      // uid->track; pids the user uploads/downloads
	track_pkgs         map[string]*Track      // pid->track; uids uploading/downloading the pkg
	track_deletes      map[string]*Track      // uid->track; pids deleted for that user
	job_observers      []JobObserver
}
// NewJobManager returns a JobManager with empty cache and tracking
// tables, using the package-default sync and delete-delay intervals.
func NewJobManager() *JobManager {
	return &JobManager{
		cache_sync_timeout: DEFAULT_CACHE_SYNC_TIMEOUT,
		del_job_delay:      DELAY_DEL_JOB_TIME,
		jobs:               make(map[string]*models.Job),
		track_users:        make(map[string]*Track),
		track_pkgs:         make(map[string]*Track),
		track_deletes:      make(map[string]*Track),
	}
}
// CreateJob creates a new job for (uid, pid, typ); active jobs are added
// to the cache and tracking tables. If the job already exists it is reset
// and re-tracked instead of re-inserted.
func (self *JobManager) CreateJob(uid, pid string, typ int) (err error) {
	clog.Infof("create job: u[%s], p[%s], t[%d]", uid, pid, typ)
	hashid := HashJob(uid, pid, typ)
	jobid := hashid
	// An existing job must be reset and re-added to the track.
	if job_m, _ := models.GetJob(jobid, false); job_m != nil {
		clog.Infof("%s is existed, add to track again", jobid)
		self.lock.Lock()
		defer self.lock.Unlock()
		// reset the job so it starts from scratch
		job_m.GetDetails()
		for _, jd := range job_m.Details {
			jd.GetFile()
			if jd.File.Size > 0 {
				jd.Reset()
			} else {
				jd.StartTime = time.Now()
				jd.FinishTime = jd.StartTime
				jd.Save()
			}
		}
		// add track
		self.AddTrack(uid, pid, typ, false)
		// issue-1: uploaders must monitor downloader state, so finished
		// uploads are re-added to the track as well.
		if typ == cydex.DOWNLOAD {
			upload_jobs, _ := models.GetJobsByPid(pid, cydex.UPLOAD, nil)
			for _, u_job := range upload_jobs {
				self.AddTrack(u_job.Uid, u_job.Pid, u_job.Type, false)
			}
		}
		// drop the stale cache entry
		delete(self.jobs, jobid)
		// notify observers
		for _, o := range self.job_observers {
			o.OnJobCreate(job_m)
		}
		return nil
	}
	session := models.DB().NewSession()
	defer func() {
		models.SessionRelease(session)
		if err != nil {
			clog.Errorf("create job failed: %s", err)
		}
	}()
	if err = session.Begin(); err != nil {
		return
	}
	j := &models.Job{
		JobId: jobid,
		Uid:   uid,
		Pid:   pid,
		Type:  typ,
	}
	if _, err = session.Insert(j); err != nil {
		return err
	}
	clog.Debugf("insert a new Job: %s", jobid)
	// j.Details = make(map[string]*models.JobDetail)
	// create details
	pkg, err := models.GetPkg(pid, true)
	if err != nil || pkg == nil {
		return err
	}
	j.Pkg = pkg
	for _, f := range pkg.Files {
		jd := &models.JobDetail{
			JobId: jobid,
			Fid:   f.Fid,
		}
		// jzh: zero-size files and directories are marked DONE up front
		// because the client never sends a transfer command for them.
		if f.Size == 0 {
			jd.StartTime = time.Now()
			jd.FinishTime = jd.StartTime
			jd.State = cydex.TRANSFER_STATE_DONE
		}
		if _, err := session.Insert(jd); err != nil {
			return err
		}
		// jd.Segs = make(map[string]*models.Seg)
		// j.Details[f.Fid] = jd
		clog.Tracef("insert job_detail fid:%s", f.Fid)
	}
	if err = session.Commit(); err != nil {
		return
	}
	self.lock.Lock()
	defer self.lock.Unlock()
	// add track
	self.AddTrack(uid, pid, typ, false)
	// issue-1: uploaders must monitor downloader state, so finished
	// uploads are re-added to the track as well.
	if typ == cydex.DOWNLOAD {
		upload_jobs, _ := models.GetJobsByPid(pid, cydex.UPLOAD, nil)
		for _, u_job := range upload_jobs {
			self.AddTrack(u_job.Uid, u_job.Pid, u_job.Type, false)
		}
	}
	// notify observers
	for _, o := range self.job_observers {
		o.OnJobCreate(j)
	}
	return nil
}
// DeleteJob removes the job for (uid, pid, typ) from the database, cache
// and tracking tables, and records it in the delete track so clients can
// learn about the deletion incrementally.
func (self *JobManager) DeleteJob(uid, pid string, typ int) (err error) {
	hashid := HashJob(uid, pid, typ)
	clog.Infof("delete job: %s", hashid)
	job, _ := models.GetJob(hashid, false)
	if job == nil {
		return
	}
	if err = models.DeleteJob(hashid); err != nil {
		return
	}
	self.lock.Lock()
	defer self.lock.Unlock()
	self.DelTrack(uid, pid, typ, false)
	delete(self.jobs, hashid)
	// record in the delete track; clients pick it up via the incremental API
	self.AddTrackOfDelete(uid, pid, typ, false)
	clog.Debugf("delete job: %s over", hashid)
	return
}
// getJob returns the job from the cache, falling back to the database.
// Unfinished jobs loaded from the DB get their details populated and are
// inserted into the cache. mutex selects whether self.lock is taken.
func (self *JobManager) getJob(hashid string, mutex bool) *models.Job {
	var err error
	if mutex {
		self.lock.Lock()
		defer self.lock.Unlock()
	}
	j, _ := self.jobs[hashid]
	if j != nil {
		return j
	}
	if j, err = models.GetJob(hashid, true); err != nil {
		return nil
	}
	if j != nil {
		j.Details = make(map[string]*models.JobDetail)
		if !j.IsFinished() {
			j.GetDetails()
			j.NumUnfinishedDetails = j.CountUnfinishedDetails()
			// issue-6: already-finished job details need to be counted
			// for _, jd := range j.Details {
			// 	if jd.State == cydex.TRANSFER_STATE_DONE {
			// 		j.NumFinishedDetails++
			// 	}
			// }
			// save to cache
			j.IsCached = true
			self.jobs[hashid] = j
		}
	}
	return j
}
// GetJob fetches a job by hash id: from the cache first, then from the
// database; unfinished jobs are added to the cache.
func (self *JobManager) GetJob(hashid string) *models.Job {
	return self.getJob(hashid, true)
}

// GetJobDetail fetches the job detail for (jobid, fid): from the cached
// job's details first, otherwise from the database (and memoized).
func (self *JobManager) GetJobDetail(jobid, fid string) (jd *models.JobDetail) {
	var err error
	j := self.GetJob(jobid)
	if j == nil {
		return
	}
	var ok bool
	jd, ok = j.Details[fid]
	if !ok {
		if jd, err = models.GetJobDetail(jobid, fid); err != nil {
			return nil
		}
		j.Details[fid] = jd
	}
	return
}
// AddTask implements task.TaskObserver: a transfer task was created for a
// job detail. Resets/stamps the detail's start time as needed and, when
// the job leaves IDLE, notifies observers that it started.
func (self *JobManager) AddTask(t *task.Task) {
	jobid := t.JobId
	job := self.GetJob(jobid)
	if job == nil {
		return
	}
	jd := self.GetJobDetail(jobid, t.Fid)
	if jd == nil {
		return
	}
	if jd.File == nil {
		jd.GetFile()
	}
	// issue-47, issue-50
	if jd.StartTime.IsZero() || (t.NumSegs == jd.File.NumSegs) {
		if jd.File.Size > 0 {
			jd.Reset()
			jd.StartTime = time.Now()
		} else {
			// zero-size detail: mark it done immediately
			jd.StartTime = time.Now()
			jd.FinishTime = jd.StartTime
		}
		jd.Save()
	}
	if job.State == cydex.TRANSFER_STATE_IDLE {
		job.SaveState(cydex.TRANSFER_STATE_DOING)
		self.lock.Lock()
		defer self.lock.Unlock()
		for _, o := range self.job_observers {
			o.OnJobStart(job)
		}
	}
}
// DelTask implements task.TaskObserver: a transfer task went away.
// cdxs-12: keep the JobDetail state consistent with the task state.
func (self *JobManager) DelTask(t *task.Task) {
	if t == nil {
		return
	}
	jobid := t.JobId
	j := self.GetJob(jobid)
	if j == nil {
		return
	}
	jd := self.GetJobDetail(jobid, t.Fid)
	if jd == nil {
		return
	}
	if jd.State == t.State {
		return
	}
	// NOTE: a timed-out task may still complete after the client retries,
	// so a timed-out task must never overwrite a DONE state.
	if jd.State == cydex.TRANSFER_STATE_DONE {
		return
	}
	// NOTE: a node's task can stop abnormally without carrying a sid, so
	// the jd must still be updated. If the task ended, the jd is not
	// necessarily done (e.g. upload-while-download); end states carrying a
	// sid are handled by TaskStateNotify. Without a sid nothing more can
	// be done here, since completion requires confirming every seg.
	if t.State == cydex.TRANSFER_STATE_PAUSE {
		jd.State = t.State
		jd.Save()
	}
}
// TaskStateNotify implements task.TaskObserver: a seg-level transfer
// state report arrived. It updates the job detail (and, for uploads, the
// seg row), persists when required, and finishes the job when complete.
func (self *JobManager) TaskStateNotify(t *task.Task, state *transfer.TaskState) {
	if t == nil || state == nil || state.Sid == "" {
		return
	}
	sid := state.Sid
	jobid := t.JobId
	j := self.GetJob(jobid)
	if j == nil {
		return
	}
	jd := self.GetJobDetail(jobid, t.Fid)
	if jd == nil {
		return
	}
	if jd.File == nil {
		jd.GetFile()
	}
	seg_state := t.State
	// Update the JobDetail and derive the job state; is it finished?
	force_save := updateJobDetail(jd, state, seg_state)
	if jd.State == cydex.TRANSFER_STATE_DONE {
		j.NumUnfinishedDetails--
	}
	// // job is finished?
	// if j.NumFinishedDetails == len(j.Details) {
	// 	clog.Infof("%s is finished", j)
	// 	// j.SetState(cydex.TRANSFER_STATE_DONE)
	// 	self.lock.Lock()
	// 	delete(self.jobs, j.JobId)
	// 	self.DelTrack(j.Uid, j.Pid, j.Type, false)
	// 	self.lock.Unlock()
	// }
	// jzh: persist changed upload seg states to the database
	if j.Type == cydex.UPLOAD {
		seg_m, _ := models.GetSeg(sid)
		// clog.Tracef("sid:%s model_s:%d runtime_s:%d", sid, seg_m.State, seg_rt.State)
		if seg_m != nil && seg_m.State != seg_state {
			// clog.Trace(sid, "set state ", seg_rt.State)
			seg_m.SetState(seg_state)
		}
	}
	if force_save || time.Since(jd.UpdateAt) >= self.cache_sync_timeout {
		jd.Save()
	}
	self.ProcessJob(jobid)
}
// SetCacheSyncTimeout overrides the cache-to-DB sync interval.
func (self *JobManager) SetCacheSyncTimeout(d time.Duration) {
	self.cache_sync_timeout = d
}

// HasCachedJob reports whether jobid is currently in the in-memory cache.
func (self *JobManager) HasCachedJob(jobid string) bool {
	defer self.lock.Unlock()
	self.lock.Lock()
	_, ok := self.jobs[jobid]
	return ok
}
// AddTrack records that uid is uploading/downloading pid in both the
// per-pkg and per-user tracking tables. mutex selects locking.
func (self *JobManager) AddTrack(uid, pid string, typ int, mutex bool) {
	if mutex {
		self.lock.Lock()
		defer self.lock.Unlock()
	}
	clog.Debugf("add track, u[%s], p[%s], t[%d]", uid, pid, typ)
	track, _ := self.track_pkgs[pid]
	if track == nil {
		track = NewTrack()
		self.track_pkgs[pid] = track
	}
	if typ == cydex.UPLOAD {
		track.Uploads[uid] = 1
	} else {
		track.Downloads[uid] = 1
	}
	track, _ = self.track_users[uid]
	if track == nil {
		track = NewTrack()
		self.track_users[uid] = track
	}
	if typ == cydex.UPLOAD {
		track.Uploads[pid] = 1
	} else {
		track.Downloads[pid] = 1
	}
}
// DelTrack removes (uid, pid, typ) from the tracking tables.
// issues-1: an uploader may only be removed once all downloads complete.
func (self *JobManager) DelTrack(uid, pid string, typ int, mutex bool) {
	if mutex {
		self.lock.Lock()
		defer self.lock.Unlock()
	}
	if typ == cydex.UPLOAD {
		track, _ := self.track_pkgs[pid]
		if track != nil {
			// keep the uploader while downloads remain
			if len(track.Downloads) > 0 {
				return
			}
		}
	}
	self.delTrack(uid, pid, typ)
	// If the pkg has no downloaders left and its uploads are finished, the
	// upload tracks are removed as well.
	// NOTE(review): the early return below aborts the loop on the first
	// unfinished upload, leaving later uploaders untouched — confirm that
	// all-or-nothing semantics is intended.
	track, _ := self.track_pkgs[pid]
	if track != nil {
		if len(track.Downloads) == 0 {
			for uid, _ := range track.Uploads {
				// cdxs-17
				jobid := HashJob(uid, pid, cydex.UPLOAD)
				job := self.getJob(jobid, false)
				if job != nil && !self.isJobFinished(job) {
					return
				}
				self.delTrack(uid, pid, cydex.UPLOAD)
			}
		}
	}
}
// delTrack unconditionally removes (uid, pid, typ) from both tracking
// tables, pruning Track records that become empty. Caller holds the lock.
func (self *JobManager) delTrack(uid, pid string, typ int) {
	clog.Debugf("del track, u[%s], p[%s], t[%d]", uid, pid, typ)
	track, _ := self.track_pkgs[pid]
	if track != nil {
		if typ == cydex.UPLOAD {
			delete(track.Uploads, uid)
		} else {
			delete(track.Downloads, uid)
		}
		if len(track.Uploads) == 0 && len(track.Downloads) == 0 {
			delete(self.track_pkgs, pid)
		}
	}
	track, _ = self.track_users[uid]
	if track != nil {
		if typ == cydex.UPLOAD {
			delete(track.Uploads, pid)
		} else {
			delete(track.Downloads, pid)
		}
		if len(track.Uploads) == 0 && len(track.Downloads) == 0 {
			delete(self.track_users, uid)
		}
	}
}
// GetPkgTrack returns the uids uploading (or downloading) pkg pid.
func (self *JobManager) GetPkgTrack(pid string, typ int) (uids []string) {
	self.lock.Lock()
	defer self.lock.Unlock()
	var m map[string]int
	track, _ := self.track_pkgs[pid]
	if track != nil {
		if typ == cydex.UPLOAD {
			m = track.Uploads
		} else {
			m = track.Downloads
		}
		for k, _ := range m {
			uids = append(uids, k)
		}
	}
	return
}

// GetUserTrack returns the pids that user uid is uploading (or
// downloading).
func (self *JobManager) GetUserTrack(uid string, typ int) (pids []string) {
	self.lock.Lock()
	defer self.lock.Unlock()
	var m map[string]int
	track, _ := self.track_users[uid]
	if track != nil {
		if typ == cydex.UPLOAD {
			m = track.Uploads
		} else {
			m = track.Downloads
		}
		for k, _ := range m {
			pids = append(pids, k)
		}
	}
	return
}
// GetJobsByUid collects the jobs (via the cache) of user uid for the
// given direction.
func (self *JobManager) GetJobsByUid(uid string, typ int) (jobs []*models.Job, err error) {
	pids := self.GetUserTrack(uid, typ)
	for _, pid := range pids {
		hashid := HashJob(uid, pid, typ)
		job := self.GetJob(hashid)
		if job != nil {
			jobs = append(jobs, job)
		}
	}
	return
}

// GetJobsByPid collects the jobs (via the cache) of pkg pid for the
// given direction.
func (self *JobManager) GetJobsByPid(pid string, typ int) (jobs []*models.Job, err error) {
	uids := self.GetPkgTrack(pid, typ)
	for _, uid := range uids {
		hashid := HashJob(uid, pid, typ)
		job := self.GetJob(hashid)
		if job != nil {
			jobs = append(jobs, job)
		}
	}
	return
}
// LoadTracks rebuilds the tracking tables from the unfinished jobs stored
// in the database (used at startup).
func (self *JobManager) LoadTracks() error {
	clog.Debug("load tracks")
	jobs, err := models.GetUnFinishedJobs()
	if err != nil {
		return err
	}
	defer self.lock.Unlock()
	self.lock.Lock()
	self.ClearTracks(false)
	for _, j := range jobs {
		self.AddTrack(j.Uid, j.Pid, j.Type, false)
		// issue-1: uploaders must monitor downloader state; finished
		// uploads are re-added to the track as well.
		if j.Type == cydex.DOWNLOAD {
			upload_jobs, _ := models.GetJobsByPid(j.Pid, cydex.UPLOAD, nil)
			for _, u_job := range upload_jobs {
				self.AddTrack(u_job.Uid, u_job.Pid, u_job.Type, false)
			}
		}
	}
	return nil
}
// JobsSyncState backfills job state after the FinishedTimes field was
// added (cdxs-13): any finished job that never recorded a finish gets one
// now, for both directions.
func (self *JobManager) JobsSyncState() error {
	clog.Infof("jobs sync state")
	jobs, err := models.GetJobs(cydex.DOWNLOAD, nil)
	if err == nil {
		for _, job := range jobs {
			if job.IsFinished() {
				if job.FinishedTimes == 0 {
					job.Finish()
				}
			}
		}
	}
	jobs, err = models.GetJobs(cydex.UPLOAD, nil)
	if err == nil {
		for _, job := range jobs {
			if job.IsFinished() {
				if job.FinishedTimes == 0 {
					job.Finish()
				}
			}
		}
	}
	models.ProtectJobDetailState()
	return nil
}
// AddTrackOfDelete records a deleted pid under uid so clients can
// discover deletions through the incremental API.
func (self *JobManager) AddTrackOfDelete(uid string, pid string, typ int, mutex bool) {
	if mutex {
		self.lock.Lock()
		defer self.lock.Unlock()
	}
	clog.Debugf("add delete track, u[%s], p[%s], t[%d]", uid, pid, typ)
	track, _ := self.track_deletes[uid]
	if track == nil {
		track = NewTrack()
		self.track_deletes[uid] = track
	}
	if typ == cydex.UPLOAD {
		track.Uploads[pid] = 1
	} else {
		track.Downloads[pid] = 1
	}
}
// getTrackOfDelete returns the deleted pids recorded for uid; when remove
// is true the entries are consumed so the next call will not see them.
// Caller holds the lock.
func (self *JobManager) getTrackOfDelete(uid string, typ int, remove bool) (pids []string) {
	var m map[string]int
	track, _ := self.track_deletes[uid]
	if track == nil {
		return
	}
	if typ == cydex.UPLOAD {
		m = track.Uploads
	} else {
		m = track.Downloads
	}
	for k, _ := range m {
		pids = append(pids, k)
		if remove {
			// deleting the current key during range is safe in Go
			delete(m, k)
		}
	}
	if len(track.Uploads) == 0 && len(track.Downloads) == 0 {
		delete(self.track_deletes, uid)
	}
	return
}
// isJobFinished reports whether every detail of the job has completed,
// using the cached counter when available and a fresh DB count otherwise.
func (self *JobManager) isJobFinished(job *models.Job) bool {
	if job.IsCached {
		return job.NumUnfinishedDetails <= 0
	}
	return job.CountUnfinishedDetails() == 0
}
// ProcessJob checks whether the job has fully finished and, if so, marks
// it finished, removes its cache/track entries (optionally after a
// delay), and notifies observers.
func (self *JobManager) ProcessJob(jobid string) {
	job := self.GetJob(jobid)
	if job == nil {
		return
	}
	if self.isJobFinished(job) {
		clog.Infof("%s is finished", jobid)
		job.Finish()
		// remove track and cache, possibly delayed
		if self.del_job_delay > 0 {
			go func() {
				// NOTE(review): this re-binding is a no-op — job is already
				// a per-call variable captured by the closure.
				job := job
				time.Sleep(self.del_job_delay)
				self.lock.Lock()
				delete(self.jobs, jobid)
				self.DelTrack(job.Uid, job.Pid, job.Type, false)
				self.lock.Unlock()
			}()
		} else {
			self.lock.Lock()
			delete(self.jobs, jobid)
			self.DelTrack(job.Uid, job.Pid, job.Type, false)
			self.lock.Unlock()
		}
		self.lock.Lock()
		for _, o := range self.job_observers {
			o.OnJobFinish(job)
		}
		self.lock.Unlock()
	}
}
// GetTrackOfDelete is the locking wrapper around getTrackOfDelete;
// remove consumes the entries so the next call will not return them.
func (self *JobManager) GetTrackOfDelete(uid string, typ int, remove, mutex bool) (pids []string) {
	if mutex {
		self.lock.Lock()
		defer self.lock.Unlock()
	}
	pids = self.getTrackOfDelete(uid, typ, remove)
	return
}

// GetUnFinishedJobCount totals the tracked upload and download entries
// across all pkgs.
func (self *JobManager) GetUnFinishedJobCount() (upload, download int) {
	self.lock.Lock()
	defer self.lock.Unlock()
	for _, track := range self.track_pkgs {
		upload += len(track.Uploads)
		download += len(track.Downloads)
	}
	return
}
// ClearTracks resets all three tracking tables.
func (self *JobManager) ClearTracks(mutex bool) {
	if mutex {
		defer self.lock.Unlock()
		self.lock.Lock()
	}
	self.track_users = make(map[string]*Track)
	self.track_pkgs = make(map[string]*Track)
	self.track_deletes = make(map[string]*Track)
}

// AddJobObserver registers o for job lifecycle callbacks; nil is ignored.
func (self *JobManager) AddJobObserver(o JobObserver) {
	if o == nil {
		return
	}
	defer self.lock.Unlock()
	self.lock.Lock()
	self.job_observers = append(self.job_observers, o)
}
// UpdateJobDetailProcess overwrites the progress counters of a job
// detail and persists it immediately.
func UpdateJobDetailProcess(job_id, fid string, finished_size uint64, num_finished_segs int) {
	jd := JobMgr.GetJobDetail(job_id, fid)
	if jd == nil {
		return
	}
	clog.Tracef("%s update process: %d %d", jd, finished_size, num_finished_segs)
	jd.FinishedSize = finished_size
	jd.NumFinishedSegs = num_finished_segs
	jd.Save()
}
|
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the ele that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* Author: Mahmoud Abdelsalam <scroveez@gmail.com>
*
*/
// goel packet format and session protocols
package el
import (
"bytes"
"crypto/rand"
"encoding/binary"
"errors"
"fmt"
"net"
"strings"
"sync"
"sync/atomic"
"time"
)
const (
	HOP_REQ uint8 = 0x20
	HOP_ACK uint8 = 0xAC
	HOP_DAT uint8 = 0xDA

	HOP_FLG_PSH byte = 0x80 // port knocking and heartbeat
	HOP_FLG_HSH byte = 0x40 // handshaking
	HOP_FLG_FIN byte = 0x20 // finish session
	HOP_FLG_MFR byte = 0x08 // more fragments
	HOP_FLG_ACK byte = 0x04 // acknowledge
	HOP_FLG_DAT byte = 0x00 // plain data (no flag bits set)

	// NOTE(review): iota continues counting from the start of this const
	// block, so HOP_STAT_INIT is 9, not 0. The states are only compared
	// against each other in this file, so the code works, but a separate
	// const block would be clearer — TODO confirm nothing external
	// depends on the numeric values.
	HOP_STAT_INIT int32 = iota // initing
	HOP_STAT_HANDSHAKE         // handshaking
	HOP_STAT_WORKING           // working
	HOP_STAT_FIN               // finishing

	HOP_HDR_LEN int = 16

	HOP_PROTO_VERSION byte = 0x01 // protocol version
)
// elPacketHeader is the fixed-size wire header of a packet
// (serialized big-endian; HOP_HDR_LEN bytes on the wire).
type elPacketHeader struct {
	Flag       byte   // HOP_FLG_* bits
	Seq        uint32 // per-peer sequence number
	Plen       uint16
	FragPrefix uint16
	Frag       uint8
	Sid        uint32 // session id
	Dlen       uint16 // payload length in bytes
}
// String renders the header for debugging, spelling out the flag bits
// symbolically ("DAT" when no status flags are set, otherwise the set
// bits joined with " | ").
func (p elPacketHeader) String() string {
	parts := make([]string, 0, 8)
	if (p.Flag^HOP_FLG_MFR == 0) || (p.Flag == 0) {
		parts = append(parts, "DAT")
	}
	// Check each status bit in the original's fixed output order.
	for _, f := range []struct {
		bit  byte
		name string
	}{
		{HOP_FLG_PSH, "PSH"},
		{HOP_FLG_HSH, "HSH"},
		{HOP_FLG_FIN, "FIN"},
		{HOP_FLG_ACK, "ACK"},
		{HOP_FLG_MFR, "MFR"},
	} {
		if p.Flag&f.bit != 0 {
			parts = append(parts, f.name)
		}
	}
	return fmt.Sprintf(
		"{Flag: %s, Seq: %d, Plen: %d, Prefix: %d, Frag: %d, Dlen: %d}",
		strings.Join(parts, " | "), p.Seq, p.Plen, p.FragPrefix, p.Frag, p.Dlen,
	)
}
// ElPacket is a full packet: header, payload, and random padding
// ("noise"). buf, when set, holds the contiguous serialized form and is
// reused to avoid copies.
type ElPacket struct {
	elPacketHeader
	payload []byte
	noise   []byte
	buf     []byte
}

// cipher is the package-wide symmetric cipher used by Pack and
// unpackElPacket.
var cipher *elCipher
// Pack serializes the packet (header, payload, noise) and returns the
// encrypted wire bytes.
//
// When p.buf is already populated, only the header is rewritten at the
// front of the existing buffer; the payload and noise bytes are assumed
// to already be laid out in place after it — NOTE(review): confirm all
// callers establish that layout before calling Pack.
func (p *ElPacket) Pack() []byte {
	p.Dlen = uint16(len(p.payload))
	var buf *bytes.Buffer
	if p.buf != nil {
		// reduce memcopy
		buf = bytes.NewBuffer(p.buf[:0])
		binary.Write(buf, binary.BigEndian, p.elPacketHeader)
	} else {
		buf = bytes.NewBuffer(make([]byte, 0, p.Size()))
		binary.Write(buf, binary.BigEndian, p.elPacketHeader)
		buf.Write(p.payload)
		buf.Write(p.noise)
		p.buf = buf.Bytes()
	}
	return cipher.encrypt(p.buf)
}
// Size returns the total unencrypted packet length in bytes.
func (p *ElPacket) Size() int {
	return HOP_HDR_LEN + len(p.payload) + len(p.noise)
}

// setPayload installs d as the payload and records its length in the
// header.
func (p *ElPacket) setPayload(d []byte) {
	p.payload = d
	p.Dlen = uint16(len(p.payload))
}

// addNoise fills the packet's padding with random bytes.
// NOTE(review): when p.buf is pre-allocated, the noise region is
// whatever space remains after the payload — the n argument is ignored
// in that branch; confirm this is intended.
func (p *ElPacket) addNoise(n int) {
	if p.buf != nil {
		s := HOP_HDR_LEN + len(p.payload)
		p.noise = p.buf[s:len(p.buf)]
	} else {
		p.noise = make([]byte, n)
	}
	rand.Read(p.noise)
}

// setSid stores the 4-byte session id into the header as a big-endian
// uint32.
func (p *ElPacket) setSid(sid [4]byte) {
	p.Sid = binary.BigEndian.Uint32(sid[:])
}

// String renders the whole packet, payload and noise for debugging.
func (p *ElPacket) String() string {
	return fmt.Sprintf(
		"{%v, Payload: %v, Noise: %v}",
		p.elPacketHeader, p.payload, p.noise,
	)
}
// unpackElPacket decrypts and parses a raw datagram into an ElPacket.
// The first cipherBlockSize bytes are the IV; the rest is ciphertext.
// Improvements: a buffer shorter than the IV no longer panics on the
// slice expressions, and the else-after-return is flattened into early
// returns.
func unpackElPacket(b []byte) (*ElPacket, error) {
	if len(b) < cipherBlockSize {
		return nil, errors.New("Decrypt Packet Error")
	}
	iv := b[:cipherBlockSize]
	ctext := b[cipherBlockSize:]
	frame := cipher.decrypt(iv, ctext)
	if frame == nil {
		return nil, errors.New("Decrypt Packet Error")
	}
	buf := bytes.NewBuffer(frame)
	p := new(ElPacket)
	binary.Read(buf, binary.BigEndian, &p.elPacketHeader)
	p.payload = make([]byte, p.Dlen)
	buf.Read(p.payload)
	return p, nil
}
func udpAddrHash(a *net.UDPAddr) [6]byte {
var b [6]byte
copy(b[:4], []byte(a.IP)[:4])
p := uint16(a.Port)
b[4] = byte((p >> 8) & 0xFF)
b[5] = byte(p & 0xFF)
return b
}
// hUDPAddr pairs a UDP address with its precomputed 6-byte hash key.
type hUDPAddr struct {
	u    *net.UDPAddr
	hash [6]byte
}

// newhUDPAddr wraps addr together with its hash.
func newhUDPAddr(a *net.UDPAddr) *hUDPAddr {
	return &hUDPAddr{a, udpAddrHash(a)}
}
// goel Peer is a record of a peer's available UDP addrs
type ElPeer struct {
	id           uint64
	ip           net.IP
	addrs        map[[6]byte]int // addr hash -> socket index
	_addrs_lst   []*hUDPAddr     // i know it's ugly!
	seq          uint32          // outgoing sequence counter (atomic)
	state        int32           // one of the HOP_STAT_* values
	hsDone       chan struct{}   // Handshake done
	recvBuffer   *elPacketBuffer
	srv          *ElServer
	_lock        sync.RWMutex // guards addrs and _addrs_lst
	lastSeenTime time.Time
}
// newElPeer creates a peer in the INIT state, seeded with one known UDP
// address mapped to socket index idx.
func newElPeer(id uint64, srv *ElServer, addr *net.UDPAddr, idx int) *ElPeer {
	hp := new(ElPeer)
	hp.id = id
	hp._addrs_lst = make([]*hUDPAddr, 0)
	hp.addrs = make(map[[6]byte]int)
	hp.state = HOP_STAT_INIT
	hp.seq = 0
	hp.srv = srv
	hp.recvBuffer = newElPacketBuffer(srv.toIface)
	// logger.Debug("%v, %v", hp.recvBuffer, hp.srv)
	a := newhUDPAddr(addr)
	hp._addrs_lst = append(hp._addrs_lst, a)
	hp.addrs[a.hash] = idx
	return hp
}
// Seq atomically returns the next outgoing sequence number.
func (h *ElPeer) Seq() uint32 {
	return atomic.AddUint32(&h.seq, 1)
}

// addr picks a random known address of the peer and returns it with its
// socket index; ok is false if the picked address is not tracked.
func (h *ElPeer) addr() (*net.UDPAddr, int, bool) {
	defer h._lock.RUnlock()
	h._lock.RLock()
	addr := randAddr(h._addrs_lst)
	// addr := h._addrs_lst[0]
	idx, ok := h.addrs[addr.hash]
	return addr.u, idx, ok
}
// insertAddr registers a newly observed address for the peer unless its
// hash is already known.
func (h *ElPeer) insertAddr(addr *net.UDPAddr, idx int) {
	defer h._lock.Unlock()
	h._lock.Lock()
	a := newhUDPAddr(addr)
	if _, found := h.addrs[a.hash]; !found {
		h.addrs[a.hash] = idx
		h._addrs_lst = append(h._addrs_lst, a)
		//logger.Info("%v %d", addr, len(h._addrs_lst))
	}
}
|
// Unit tests for file implementation of item repository.
//
// @author TSS
package file
import (
"testing"
"github.com/mashmb/1pass/1pass-core/core/domain"
)
// setupFileItemRepo builds a fileItemRepo preloaded with the items from
// the shared test vault fixture; raw items whose category code cannot be
// resolved are skipped.
func setupFileItemRepo() *fileItemRepo {
	vault := domain.NewVault("../../../assets/onepassword_data")
	repo := NewFileItemRepo()
	items := make([]*domain.Item, 0)
	rawItems := repo.LoadItems(vault)
	for _, rawItem := range rawItems {
		cat, err := domain.ItemCategoryEnum.FromCode(rawItem.Category)
		if err == nil {
			item := domain.NewItem(rawItem.Uid, "", "", "", rawItem.Trashed, cat, nil, rawItem.Created, rawItem.Updated)
			items = append(items, item)
		}
	}
	repo.StoreItems(items)
	return repo
}
// TestCountByCategoryAndTrashed checks the fixture counts: 27 active
// items overall, 2 trashed, and 10 active logins.
func TestCountByCategoryAndTrashed(t *testing.T) {
	repo := setupFileItemRepo()
	expected := 27
	all := repo.CountByCategoryAndTrashed(nil, false)
	if all != expected {
		t.Errorf("CountByCategoryAndTrashed() = %d; expected %d", all, expected)
	}
	expected = 2
	trashed := repo.CountByCategoryAndTrashed(nil, true)
	if trashed != expected {
		t.Errorf("CountByCategoryAndTrashed() = %d; expected %d", trashed, expected)
	}
	expected = 10
	logins := repo.CountByCategoryAndTrashed(domain.ItemCategoryEnum.Login, false)
	if logins != expected {
		t.Errorf("CountByCategoryAndTrashed() = %d; expected %d", logins, expected)
	}
}
// TestFindByCategoryAndTitleAndTrashed exercises the finder with an empty
// title filter across all categories, logins, credit cards, and trash.
func TestFindByCategoryAndTitleAndTrashed(t *testing.T) {
	repo := setupFileItemRepo()
	expected := 27
	all := repo.FindByCategoryAndTitleAndTrashed(nil, "", false)
	if len(all) != expected {
		t.Errorf("[ALL] FindByCategoryAndTitleAndTrashed() = %d; expected = %d", len(all), expected)
	}
	expected = 10
	logins := repo.FindByCategoryAndTitleAndTrashed(domain.ItemCategoryEnum.Login, "", false)
	if len(logins) != expected {
		t.Errorf("[LOGINS] FindByCategoryAndTitleAndTrashed() = %d; expected = %d", len(logins), expected)
	}
	expected = 2
	cards := repo.FindByCategoryAndTitleAndTrashed(domain.ItemCategoryEnum.CreditCard, "", false)
	if len(cards) != expected {
		t.Errorf("[CARD] FindByCategoryAndTitleAndTrashed() = %d; expected = %d", len(cards), expected)
	}
	expected = 2
	trashed := repo.FindByCategoryAndTitleAndTrashed(nil, "", true)
	if len(trashed) != expected {
		t.Errorf("[TRASH] FindByCategoryAndTitleAndTrashed() = %d; expected = %d", len(trashed), expected)
	}
}
// TestFindFirtByUidAndTrashed looks up one known active and one known
// trashed item by uid and verifies both fields round-trip.
// NOTE(review): "Firt" is a typo for "First"; renaming the test is left
// for a dedicated change since test names are discovered by the runner.
func TestFindFirtByUidAndTrashed(t *testing.T) {
	repo := setupFileItemRepo()
	uid := "358B7411EB8B45CD9CE592ED16F3E9DE"
	trashed := false
	item := repo.FindFirstByUidAndTrashed(uid, trashed)
	if item.Uid != uid {
		t.Errorf("[NOT-TRSHED] FindFirstByUidAndTrashed() = %v; expected = %v", item.Uid, uid)
	}
	if item.Trashed != trashed {
		t.Errorf("[NOT-TRASHED] FindFirstByUidAndTrashed() = %v; expected = %v", item.Trashed, trashed)
	}
	uid = "0C4F27910A64488BB339AED63565D148"
	trashed = true
	item = repo.FindFirstByUidAndTrashed(uid, trashed)
	if item.Uid != uid {
		t.Errorf("[TRSHED] FindFirstByUidAndTrashed() = %v; expected = %v", item.Uid, uid)
	}
	if item.Trashed != trashed {
		t.Errorf("[TRASHED] FindFirstByUidAndTrashed() = %v; expected = %v", item.Trashed, trashed)
	}
}
// TestLoadItems verifies the raw fixture contains 29 items (including the
// two whose categories are later filtered out by setupFileItemRepo).
func TestLoadItems(t *testing.T) {
	repo := NewFileItemRepo()
	vault := domain.NewVault("../../../assets/onepassword_data")
	expected := 29
	items := repo.LoadItems(vault)
	if len(items) != expected {
		t.Errorf("LoadItems() = %d; expected = %d", len(items), expected)
	}
}
|
/******************************************************************************
Handler library based on Gorilla
Just add routes with their respective handlers
This file contains all the database related functions.
******************************************************************************/
package handlers
import (
"database/sql"
"log"
_ "github.com/mattn/go-sqlite3"
)
// Basing structure for each page
// Extend as needed
type PageData struct {
	BlogName string
	PostData *Post
	UserData *User
}

// Post is one blog entry as stored in the Post table.
type Post struct {
	Title string
	Body  string
	Date  string
}

// User identifies a blog user.
type User struct {
	Uid  int
	Name string
}

// DB_DRIVER names the database/sql driver in use.
var DB_DRIVER string

// dbPath is the on-disk location of the SQLite database file.
const dbPath = "./assets/static/blog.db"
// insertPost inserts one row into the Post table.
// Fix: the sql.DB handle and prepared statement were never closed, leaking
// a connection pool and statement per call.
func insertPost(t string, p string, d string) {
	db, err := sql.Open("sqlite3", dbPath)
	checkErr(err)
	defer db.Close()
	stmt, err := db.Prepare("INSERT INTO Post(Title, Body, Date) values(?,?,?)")
	checkErr(err)
	defer stmt.Close()
	_, err = stmt.Exec(t, p, d)
	checkErr(err)
}
// Select all post for index
// selectAllPost returns every row of the Post table.
// NOTE(review): the sql.DB handle is opened per call and never closed —
// it must outlive the returned rows, but a shared package-level handle
// would avoid leaking a pool per call.
func selectAllPost() (rows *sql.Rows) {
	db, err := sql.Open("sqlite3", dbPath)
	checkErr(err)
	rows, err = db.Query("SELECT * FROM Post")
	checkErr(err)
	return
}
// selectAllTitles returns up to ten post titles.
// Fix: the Query error was silently discarded; route it through checkErr
// like every other database call in this file.
func selectAllTitles() (rows *sql.Rows) {
	db, err := sql.Open("sqlite3", dbPath)
	checkErr(err)
	rows, err = db.Query("SELECT Title FROM Post LIMIT 10")
	checkErr(err)
	return
}
// checkErr logs the error and terminates the process when err is non-nil.
// Fixes: log.Fatal does not interpret format verbs, so the "%s" was
// printed literally — use log.Fatalf; the panic after it was unreachable
// because Fatal exits the process.
func checkErr(err error) {
	if err != nil {
		log.Fatalf("Error on database\n %s\n", err)
	}
}
|
package main
import (
"fmt"
)
// main demonstrates pointer basics: taking an address, reading through a
// pointer, and writing through it.
func main() {
	i := 123
	ptr := &i
	fmt.Println("Address of i:", &i)
	fmt.Println("Value of ptr (address of i)", ptr)
	fmt.Println("Value of i:", i)
	fmt.Println("Value of i via pointer:", *ptr)
	*ptr = 999 // writes through the pointer update i itself
	fmt.Println("Value of i via pointer", i)
}
|
package controller
import (
"math"
)
// gridParams describes a uniform 2-D grid over the rectangle
// [xmin,xmax] x [ymin,ymax], divided into xN columns and yN rows.
type gridParams struct {
	xmin, ymin, xmax, ymax float64
	xN, yN                 int
}

// NewGridParams builds a gridParams for the given bounds and cell counts.
func NewGridParams(xmin, ymin, xmax, ymax float64, xN, yN int) *gridParams {
	return &gridParams{xmin: xmin, ymin: ymin, xmax: xmax, ymax: ymax, xN: xN, yN: yN}
}

// dx returns the width of one grid cell.
func (params *gridParams) dx() float64 {
	return (params.xmax - params.xmin) / float64(params.xN)
}

// dy returns the height of one grid cell.
func (params *gridParams) dy() float64 {
	return (params.ymax - params.ymin) / float64(params.yN)
}

// indexAt locates the cell containing (x, y) and returns its column and
// row indices together with the four bilinear-interpolation weights for
// the cell corners, ordered (low-x/low-y, high-x/low-y, low-x/high-y,
// high-x/high-y). The weights sum to 1.
func (params *gridParams) indexAt(x, y float64) (int, int, []float64) {
	// Hoist the cell size: the original recomputed dx()/dy() on every use.
	dx, dy := params.dx(), params.dy()
	ix := int(math.Floor((x - params.xmin) / dx))
	xin := x - params.xmin - float64(ix)*dx // offset of x within its cell
	iy := int(math.Floor((y - params.ymin) / dy))
	yin := y - params.ymin - float64(iy)*dy // offset of y within its cell
	allArea := dx * dy
	return ix, iy,
		[]float64{
			(dx - xin) * (dy - yin) / allArea,
			xin * (dy - yin) / allArea,
			(dx - xin) * yin / allArea,
			xin * yin / allArea,
		}
}

// pos returns the world coordinates of the low corner of cell (ix, iy).
func (params *gridParams) pos(ix, iy int) (float64, float64) {
	return params.xmin + float64(ix)*params.dx(), params.ymin + float64(iy)*params.dy()
}
package bitfinex_websocket
import (
"fmt"
"github.com/gorilla/websocket"
"os"
"time"
)
// Bitfinex is the contract for a Bitfinex websocket client.
// NOTE(review): the concrete *bitfinex type in this file declares
// Subscribe(channel string), so it does not satisfy this interface as
// written — confirm which signature is intended.
type Bitfinex interface {
	// Ping keeps the connection alive.
	Ping()
	// WsConnect dials the websocket endpoint.
	WsConnect()
	// Subscribe sends the queued subscription requests.
	Subscribe()
	// ReadMessage consumes incoming frames.
	ReadMessage()
	// BFTickWebsocket queues ticker subscriptions.
	BFTickWebsocket()
	// BFDepthWebsocket queues order-book subscriptions.
	BFDepthWebsocket()
	// BFTradeWebsocket queues trade subscriptions.
	BFTradeWebsocket()
	// BFKlineWebsocket queues candle subscriptions.
	BFKlineWebsocket()
}
// bitfinex is the concrete websocket client: the endpoint URL, the live
// connection, and subscription requests queued per channel type.
type bitfinex struct {
	Url           string                           // websocket endpoint (from BITFINEX_URL)
	Ws            *websocket.Conn                  // live connection, set by WsConnect
	Channels      []*BitfinexWebsocketRequest      // ticker/trade subscriptions
	DepthChannels []*BitfinexDepthWebsocketRequest // order-book subscriptions
	KlineChannels []*BitfinexKlineWebsocketRequest // candle subscriptions
}
// BitfinexWebsocketInit constructs a client whose endpoint URL is taken
// from the BITFINEX_URL environment variable.
func BitfinexWebsocketInit() *bitfinex {
	return &bitfinex{Url: os.Getenv("BITFINEX_URL")}
}
// WsConnect dials the configured endpoint and stores the resulting
// connection on the client. On failure the error is printed and the
// connection is left unset.
func (o *bitfinex) WsConnect() {
	var dialer websocket.Dialer
	conn, _, err := dialer.Dial(o.Url, nil)
	if err != nil {
		fmt.Println("websocket connect error:", err)
		return
	}
	o.Ws = conn
}
// Ping keeps the connection alive by sending a JSON ping event every
// five seconds. It returns when a write fails (e.g. the connection was
// closed).
func (o *bitfinex) Ping() {
	pingMsg := ping{Event: "ping"}
	// The original also selected on a local `done` channel that nothing
	// ever signalled; that unreachable branch has been removed.
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	for range ticker.C {
		if err := o.Ws.WriteJSON(pingMsg); err != nil {
			fmt.Println("ping error: ", err)
			return
		}
	}
}
// Subscribe sends the queued subscription requests for the given
// channel kind: "depth" drains DepthChannels, "kline" drains
// KlineChannels, anything else drains Channels. Sending stops at the
// first write error.
func (o *bitfinex) Subscribe(channel string) {
	// send writes one request; it reports false on failure so the
	// caller can stop, matching the original early-return behavior.
	send := func(req interface{}) bool {
		if err := o.Ws.WriteJSON(req); err != nil {
			fmt.Println("subscribe error: ", err)
			return false
		}
		return true
	}
	switch channel {
	case "depth":
		if o.DepthChannels == nil {
			fmt.Println("no channels")
			return
		}
		for _, c := range o.DepthChannels {
			if !send(*c) {
				return
			}
		}
	case "kline":
		if o.KlineChannels == nil {
			fmt.Println("no channels")
			return
		}
		for _, c := range o.KlineChannels {
			if !send(*c) {
				return
			}
		}
	default:
		if o.Channels == nil {
			fmt.Println("no channels")
			return
		}
		for _, c := range o.Channels {
			if !send(*c) {
				return
			}
		}
	}
}
// ReadMessage consumes frames from the websocket until a read error
// occurs. Text frames are printed; a close frame closes our side of
// the connection and ends the loop (reconnecting is left to callers).
func (o *bitfinex) ReadMessage() {
	for {
		msgType, msg, err := o.Ws.ReadMessage()
		if err != nil {
			fmt.Println("read message error: ", err)
			return
		}
		switch msgType {
		case websocket.TextMessage:
			data := string(msg)
			fmt.Println("message is:", msgType, data)
		case websocket.CloseMessage:
			if err := o.Ws.Close(); err != nil {
				fmt.Println("error:", err)
			}
			return
		}
	}
}
// BFTickWebsocket queues a "ticker" subscription request onto Channels
// for every known symbol.
func (o *bitfinex) BFTickWebsocket() {
	for _, symbol := range NewBitfinexSymbol().BitfinexSymbols {
		req := &BitfinexWebsocketRequest{
			Event:   "subscribe",
			Channel: "ticker",
			Pair:    symbol,
		}
		o.Channels = append(o.Channels, req)
	}
}
// BFTradeWebsocket queues a "trades" subscription request onto Channels
// for every known symbol.
func (o *bitfinex) BFTradeWebsocket() {
	for _, symbol := range NewBitfinexSymbol().BitfinexSymbols {
		req := &BitfinexWebsocketRequest{
			Event:   "subscribe",
			Channel: "trades",
			Pair:    symbol,
		}
		o.Channels = append(o.Channels, req)
	}
}
// BFDepthWebsocket queues a "book" (order-book) subscription request
// onto DepthChannels for every known symbol, with precision "P0" and
// length "25".
func (o *bitfinex) BFDepthWebsocket() {
	for _, symbol := range NewBitfinexSymbol().BitfinexSymbols {
		req := &BitfinexDepthWebsocketRequest{
			Event:   "subscribe",
			Channel: "book",
			Prec:    "P0",
			Symbol:  symbol,
			Len:     "25",
		}
		o.DepthChannels = append(o.DepthChannels, req)
	}
}
// BFKlineWebsocket queues a "candles" subscription request onto
// KlineChannels for every (cycle, symbol) combination, using keys of
// the form "trade:<cycle>:t<symbol>".
func (o *bitfinex) BFKlineWebsocket() {
	symbols := NewBitfinexSymbol().BitfinexSymbols
	cycles := NewBitfinexCycle().BitfinexCycles // original local was misspelled "bfCylces"
	for _, cycle := range cycles {
		for _, symbol := range symbols {
			req := &BitfinexKlineWebsocketRequest{
				Event:   "subscribe",
				Channel: "candles",
				Key:     "trade:" + cycle + ":t" + symbol,
			}
			o.KlineChannels = append(o.KlineChannels, req)
		}
	}
}
// ping is the JSON payload sent by Ping to keep the connection alive.
type ping struct {
	Event string `json:"event"` // always "ping"
}
// BitfinexWebsocketRequest is a subscription request for the ticker and
// trade channels.
type BitfinexWebsocketRequest struct {
	Event   string `json:"event"`   // e.g. "subscribe"
	Channel string `json:"channel"` // e.g. "ticker" or "trades"
	Pair    string `json:"pair"`    // trading pair symbol
}
// BitfinexDepthWebsocketRequest is a subscription request for the
// order-book ("book") channel.
type BitfinexDepthWebsocketRequest struct {
	Event   string `json:"event"`
	Channel string `json:"channel"`
	Prec    string `json:"prec"` // price precision, e.g. "P0"
	Symbol  string `json:"symbol"`
	Len     string `json:"len"` // e.g. "25"
}
// BitfinexKlineWebsocketRequest is a subscription request for the
// candle ("candles") channel.
type BitfinexKlineWebsocketRequest struct {
	Event   string `json:"event"`
	Channel string `json:"channel"`
	Key     string `json:"key"` // e.g. "trade:<cycle>:t<symbol>"
}
|
/*
Copyright IBM Corporation 2020
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package source_test
import (
"encoding/base64"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"testing"
log "github.com/sirupsen/logrus"
common "github.com/konveyor/move2kube/internal/common"
"github.com/konveyor/move2kube/internal/source"
irtypes "github.com/konveyor/move2kube/internal/types"
plantypes "github.com/konveyor/move2kube/types/plan"
yaml "gopkg.in/yaml.v3"
)
// TestGetServiceOptions exercises Any2KubeTranslator.GetServiceOptions
// against missing/empty/unreadable directories, the sample nodejs app
// (with empty, pre-filled, and already-containerized plans), and the
// various .m2kignore scenarios recorded under testdata/.
func TestGetServiceOptions(t *testing.T) {
	log.SetLevel(log.DebugLevel)
	t.Run("get services with a non existent directory and empty plan", func(t *testing.T) {
		// Setup
		inputpath := "this/does/not/exit/foobar/"
		translator := source.Any2KubeTranslator{}
		plan := plantypes.NewPlan()
		want := []plantypes.Service{}
		// Test
		services, err := translator.GetServiceOptions(inputpath, plan)
		if err != nil {
			t.Fatal("Failed to get the services. Error:", err)
		}
		if !reflect.DeepEqual(services, want) {
			t.Fatal("Failed to get the services properly. Expected:", want, "actual:", services)
		}
	})
	t.Run("get services with empty directory and empty plan", func(t *testing.T) {
		// Setup
		inputpath := t.TempDir()
		translator := source.Any2KubeTranslator{}
		plan := plantypes.NewPlan()
		want := []plantypes.Service{}
		// Test
		services, err := translator.GetServiceOptions(inputpath, plan)
		if err != nil {
			t.Fatal("Failed to get the services. Error:", err)
		}
		if !reflect.DeepEqual(services, want) {
			t.Fatal("Failed to get the services properly. Expected:", want, "actual:", services)
		}
	})
	t.Run("get services when the directory contains files and directories we can't read", func(t *testing.T) {
		// Setup
		inputpath := t.TempDir()
		// Permission bits 0: deliberately unreadable directory/file.
		subdirpath := filepath.Join(inputpath, "nopermstoread")
		if err := os.Mkdir(subdirpath, 0); err != nil {
			t.Fatal("Failed to create a temporary directory for testing at path", subdirpath, "Error:", err)
		}
		ignorefilepath := filepath.Join(inputpath, common.IgnoreFilename)
		if err := ioutil.WriteFile(ignorefilepath, []byte("foo/"), 0); err != nil {
			t.Fatal("Failed to create a temporary file for testing at path", ignorefilepath, "Error:", err)
		}
		translator := source.Any2KubeTranslator{}
		plan := plantypes.NewPlan()
		want := []plantypes.Service{}
		// Test
		services, err := translator.GetServiceOptions(inputpath, plan)
		if err != nil {
			t.Fatal("Failed to get the services. Error:", err)
		}
		if !reflect.DeepEqual(services, want) {
			t.Fatal("Failed to get the services properly. Expected:", want, "actual:", services)
		}
	})
	t.Run("get services from a simple nodejs app and empty plan", func(t *testing.T) {
		// Setup
		inputPath := "../../samples/nodejs"
		translator := source.Any2KubeTranslator{}
		plan := plantypes.NewPlan()
		plan.Name = "nodejs-app"
		plan.Spec.Inputs.RootDir = inputPath
		want := []plantypes.Service{}
		if err := common.ReadYaml("testdata/expectedservicesfornodejsapp.yaml", &want); err != nil {
			t.Fatal("Failed to read the expected output services from yaml. Error:", err)
		}
		// Test
		services, err := translator.GetServiceOptions(inputPath, plan)
		if err != nil {
			t.Fatal("Failed to get the services. Error:", err)
		}
		if !reflect.DeepEqual(services, want) {
			t.Fatal("Failed to create the services properly. Expected:", want, "Actual", services)
		}
	})
	t.Run("get services from a simple nodejs app and filled plan", func(t *testing.T) {
		// Setup
		inputPath := "../../samples/nodejs"
		translator := source.Any2KubeTranslator{}
		// services already present in the plan before scanning
		svc1 := plantypes.NewService("svc1", "Any2Kube")
		svc1.SourceArtifacts[plantypes.SourceDirectoryArtifactType] = []string{"foo/"}
		svc2 := plantypes.NewService("svc2", "Any2Kube")
		svc2.SourceArtifacts[plantypes.SourceDirectoryArtifactType] = []string{"bar/"}
		plan := plantypes.NewPlan()
		plan.Name = "nodejs-app"
		plan.Spec.Inputs.RootDir = inputPath
		plan.Spec.Inputs.Services = map[string][]plantypes.Service{
			"svc1": {svc1},
			"svc2": {svc2},
		}
		want := []plantypes.Service{}
		if err := common.ReadYaml("testdata/expectedservicesfornodejsapp.yaml", &want); err != nil {
			t.Fatal("Failed to read the expected output services from yaml. Error:", err)
		}
		// Test
		services, err := translator.GetServiceOptions(inputPath, plan)
		if err != nil {
			t.Fatal("Failed to get the services. Error:", err)
		}
		if !reflect.DeepEqual(services, want) {
			t.Fatal("Failed to create the services properly. Expected:", want, "Actual", services)
		}
	})
	t.Run("get services from a simple nodejs app that we already containerized", func(t *testing.T) {
		// Setup
		inputPath := "../../samples/nodejs"
		translator := source.Any2KubeTranslator{}
		// a service already covering the whole root dir ("."), so no
		// new services are expected
		svc1 := plantypes.NewService("svc1", "Any2Kube")
		svc1.SourceArtifacts[plantypes.SourceDirectoryArtifactType] = []string{"."}
		plan := plantypes.NewPlan()
		plan.Name = "nodejs-app"
		plan.Spec.Inputs.RootDir = inputPath
		plan.Spec.Inputs.Services = map[string][]plantypes.Service{
			"svc1": {svc1},
		}
		want := []plantypes.Service{}
		// if err := common.ReadYaml("testdata/expectedservicesfornodejsapp.yaml", &want); err != nil {
		// 	t.Fatal("Failed to read the expected output services from yaml. Error:", err)
		// }
		// Test
		services, err := translator.GetServiceOptions(inputPath, plan)
		if err != nil {
			t.Fatal("Failed to get the services. Error:", err)
		}
		if !reflect.DeepEqual(services, want) {
			t.Fatal("Failed to create the services properly. Expected:", want, "Actual", services)
		}
	})
	t.Run("test m2kignore can ignore a directory but include its subdirectories", func(t *testing.T) {
		// 1. Ignore a directory, but include all subdirectories
		// Setup
		inputPath := "testdata/nodejsappwithm2kignorecase1"
		translator := source.Any2KubeTranslator{}
		plan := plantypes.NewPlan()
		plan.Name = "nodejs-app"
		plan.Spec.Inputs.RootDir = inputPath
		want := []plantypes.Service{}
		if err := common.ReadYaml("testdata/expectedservicesfornodejsappwithm2kignorecase1.yaml", &want); err != nil {
			t.Fatal("Failed to read the expected output services from yaml. Error:", err)
		}
		// Test
		services, err := translator.GetServiceOptions(inputPath, plan)
		if err != nil {
			t.Fatal("Failed to get the services. Error:", err)
		}
		if !reflect.DeepEqual(services, want) {
			t.Fatal("Failed to create the services properly. Expected:", want, "Actual", services)
		}
	})
	t.Run("test m2kignore can be used to ignore everything but a very specific subdirectory", func(t *testing.T) {
		// Setup
		inputPath := "testdata/javamavenappwithm2kignorecase2"
		translator := source.Any2KubeTranslator{}
		plan := plantypes.NewPlan()
		plan.Name = "nodejs-app"
		plan.Spec.Inputs.RootDir = inputPath
		want := []plantypes.Service{}
		if err := common.ReadYaml("testdata/expectedservicesforjavamavenappwithm2kignorecase2.yaml", &want); err != nil {
			t.Fatal("Failed to read the expected output services from yaml. Error:", err)
		}
		// Test
		services, err := translator.GetServiceOptions(inputPath, plan)
		if err != nil {
			t.Fatal("Failed to get the services. Error:", err)
		}
		// if err := common.WriteYaml("testdata/hmm.yaml", services); err != nil {
		// 	t.Fatal("error", err)
		// }
		if !reflect.DeepEqual(services, want) {
			t.Fatal("Failed to create the services properly. Expected:", want, "Actual", services)
		}
	})
	t.Run("test m2kignore can include a directory but ignore all subdirectories", func(t *testing.T) {
		// 2. Include a directory, ignore all subdirectories or a subset of subdirectories
		// TODO: Note that while m2kignore might work as expected, the buildpacks do not.
		// The CNB buildpacks when run on a directory will ALWAYS look inside all of its subdirectories as well.
		// Setup
		// We create the following directory structure:
		// .
		inputpath := t.TempDir()
		// ./includeme/
		// ./includeme/excludeme/
		subdirpath := filepath.Join(inputpath, "includeme")
		subsubdirpath := filepath.Join(subdirpath, "excludeme")
		if err := os.MkdirAll(subsubdirpath, common.DefaultDirectoryPermission); err != nil {
			t.Fatal("Failed to create a temporary directory for testing at path", subsubdirpath, "Error:", err)
		}
		// .m2kignore
		testdatapath := "testdata/m2kignoreforignorecontents"
		ignorerules, err := ioutil.ReadFile(testdatapath)
		if err != nil {
			t.Fatal("Failed to read the testdata at", testdatapath, "Error:", err)
		}
		ignorefilepath := filepath.Join(inputpath, common.IgnoreFilename)
		if err := ioutil.WriteFile(ignorefilepath, ignorerules, common.DefaultFilePermission); err != nil {
			t.Fatal("Failed to create a temporary file for testing at path", ignorefilepath, "Error:", err)
		}
		// ./includeme/excludeme/package.json (deliberately invalid JSON)
		fpath := filepath.Join(subsubdirpath, "package.json")
		if err := ioutil.WriteFile(fpath, []byte("this is ' invalid json"), common.DefaultFilePermission); err != nil {
			t.Fatal("Failed to create a temporary file for testing at path", fpath, "Error:", err)
		}
		translator := source.Any2KubeTranslator{}
		plan := plantypes.NewPlan()
		want := []plantypes.Service{}
		// Test
		services, err := translator.GetServiceOptions(inputpath, plan)
		if err != nil {
			t.Fatal("Failed to get the services. Error:", err)
		}
		if !reflect.DeepEqual(services, want) {
			t.Fatal("Failed to get the services properly. Expected:", want, "actual:", services)
		}
	})
	t.Run("test multiple hierarchical m2kignores", func(t *testing.T) {
		// TODO: Note that while m2kignore might work as expected, the buildpacks do not.
		// The CNB buildpacks when run on a directory will ALWAYS look inside all of its subdirectories as well.
		// The behaviour of the CNB buildpacks makes it virtually impossible to test this scenario.
		// This test can really only be checked through vscode debugging to make sure the correct directories are being ignored.
		// Setup
		// We create the following directory structure:
		/*
			.
			├── .m2kignore
			├── a
			│   └── a
			│       ├── .m2kignore
			│       ├── a
			│       │   ├── a
			│       │   ├── b
			│       │   ├── c
			│       │   └── d
			│       └── b
			│           ├── a
			│           └── b
			├── b
			│   ├── .m2kignore
			│   └── a
			│       ├── a
			│       └── b
			└── c
			    └── a
		*/
		tempdir := t.TempDir()
		// The tree above is shipped as a tarball and extracted into tempdir.
		testdatapath := "testdata/testmultiplem2kignores.tar"
		tarbytes, err := ioutil.ReadFile(testdatapath)
		if err != nil {
			t.Fatalf("Failed to read the test data at path %q Error: %q", testdatapath, err)
		}
		tarstring := base64.StdEncoding.EncodeToString(tarbytes)
		err = common.UnTarString(tarstring, tempdir)
		if err != nil {
			t.Fatalf("Failed to untar the test data into path %q Error: %q", tempdir, err)
		}
		inputpath := filepath.Join(tempdir, "testmultiplem2kignores")
		translator := source.Any2KubeTranslator{}
		plan := plantypes.NewPlan()
		want := []plantypes.Service{}
		// Test
		services, err := translator.GetServiceOptions(inputpath, plan)
		if err != nil {
			t.Fatal("Failed to get the services. Error:", err)
		}
		if !reflect.DeepEqual(services, want) {
			t.Fatal("Failed to get the services properly. Expected:", want, "actual:", services)
		}
	})
}
// TestTranslate checks Any2KubeTranslator.Translate: an empty service
// list with an empty plan yields a fresh IR, and a canned service list
// yields YAML matching the recorded expected IR fixture.
func TestTranslate(t *testing.T) {
	log.SetLevel(log.DebugLevel)
	t.Run("get intermediate representation with no services and an empty plan", func(t *testing.T) {
		// Setup
		// inputpath := "this/does/not/exit/foobar/"
		translator := source.Any2KubeTranslator{}
		services := []plantypes.Service{}
		plan := plantypes.NewPlan()
		want := irtypes.NewIR(plan)
		// Test
		ir, err := translator.Translate(services, plan)
		if err != nil {
			t.Fatal("Failed to get the intermediate representation. Error:", err)
		}
		if !reflect.DeepEqual(ir, want) {
			t.Fatal("Failed to get the intermediate representation properly. Expected:", want, "actual:", ir)
		}
	})
	t.Run("get intermediate representation with some services and an empty plan", func(t *testing.T) {
		// Setup
		translator := source.Any2KubeTranslator{}
		// Input
		services := []plantypes.Service{}
		testdataservices := "testdata/datafortestingtranslate/servicesfromnodejsapp.yaml"
		if err := common.ReadYaml(testdataservices, &services); err != nil {
			t.Fatalf("Failed to read the testdata at path %q Error: %q", testdataservices, err)
		}
		plan := plantypes.NewPlan()
		// Output
		testdataoutput := "testdata/datafortestingtranslate/expectedirfornodejsapp.yaml"
		wantbytes, err := ioutil.ReadFile(testdataoutput)
		if err != nil {
			t.Fatalf("Failed to read the testdata at path %q Error: %q", testdataoutput, err)
		}
		wantyaml := string(wantbytes)
		// Test
		ir, err := translator.Translate(services, plan)
		if err != nil {
			t.Fatal("Failed to get the intermediate representation. Error:", err)
		}
		// Compare as YAML text against the recorded fixture.
		irbytes, err := yaml.Marshal(ir)
		if err != nil {
			t.Fatal("Failed to marshal the intermediate representation to yaml for comparison. Error:", err)
		}
		iryaml := string(irbytes)
		if iryaml != wantyaml {
			t.Fatal("Failed to get the intermediate representation properly. Expected:", wantyaml, "actual:", iryaml)
		}
	})
}
|
// Support the Gherkin language, as found in Ruby's Cucumber and Python's Lettuce projects.
package gherkin
import "io"
import matchers "github.com/tychofreeman/go-matchers"
// DefaultRunner is the package-level Runner that the free functions in
// this file delegate to, so test files can use the package API directly.
var DefaultRunner = CreateRunner()
// Pending lets a step implementation signal that the test is not yet
// complete; it panics with the message "Pending".
func Pending() {
	panic("Pending")
}
// SetUp registers the setup function on the default runner;
// pass-through for Runner.SetSetUpFn().
func SetUp(setup func()) {
	DefaultRunner.SetSetUpFn(setup)
}

// TearDown registers the teardown function on the default runner;
// pass-through for Runner.SetTearDownFn().
func TearDown(teardown func()) {
	DefaultRunner.SetTearDownFn(teardown)
}

// RegisterStepDef binds a step pattern to its implementation;
// pass-through for Runner.RegisterStepDef().
func RegisterStepDef(pattern string, stepdef interface{}) {
	DefaultRunner.RegisterStepDef(pattern, stepdef)
}
// Given, When, Then, and And each register a step definition on the
// default runner; the Gherkin keyword is purely for readability — all
// four are identical to RegisterStepDef.

// Given registers a step definition.
func Given(pattern string, stepdef interface{}) {
	DefaultRunner.RegisterStepDef(pattern, stepdef)
}

// When registers a step definition.
func When(pattern string, stepdef interface{}) {
	DefaultRunner.RegisterStepDef(pattern, stepdef)
}

// Then registers a step definition.
func Then(pattern string, stepdef interface{}) {
	DefaultRunner.RegisterStepDef(pattern, stepdef)
}

// And registers a step definition.
func And(pattern string, stepdef interface{}) {
	DefaultRunner.RegisterStepDef(pattern, stepdef)
}
// SetOutput redirects the default runner's output to the given writer;
// pass-through for Runner.SetOutput().
func SetOutput(output io.Writer) {
	DefaultRunner.SetOutput(output)
}

// Run executes the default runner against ctx, reporting failures
// through t; pass-through for Runner.Run(). This should be called
// after everything else (all steps registered).
func Run(t matchers.Errorable, ctx interface{}) {
	DefaultRunner.Run(t, ctx)
}
|
package powervs_test
import (
"fmt"
"os"
"testing"
"github.com/IBM-Cloud/power-go-client/power/models"
"github.com/IBM/vpc-go-sdk/vpcv1"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
machinev1 "github.com/openshift/api/machine/v1"
machinev1beta1 "github.com/openshift/api/machine/v1beta1"
"github.com/openshift/installer/pkg/asset/installconfig/powervs"
"github.com/openshift/installer/pkg/asset/installconfig/powervs/mock"
"github.com/openshift/installer/pkg/ipnet"
"github.com/openshift/installer/pkg/types"
powervstypes "github.com/openshift/installer/pkg/types/powervs"
)
// editFunctions is an ordered list of mutations applied to a base
// InstallConfig to produce a test case's input.
type editFunctions []func(ic *types.InstallConfig)
// Shared fixtures for the Power VS validation tests: a valid
// region/zone, DNS and VPC identifiers, canned DNS-record responses,
// and InstallConfig edit helpers used by the table-driven cases below.
var (
	validRegion                  = "lon"
	validCIDR                    = "192.168.0.0/24"
	validCISInstanceCRN          = "crn:v1:bluemix:public:internet-svcs:global:a/valid-account-id:valid-instance-id::"
	validClusterName             = "valid-cluster-name"
	validDNSZoneID               = "valid-zone-id"
	validBaseDomain              = "valid.base.domain"
	validPowerVSResourceGroup    = "valid-resource-group"
	validPublicSubnetUSSouth1ID  = "public-subnet-us-south-1-id"
	validPublicSubnetUSSouth2ID  = "public-subnet-us-south-2-id"
	validPrivateSubnetUSSouth1ID = "private-subnet-us-south-1-id"
	validPrivateSubnetUSSouth2ID = "private-subnet-us-south-2-id"
	validServiceInstanceID       = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
	validSubnets                 = []string{
		validPublicSubnetUSSouth1ID,
		validPublicSubnetUSSouth2ID,
		validPrivateSubnetUSSouth1ID,
		validPrivateSubnetUSSouth2ID,
	}
	validUserID = "valid-user@example.com"
	validZone   = "lon04"

	// Canned CIS DNS-record lookup results.
	existingDNSRecordsResponse = []powervs.DNSRecordResponse{
		{
			Name: "valid-dns-record-name-1",
			Type: "valid-dns-record-type",
		},
		{
			Name: "valid-dns-record-name-2",
			Type: "valid-dns-record-type",
		},
	}
	noDNSRecordsResponse = []powervs.DNSRecordResponse{}

	// InstallConfig mutators used as table-test edits.
	invalidArchitecture    = func(ic *types.InstallConfig) { ic.ControlPlane.Architecture = "ppc64" }
	cidrInvalid, _         = ipnet.ParseCIDR("192.168.0.0/16")
	invalidMachinePoolCIDR = func(ic *types.InstallConfig) { ic.Networking.MachineNetwork[0].CIDR = *cidrInvalid }
	cidrValid, _           = ipnet.ParseCIDR("192.168.0.0/24")
	validMachinePoolCIDR   = func(ic *types.InstallConfig) { ic.Networking.MachineNetwork[0].CIDR = *cidrValid }

	// VPC fixtures.
	validVPCRegion    = "eu-gb"
	invalidVPCRegion  = "foo-bah"
	setValidVPCRegion = func(ic *types.InstallConfig) { ic.Platform.PowerVS.VPCRegion = validVPCRegion }
	validRG           = "valid-resource-group"
	anotherValidRG    = "another-valid-resource-group"
	validVPCID        = "valid-id"
	anotherValidVPCID = "another-valid-id"
	validVPC          = "valid-vpc"
	setValidVPCName   = func(ic *types.InstallConfig) { ic.Platform.PowerVS.VPCName = validVPC }
	anotherValidVPC   = "another-valid-vpc"
	invalidVPC        = "bogus-vpc"
	validVPCs         = []vpcv1.VPC{
		{
			Name: &validVPC,
			ID:   &validVPCID,
			ResourceGroup: &vpcv1.ResourceGroupReference{
				Name: &validRG,
				ID:   &validRG,
			},
		},
		{
			Name: &anotherValidVPC,
			ID:   &anotherValidVPCID,
			ResourceGroup: &vpcv1.ResourceGroupReference{
				Name: &anotherValidRG,
				ID:   &anotherValidRG,
			},
		},
	}
	validVPCSubnet   = "valid-vpc-subnet"
	invalidVPCSubnet = "invalid-vpc-subnet"
	wrongVPCSubnet   = "wrong-vpc-subnet"
	// validSubnet is attached to validVPC; wrongSubnet exists but is
	// attached to a different VPC.
	validSubnet = &vpcv1.Subnet{
		Name: &validRG,
		VPC: &vpcv1.VPCReference{
			Name: &validVPC,
			ID:   &validVPCID,
		},
		ResourceGroup: &vpcv1.ResourceGroupReference{
			Name: &validRG,
			ID:   &validRG,
		},
	}
	wrongSubnet = &vpcv1.Subnet{
		Name: &validRG,
		VPC: &vpcv1.VPCReference{
			Name: &anotherValidVPC,
			ID:   &anotherValidVPCID,
		},
		ResourceGroup: &vpcv1.ResourceGroupReference{
			Name: &validRG,
			ID:   &validRG,
		},
	}
)
// validInstallConfig returns a minimal install config that passes
// validation: external publishing, a /24 machine network, and ppc64le
// control-plane and compute pools on the minimal Power VS platform.
func validInstallConfig() *types.InstallConfig {
	return &types.InstallConfig{
		ObjectMeta: metav1.ObjectMeta{
			Name: validClusterName,
		},
		BaseDomain: validBaseDomain,
		Networking: &types.Networking{
			MachineNetwork: []types.MachineNetworkEntry{
				{CIDR: *ipnet.MustParseCIDR(validCIDR)},
			},
		},
		Publish: types.ExternalPublishingStrategy,
		Platform: types.Platform{
			PowerVS: validMinimalPlatform(),
		},
		ControlPlane: &types.MachinePool{
			Architecture: "ppc64le",
		},
		Compute: []types.MachinePool{{
			Architecture: "ppc64le",
		}},
	}
}
// validMinimalPlatform returns the smallest Power VS platform stanza
// used by validInstallConfig: resource group, region, service
// instance, user, and zone.
func validMinimalPlatform() *powervstypes.Platform {
	return &powervstypes.Platform{
		PowerVSResourceGroup: validPowerVSResourceGroup,
		Region:               validRegion,
		ServiceInstanceID:    validServiceInstanceID,
		UserID:               validUserID,
		Zone:                 validZone,
	}
}
// validMachinePool returns an empty Power VS machine pool (all defaults).
func validMachinePool() *powervstypes.MachinePool {
	return &powervstypes.MachinePool{}
}
// TestValidate drives powervs.Validate through table-driven cases,
// applying each case's edits to a fresh valid InstallConfig and
// matching the aggregated error (if any) against a regexp.
func TestValidate(t *testing.T) {
	cases := []struct {
		name     string
		edits    editFunctions
		errorMsg string
	}{
		{
			name:     "valid install config",
			edits:    editFunctions{},
			errorMsg: "",
		},
		{
			name:     "invalid architecture",
			edits:    editFunctions{invalidArchitecture},
			errorMsg: `^controlPlane.architecture\: Unsupported value\: \"ppc64\"\: supported values: \"ppc64le\"`,
		},
		{
			name:     "invalid machine pool CIDR",
			edits:    editFunctions{invalidMachinePoolCIDR},
			errorMsg: `Networking.MachineNetwork.CIDR: Invalid value: "192.168.0.0/16": Machine Pool CIDR must be /24.`,
		},
		{
			name:     "valid machine pool CIDR",
			edits:    editFunctions{validMachinePoolCIDR},
			errorMsg: "",
		},
	}
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// Each case mutates its own copy of the valid base config.
			editedInstallConfig := validInstallConfig()
			for _, edit := range tc.edits {
				edit(editedInstallConfig)
			}
			aggregatedErrors := powervs.Validate(editedInstallConfig)
			if tc.errorMsg != "" {
				assert.Regexp(t, tc.errorMsg, aggregatedErrors)
			} else {
				assert.NoError(t, aggregatedErrors)
			}
		})
	}
}
// TestValidatePreExistingPublicDNS checks ValidatePreExistingDNS across
// four scenarios: no existing records, duplicate records, a zone-ID
// lookup failure, and a record lookup failure.
// NOTE(review): all mock expectations are queued up front and (for
// identical calls) are presumably consumed in FIFO order, so the case
// order must match the mock setup order — confirm against gomock docs.
func TestValidatePreExistingPublicDNS(t *testing.T) {
	cases := []struct {
		name     string
		edits    editFunctions
		errorMsg string
	}{
		{
			name:     "no pre-existing DNS records",
			errorMsg: "",
		},
		{
			name:     "pre-existing DNS records",
			errorMsg: `^\[baseDomain\: Duplicate value\: \"record api\.valid-cluster-name\.valid\.base\.domain already exists in CIS zone \(valid-zone-id\) and might be in use by another cluster, please remove it to continue\", baseDomain\: Duplicate value\: \"record api-int\.valid-cluster-name\.valid\.base\.domain already exists in CIS zone \(valid-zone-id\) and might be in use by another cluster, please remove it to continue\"\]$`,
		},
		{
			name:     "cannot get zone ID",
			errorMsg: `^baseDomain: Internal error$`,
		},
		{
			name:     "cannot get DNS records",
			errorMsg: `^baseDomain: Internal error$`,
		},
	}
	setMockEnvVars()
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	powervsClient := mock.NewMockAPI(mockCtrl)
	metadata := mock.NewMockMetadataAPI(mockCtrl)
	// The api. and api-int. records that each cluster claims.
	dnsRecordNames := [...]string{fmt.Sprintf("api.%s.%s", validClusterName, validBaseDomain), fmt.Sprintf("api-int.%s.%s", validClusterName, validBaseDomain)}
	// Mock common to all tests
	metadata.EXPECT().CISInstanceCRN(gomock.Any()).Return(validCISInstanceCRN, nil).AnyTimes()
	// Mocks: no pre-existing DNS records
	powervsClient.EXPECT().GetDNSZoneIDByName(gomock.Any(), validBaseDomain, types.ExternalPublishingStrategy).Return(validDNSZoneID, nil)
	for _, dnsRecordName := range dnsRecordNames {
		powervsClient.EXPECT().GetDNSRecordsByName(gomock.Any(), validCISInstanceCRN, validDNSZoneID, dnsRecordName, types.ExternalPublishingStrategy).Return(noDNSRecordsResponse, nil)
	}
	// Mocks: pre-existing DNS records
	powervsClient.EXPECT().GetDNSZoneIDByName(gomock.Any(), validBaseDomain, types.ExternalPublishingStrategy).Return(validDNSZoneID, nil)
	for _, dnsRecordName := range dnsRecordNames {
		powervsClient.EXPECT().GetDNSRecordsByName(gomock.Any(), validCISInstanceCRN, validDNSZoneID, dnsRecordName, types.ExternalPublishingStrategy).Return(existingDNSRecordsResponse, nil)
	}
	// Mocks: cannot get zone ID
	powervsClient.EXPECT().GetDNSZoneIDByName(gomock.Any(), validBaseDomain, types.ExternalPublishingStrategy).Return("", fmt.Errorf(""))
	// Mocks: cannot get DNS records
	powervsClient.EXPECT().GetDNSZoneIDByName(gomock.Any(), validBaseDomain, types.ExternalPublishingStrategy).Return(validDNSZoneID, nil)
	for _, dnsRecordName := range dnsRecordNames {
		powervsClient.EXPECT().GetDNSRecordsByName(gomock.Any(), validCISInstanceCRN, validDNSZoneID, dnsRecordName, types.ExternalPublishingStrategy).Return(nil, fmt.Errorf(""))
	}
	// Run tests
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			aggregatedErrors := powervs.ValidatePreExistingDNS(powervsClient, validInstallConfig(), metadata)
			if tc.errorMsg != "" {
				assert.Regexp(t, tc.errorMsg, aggregatedErrors)
			} else {
				assert.NoError(t, aggregatedErrors)
			}
		})
	}
}
// TestValidateCustomVPCSettings checks ValidateCustomVPCSetup for every
// combination of VPC region / VPC name / VPC subnet being absent,
// valid, invalid, or mismatched.
// NOTE(review): as in TestValidatePreExistingPublicDNS, the mock
// expectations are queued before the loop and must line up one-to-one
// with the case order below.
func TestValidateCustomVPCSettings(t *testing.T) {
	cases := []struct {
		name     string
		edits    editFunctions
		errorMsg string
	}{
		{
			name: "invalid VPC region supplied alone",
			edits: editFunctions{
				func(ic *types.InstallConfig) {
					ic.Platform.PowerVS.VPCRegion = invalidVPCRegion
				},
			},
			errorMsg: fmt.Sprintf(`VPC.vpcRegion: Not found: "%s"`, invalidVPCRegion),
		},
		{
			name: "valid VPC region supplied alone",
			edits: editFunctions{
				func(ic *types.InstallConfig) {
					ic.Platform.PowerVS.VPCRegion = validVPCRegion
				},
			},
			errorMsg: "",
		},
		{
			name: "invalid VPC name supplied, without VPC region, not found near PowerVS region",
			edits: editFunctions{
				func(ic *types.InstallConfig) {
					ic.Platform.PowerVS.VPCName = invalidVPC
				},
			},
			errorMsg: fmt.Sprintf(`VPC.vpcName: Not found: "%s"`, invalidVPC),
		},
		{
			name: "valid VPC name supplied, without VPC region, but found close to PowerVS region",
			edits: editFunctions{
				setValidVPCName,
			},
			errorMsg: "",
		},
		{
			name: "valid VPC name, with invalid VPC region",
			edits: editFunctions{
				setValidVPCName,
				func(ic *types.InstallConfig) {
					ic.Platform.PowerVS.VPCRegion = invalidVPCRegion
				},
			},
			errorMsg: "VPC.vpcRegion: Internal error: unknown region",
		},
		{
			name: "valid VPC name, valid VPC region",
			edits: editFunctions{
				setValidVPCName,
				setValidVPCRegion,
			},
			errorMsg: "",
		},
		{
			name: "VPC subnet supplied, without vpcName",
			edits: editFunctions{
				func(ic *types.InstallConfig) {
					ic.Platform.PowerVS.VPCSubnets = []string{validVPCSubnet}
				},
			},
			errorMsg: `VPC.vpcSubnets: Invalid value: "null": invalid without vpcName`,
		},
		{
			name: "VPC found, but not subnet",
			edits: editFunctions{
				setValidVPCName,
				func(ic *types.InstallConfig) {
					ic.Platform.PowerVS.VPCSubnets = []string{invalidVPCSubnet}
				},
			},
			errorMsg: "VPC.vpcSubnets: Internal error",
		},
		{
			name: "VPC found, subnet found as well, but not attached to the VPC",
			edits: editFunctions{
				setValidVPCName,
				func(ic *types.InstallConfig) {
					ic.Platform.PowerVS.VPCSubnets = []string{wrongVPCSubnet}
				},
			},
			errorMsg: `VPC.vpcSubnets: Invalid value: "null": not attached to VPC`,
		},
		{
			name: "region specified, VPC found, subnet found, and properly attached",
			edits: editFunctions{
				setValidVPCName,
				setValidVPCRegion,
				func(ic *types.InstallConfig) {
					ic.Platform.PowerVS.VPCSubnets = []string{validVPCSubnet}
				},
			},
			errorMsg: "",
		},
	}
	setMockEnvVars()
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	powervsClient := mock.NewMockAPI(mockCtrl)
	// Mocks: invalid VPC region only
	// nothing to mock
	// Mocks: valid VPC region only
	// nothing to mock
	// Mocks: invalid VPC name results in error
	powervsClient.EXPECT().GetVPCs(gomock.Any(), validVPCRegion).Return(validVPCs, nil)
	// Mocks: valid VPC name only, no issues
	powervsClient.EXPECT().GetVPCs(gomock.Any(), validVPCRegion).Return(validVPCs, nil)
	// Mocks: valid VPC name, invalid VPC region
	powervsClient.EXPECT().GetVPCs(gomock.Any(), invalidVPCRegion).Return(nil, fmt.Errorf("unknown region"))
	// Mocks: valid VPC name, valid VPC region, all good
	powervsClient.EXPECT().GetVPCs(gomock.Any(), validVPCRegion).Return(validVPCs, nil)
	// Mocks: subnet specified, without vpcName, invalid
	// nothing to mock
	// Mocks: valid VPC name, but Subnet not found
	powervsClient.EXPECT().GetVPCs(gomock.Any(), validVPCRegion).Return(validVPCs, nil)
	powervsClient.EXPECT().GetSubnetByName(gomock.Any(), invalidVPCSubnet, validVPCRegion).Return(nil, fmt.Errorf(""))
	// Mocks: valid VPC name, but wrong Subnet (present, but not attached)
	powervsClient.EXPECT().GetVPCs(gomock.Any(), validVPCRegion).Return(validVPCs, nil)
	powervsClient.EXPECT().GetSubnetByName(gomock.Any(), wrongVPCSubnet, validVPCRegion).Return(wrongSubnet, nil)
	// Mocks: region specified, valid VPC, valid region, valid Subnet, all good
	powervsClient.EXPECT().GetVPCs(gomock.Any(), validVPCRegion).Return(validVPCs, nil)
	powervsClient.EXPECT().GetSubnetByName(gomock.Any(), validVPCSubnet, validVPCRegion).Return(validSubnet, nil)
	// Run tests
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			editedInstallConfig := validInstallConfig()
			for _, edit := range tc.edits {
				edit(editedInstallConfig)
			}
			aggregatedErrors := powervs.ValidateCustomVPCSetup(powervsClient, editedInstallConfig)
			if tc.errorMsg != "" {
				assert.Regexp(t, tc.errorMsg, aggregatedErrors)
			} else {
				assert.NoError(t, aggregatedErrors)
			}
		})
	}
}
// createControlPlanes builds numControlPlanes control-plane Machine objects
// that all embed the given Power VS provider config, for use as test input.
func createControlPlanes(numControlPlanes int, controlPlane *machinev1.PowerVSMachineProviderConfig) []machinev1beta1.Machine {
	machines := make([]machinev1beta1.Machine, numControlPlanes)
	for idx := range machines {
		machines[idx].TypeMeta = metav1.TypeMeta{
			Kind:       "Machine",
			APIVersion: "machine.openshift.io/v1beta1",
		}
		machines[idx].ObjectMeta = metav1.ObjectMeta{
			Name:      fmt.Sprintf("rdr-hamzy-test3-syd04-zwmgs-master-%d", idx),
			Namespace: "openshift-machine-api",
			Labels: map[string]string{
				"machine.openshift.io/cluster-api-cluster":      "rdr-hamzy-test3-syd04-zwmgs",
				"machine.openshift.io/cluster-api-machine-role": "master",
				"machine.openshift.io/cluster-api-machine-type": "master",
			},
		}
		machines[idx].Spec.ProviderSpec = machinev1beta1.ProviderSpec{
			Value: &runtime.RawExtension{
				Raw:    nil,
				Object: controlPlane,
			},
		}
	}
	return machines
}
// createComputes builds the compute MachineSets embedding the given Power VS
// provider config.
// Note: the slice deliberately has length 1 — the worker count is expressed
// through Spec.Replicas (numComputes), not through multiple MachineSets.
func createComputes(numComputes int32, compute *machinev1.PowerVSMachineProviderConfig) []machinev1beta1.MachineSet {
	computes := make([]machinev1beta1.MachineSet, 1)
	computes[0].Spec.Replicas = &numComputes
	computes[0].Spec.Template.Spec.ProviderSpec = machinev1beta1.ProviderSpec{
		Value: &runtime.RawExtension{
			Raw:    nil,
			Object: compute,
		},
	}
	return computes
}
// TestSystemPool exercises powervs.ValidateCapacityWithPools against system
// pools that are short on cores or memory for either the control-plane
// ("compute" in the error text) or worker machines, plus one pool with
// sufficient capacity. The original version repeated five near-identical
// pool literals and inline pointer helpers; they are collapsed into local
// helper closures here.
func TestSystemPool(t *testing.T) {
	setMockEnvVars()

	// Pointer helpers for the models' pointer-typed fields.
	f64 := func(v float64) *float64 { return &v }
	i64 := func(v int64) *int64 { return &v }

	// newPools builds a SystemPools map with a single pool whose capacity and
	// maximums all point at one System with the given cores and memory.
	newPools := func(name string, cores float64, memory int64) models.SystemPools {
		system := &models.System{
			Cores:  f64(cores),
			ID:     1,
			Memory: i64(memory),
		}
		return models.SystemPools{
			name: models.SystemPool{
				Capacity:           system,
				CoreMemoryRatio:    float64(1.0),
				MaxAvailable:       system,
				MaxCoresAvailable:  system,
				MaxMemoryAvailable: system,
				SharedCoreRatio: &models.MinMaxDefault{
					Default: f64(4),
					Max:     f64(4),
					Min:     f64(1),
				},
				Systems: []*models.System{
					system,
				},
				Type: "e980",
			},
		}
	}

	// Control-plane and compute machines share the same dedicated profile
	// (1 dedicated e980 core, 32 GiB each).
	newDedicated := func() machinev1.PowerVSMachineProviderConfig {
		return machinev1.PowerVSMachineProviderConfig{
			TypeMeta:      metav1.TypeMeta{Kind: "PowerVSMachineProviderConfig", APIVersion: "machine.openshift.io/v1"},
			KeyPairName:   "rdr-hamzy-test3-syd04-vcwtz-key",
			SystemType:    "e980",
			ProcessorType: "Dedicated",
			Processors:    intstr.IntOrString{Type: intstr.Int, IntVal: 1},
			MemoryGiB:     32,
		}
	}
	dedicatedControlPlane := newDedicated()
	dedicatedControlPlanes := createControlPlanes(5, &dedicatedControlPlane)
	dedicatedCompute := newDedicated()
	dedicatedComputes := createComputes(3, &dedicatedCompute)

	// 5 masters need 5 cores / 160 GiB; 3 workers need 3 cores / 96 GiB.
	systemPoolsNEComputeCores := newPools("NotEnoughComputeCores", 2, 256)
	systemPoolsNEWorkerCores := newPools("NotEnoughWorkerCores", 6, 256)
	systemPoolsNEComputeMemory := newPools("NotEnoughComputeMemory", 8, 32)
	systemPoolsNEWorkerMemory := newPools("NotEnoughWorkerMemory", 8, 192)
	systemPoolsGood := newPools("Enough", 8, 256)

	err := powervs.ValidateCapacityWithPools(dedicatedControlPlanes, dedicatedComputes, systemPoolsNEComputeCores)
	assert.EqualError(t, err, "Not enough cores available (2) for the compute nodes (need 5)")
	err = powervs.ValidateCapacityWithPools(dedicatedControlPlanes, dedicatedComputes, systemPoolsNEWorkerCores)
	assert.EqualError(t, err, "Not enough cores available (1) for the worker nodes (need 3)")
	err = powervs.ValidateCapacityWithPools(dedicatedControlPlanes, dedicatedComputes, systemPoolsNEComputeMemory)
	assert.EqualError(t, err, "Not enough memory available (32) for the compute nodes (need 160)")
	err = powervs.ValidateCapacityWithPools(dedicatedControlPlanes, dedicatedComputes, systemPoolsNEWorkerMemory)
	assert.EqualError(t, err, "Not enough memory available (32) for the worker nodes (need 96)")
	err = powervs.ValidateCapacityWithPools(dedicatedControlPlanes, dedicatedComputes, systemPoolsGood)
	assert.Empty(t, err)
}
// setMockEnvVars populates the environment variables the Power VS tooling
// reads so tests never depend on a developer's real credentials.
func setMockEnvVars() {
	mock := map[string]string{
		"POWERVS_AUTH_FILEPATH": "./tmp/powervs/config.json",
		"IBMID":                 "foo",
		"IC_API_KEY":            "foo",
		"IBMCLOUD_REGION":       "foo",
		"IBMCLOUD_ZONE":         "foo",
	}
	for key, value := range mock {
		os.Setenv(key, value)
	}
}
|
package ip
import (
"net"
)
// IP returns the first non-loopback IPv4 address found on a physical
// interface (one with a hardware address), or "" when none is available.
// The original collected every candidate address into a slice and returned
// element 0; since only the first match is ever used, we return it directly.
func IP() string {
	ifaces, err := net.Interfaces()
	if err != nil {
		return ""
	}
	for _, iface := range ifaces {
		// Skip virtual interfaces (loopback etc.) that have no MAC address.
		if len(iface.HardwareAddr) == 0 {
			continue
		}
		addrs, err := iface.Addrs()
		if err != nil {
			continue
		}
		for _, addr := range addrs {
			if ipNet, ok := addr.(*net.IPNet); ok && !ipNet.IP.IsLoopback() && ipNet.IP.To4() != nil {
				return ipNet.IP.String()
			}
		}
	}
	return ""
}
|
package server
import (
"github.com/kelseyhightower/envconfig"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"github.com/vgarvardt/rklotz/pkg/server/plugin"
"github.com/vgarvardt/rklotz/pkg/server/renderer"
"github.com/vgarvardt/rklotz/pkg/server/web"
)
// Config represents server configuration
type Config struct {
	// PostsDSN locates the posts source; PostsPerPage controls pagination.
	PostsDSN     string `envconfig:"POSTS_DSN" default:"file:///etc/rklotz/posts"`
	PostsPerPage int    `envconfig:"POSTS_PERPAGE" default:"10"`
	// StorageDSN selects the storage backend (BoltDB file by default).
	StorageDSN string `envconfig:"STORAGE_DSN" default:"boltdb:///tmp/rklotz.db"`

	// Embedded configs are flattened by envconfig into this struct's
	// environment-variable namespace.
	LogConfig
	web.HTTPConfig
	web.SSLConfig
	plugin.Config
	renderer.UIConfig
	renderer.RootURLConfig
}
// LogConfig represents logger configuration
type LogConfig struct {
	// Level is the minimum level to log at (a zap level name, e.g. "debug").
	Level string `envconfig:"LOG_LEVEL" default:"info"`
	// Type is attached to every log entry as the "type" field.
	Type string `envconfig:"LOG_TYPE" default:"rklotz"`
}
// BuildLogger builds and initialises logger with the values from the config:
// production defaults, no sampling, ISO8601 timestamps, string durations,
// and the configured type attached to every entry.
func (c *LogConfig) BuildLogger() (*zap.Logger, error) {
	var level zap.AtomicLevel
	if err := level.UnmarshalText([]byte(c.Level)); err != nil {
		return nil, err
	}

	cfg := zap.NewProductionConfig()
	cfg.Level = level
	// Development mode only when logging at debug level.
	cfg.Development = level.String() == zapcore.DebugLevel.String()
	cfg.Sampling = nil
	cfg.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
	cfg.EncoderConfig.EncodeDuration = zapcore.StringDurationEncoder
	cfg.InitialFields = map[string]interface{}{"type": c.Type}

	return cfg.Build()
}
// LoadConfig loads app settings from environment variables
func LoadConfig() (*Config, error) {
	cfg := new(Config)
	if err := envconfig.Process("", cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}
|
package _142_Linked_List_Cycle_2
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// detectCycle returns the node where the list's cycle begins, or nil when
// the list is acyclic (LeetCode 142); it delegates to the two-pointer
// solution below.
func detectCycle(head *ListNode) *ListNode {
	return detectCycleWithPointer(head)
}
// detectCycleWithPointer finds the node where a cycle begins using Floyd's
// tortoise-and-hare algorithm, or returns nil for an acyclic list.
//
// A dummy node is prepended so both pointers start one step before the real
// head; after the pointers meet inside the cycle, restarting slow from the
// dummy and advancing both one step at a time makes them meet exactly at the
// cycle entry. The original had an unreachable trailing `return nil` after
// an infinite loop; phase 2 is now a conditional loop instead.
func detectCycleWithPointer(head *ListNode) *ListNode {
	dummy := &ListNode{Next: head}
	fast, slow := dummy, dummy

	// Phase 1: advance fast by two, slow by one, until they meet or fast
	// runs off the end (no cycle).
	for {
		if fast.Next == nil || fast.Next.Next == nil {
			return nil
		}
		fast = fast.Next.Next
		slow = slow.Next
		if fast == slow {
			break
		}
	}

	// Phase 2: the next meeting point after restarting slow is the cycle
	// entry node.
	slow = dummy
	for slow != fast {
		slow = slow.Next
		fast = fast.Next
	}
	return slow
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package importer
import (
"github.com/pingcap/log"
"go.uber.org/zap"
)
// DoProcess generates data.
// It parses the table and index definitions, opens one DB connection per
// worker, creates the table and its indexes on the first connection, then
// fans row generation out across JobCount jobs on WorkerCount workers.
// Any setup failure aborts the process via log.Fatal.
func DoProcess(cfg *Config) {
	table := newTable()
	err := parseTableSQL(table, cfg.TableSQL)
	if err != nil {
		log.Fatal("parseTableSQL", zap.String("sql", cfg.TableSQL), zap.Error(err))
	}
	err = parseIndexSQL(table, cfg.IndexSQL)
	if err != nil {
		log.Fatal("parseIndexSQL", zap.Error(err))
	}
	dbs, err := createDBs(cfg.DBCfg, cfg.WorkerCount)
	if err != nil {
		log.Fatal("createDBs", zap.Error(err))
	}
	defer closeDBs(dbs)
	// DDL runs on a single connection; the others are for data generation.
	err = execSQL(dbs[0], cfg.TableSQL)
	if err != nil {
		log.Fatal("execSQL", zap.Error(err))
	}
	err = execSQL(dbs[0], cfg.IndexSQL)
	if err != nil {
		log.Fatal("execSQL", zap.Error(err))
	}
	doProcess(table, dbs, cfg.JobCount, cfg.WorkerCount, cfg.Batch)
}
|
package info
import (
"fmt"
grpc "github.com/antonioalfa22/egida/proto"
googlegrpc "google.golang.org/grpc"
"log"
)
// CreateServicesClient dials host:8128 without transport security and
// returns a Services gRPC client; it aborts the process if the dial fails.
func CreateServicesClient(host string) grpc.ServicesClient {
	addr := fmt.Sprintf("%s:%d", host, 8128)
	conn, err := googlegrpc.Dial(addr, googlegrpc.WithInsecure())
	if err != nil {
		log.Fatalf("Impossible connect: %v", err)
	}
	return grpc.NewServicesClient(conn)
}
// CreatePackagesClient dials host:8128 without transport security and
// returns a Packages gRPC client; it aborts the process if the dial fails.
func CreatePackagesClient(host string) grpc.PackagesClient {
	addr := fmt.Sprintf("%s:%d", host, 8128)
	conn, err := googlegrpc.Dial(addr, googlegrpc.WithInsecure())
	if err != nil {
		log.Fatalf("Impossible connect: %v", err)
	}
	return grpc.NewPackagesClient(conn)
}
// CreateHardeningClient dials host:8128 without transport security and
// returns a HardeningScores gRPC client; it aborts the process on failure.
func CreateHardeningClient(host string) grpc.HardeningScoresClient {
	addr := fmt.Sprintf("%s:%d", host, 8128)
	conn, err := googlegrpc.Dial(addr, googlegrpc.WithInsecure())
	if err != nil {
		log.Fatalf("Impossible connect: %v", err)
	}
	return grpc.NewHardeningScoresClient(conn)
}
func CreateMachineInfoClient(host string) grpc.MachineInfoClient {
addr := fmt.Sprintf("%s:%s", host, "8128")
conn, err := googlegrpc.Dial(addr, googlegrpc.WithInsecure())
if err != nil {
log.Fatalf("Impossible connect: %v", err)
}
return grpc.NewMachineInfoClient(conn)
} |
package main
import "fmt"
// person models an individual: first name, last name, and a list of
// favourite foods.
type person struct {
	fName   string
	lName   string
	favfood []string // favourite foods, printed by main below
}
// main builds a sample person and prints it three ways: the whole struct,
// the favourite-food slice, and each food on its own line via range.
func main() {
	me := person{
		fName:   "Sirop",
		lName:   "Lesperance",
		favfood: []string{"Katofel", "currywurst", "ketchup"},
	}

	fmt.Println(me)         // whole struct
	fmt.Println(me.favfood) // the slice as-is

	// one favourite food per line
	for _, food := range me.favfood {
		fmt.Println(food)
	}
}
|
package main
import (
"context"
"errors"
"fmt"
"github.com/amisevsk/workspace-bootstrap/library"
"log"
"os"
"strings"
"time"
devworkspace "github.com/devfile/api/pkg/apis/workspaces/v1alpha2"
k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
)
const (
	// repoDevfileEnvVar names the env var holding the path to the devfile
	// cloned with the project repository.
	repoDevfileEnvVar = "DEVFILE"
	// defaultDevfileEnvVar names the env var holding the fallback devfile path.
	defaultDevfileEnvVar = "DEFAULT_DEVFILE"
)
// stop logs the bootstrap failure (if any) and then sleeps for an hour —
// presumably to keep the container alive for inspection instead of
// crash-looping; confirm against the deployment.
// NOTE(review): it does not exit the process, so callers continue executing
// after the sleep returns.
func stop(err error) {
	if err != nil {
		log.Printf("Failed to bootstrap workspace: %s", err.Error())
	}
	time.Sleep(60 * time.Minute)
}
// main reads the DevWorkspace this pod belongs to, resolves which devfile to
// apply (repo-cloned or default, see getActualDevfile), and patches the
// DevWorkspace's template with it. On failure it logs and idles via stop.
func main() {
	log.Println("Beginning devfile bootstrap")
	client, err := library.GetK8sClient()
	if err != nil {
		stop(err)
	}
	dw, err := library.GetDevWorkspace(client)
	if err != nil {
		stop(err)
	}
	log.Println("Read DevWorkspace on cluster complete")
	devfile, err := getActualDevfile()
	if err != nil {
		stop(err)
	}
	log.Println("Read devfile complete")
	// Replace only the template; other DevWorkspace fields are left alone.
	dw.Spec.Template = devfile.Spec.Template
	err = client.Patch(context.Background(), dw, k8sclient.Merge)
	if err != nil {
		stop(fmt.Errorf("failed to update DevWorkspace with devfile from repository: %w", err))
	}
	log.Println("Updated DevWorkspace with spec from repository")
	stop(nil)
}
// getActualDevfile decides which devfile to apply: the one cloned into the
// repository (path from $DEVFILE) when present and supported, otherwise the
// default (path from $DEFAULT_DEVFILE). It errors when neither path is set.
func getActualDevfile() (*devworkspace.DevWorkspace, error) {
	repoDevfilePath := os.Getenv(repoDevfileEnvVar)
	defaultDevfilePath := os.Getenv(defaultDevfileEnvVar)
	// NOTE(review): this log fires even when repoDevfilePath is empty.
	log.Printf("Reading devfile.yaml from repo cloned to %s", strings.TrimSuffix(repoDevfilePath, "devfile.yaml"))
	if repoDevfilePath == "" && defaultDevfilePath == "" {
		return nil, fmt.Errorf("could not find devfile and no default is set")
	}
	if repoDevfilePath != "" {
		devfile, err := library.ReadDevfile(repoDevfilePath)
		if err != nil {
			// An unsupported schema version falls back to the default
			// devfile rather than failing the bootstrap.
			if errors.Is(err, library.ErrInvalidSchemaVersion) {
				log.Printf("Devfile found in repository is unsupported; using default DevWorkspace")
				return library.ReadDevfile(defaultDevfilePath)
			}
			return nil, fmt.Errorf("failed to read repo devfile: %w", err)
		}
		return devfile, nil
	}
	log.Printf("Cloned repository does not contain devfile.yaml; using default DevWorkspace")
	return library.ReadDevfile(defaultDevfilePath)
}
|
package main
import (
"fmt"
"strconv"
)
// Package-level variables are zero-initialized by Go: 0, "", false.
var numero int
var texto string
var status bool
// main demonstrates zero-value initialization, multiple assignment, and
// int-to-string conversion with strconv.
func main() {
	// Go zero-initializes these to 0 automatically.
	var first, second int
	third, label := 2, "Texto"
	fourth, fifth := 56, 33
	fmt.Println("Hello World")
	sixth := 3
	fmt.Println(first)
	fmt.Println(second)
	fmt.Println(third)
	fmt.Println(label)
	fmt.Println(fourth)
	fmt.Println(fifth)
	fmt.Println(sixth)
	// booleans start out false
	fmt.Println(status)
	mostrarStatus()
	// strconv provides conversions between basic data types
	texto = strconv.Itoa(third)
	fmt.Println(texto)
}
// mostrarStatus ("show status") prints the package-level status flag.
func mostrarStatus() {
	fmt.Println(status)
}
|
/* RZFeeser | Alta3 Research
HTTP GET with io.Copy() */
package main
import (
"io"
"log"
"net/http"
"os"
)
// main fetches http://webcode.me and streams the response body to stdout.
func main() {
	resp, err := http.Get("http://webcode.me")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// io.Copy copies from source to destination until EOF or error; it
	// returns the byte count and the first error encountered, if any.
	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
		log.Fatal(err)
	}
}
|
package repository
import (
. "2019_2_IBAT/pkg/pkg/models"
"fmt"
"github.com/google/uuid"
"github.com/pkg/errors"
)
// CreateSeeker inserts a new seeker row into persons and reports success.
// The password is stored exactly as received in seekerInput.Password.
func (m *DBUserStorage) CreateSeeker(seekerInput Seeker) bool {
	if _, err := m.DbConn.Exec(
		"INSERT INTO persons(id, email, first_name, second_name, password_hash, role, path_to_image)"+
			"VALUES($1, $2, $3, $4, $5, $6, $7);",
		seekerInput.ID, seekerInput.Email, seekerInput.FirstName,
		seekerInput.SecondName, seekerInput.Password, SeekerStr, seekerInput.PathToImg,
	); err != nil {
		fmt.Printf("CreateSeeker: %s\n", err)
		return false
	}
	return true
}
// GetSeekers returns every seeker profile together with the ids of the
// resumes each seeker owns. Failures are logged and surfaced as the generic
// internal error.
func (m *DBUserStorage) GetSeekers() ([]Seeker, error) { //not tested
	seekers := []Seeker{}
	rows, err := m.DbConn.Queryx("SELECT id, email, first_name, second_name,"+
		"path_to_image FROM persons WHERE role = $1;", SeekerStr)
	if err != nil {
		fmt.Printf("GetSeekers: %s\n", err)
		return seekers, errors.New(InternalErrorMsg)
	}
	defer rows.Close()
	for rows.Next() {
		seek := Seeker{}
		err = rows.StructScan(&seek)
		if err != nil {
			fmt.Printf("GetSeekers: %s\n", err)
			return seekers, errors.New(InternalErrorMsg)
		}
		id_rows, err := m.DbConn.Query("SELECT r.id FROM resumes AS r WHERE r.own_id = $1;", seek.ID)
		if err != nil {
			fmt.Printf("GetSeekers: %s\n", err)
			return seekers, errors.New(InternalErrorMsg)
		}
		resumes := make([]uuid.UUID, 0)
		for id_rows.Next() {
			var id uuid.UUID
			err = id_rows.Scan(&id)
			if err != nil {
				// Close this seeker's cursor before bailing out.
				id_rows.Close()
				fmt.Printf("GetSeekers: %s\n", err)
				return seekers, errors.New(InternalErrorMsg)
			}
			resumes = append(resumes, id)
		}
		// Close each per-seeker cursor as soon as it is drained. The
		// original used `defer` inside this loop, which kept every cursor
		// open until the whole function returned.
		id_rows.Close()
		seek.Resumes = resumes
		seekers = append(seekers, seek)
	}
	return seekers, nil
}
// GetSeeker loads a single seeker profile by id, including the ids of all
// resumes the seeker owns.
func (m *DBUserStorage) GetSeeker(id uuid.UUID) (Seeker, error) {
	seeker := Seeker{}

	row := m.DbConn.QueryRowx("SELECT id, email, first_name, second_name,"+
		" path_to_image FROM persons WHERE id = $1;", id)
	if err := row.StructScan(&seeker); err != nil {
		fmt.Printf("GetSeeker: %s\n", err)
		return seeker, errors.New(InternalErrorMsg)
	}

	resumeRows, err := m.DbConn.Query("SELECT r.id FROM resumes AS r WHERE r.own_id = $1;", seeker.ID)
	if err != nil {
		fmt.Printf("GetSeeker: %s\n", err)
		return seeker, errors.New(InternalErrorMsg)
	}
	defer resumeRows.Close()

	resumeIDs := make([]uuid.UUID, 0)
	for resumeRows.Next() {
		var resumeID uuid.UUID
		if err := resumeRows.Scan(&resumeID); err != nil {
			fmt.Printf("GetSeeker: %s\n", err)
			return seeker, errors.New(InternalErrorMsg)
		}
		resumeIDs = append(resumeIDs, resumeID)
	}

	seeker.Resumes = resumeIDs
	return seeker, nil
}
// PutSeeker updates a seeker's account fields by id and reports success.
func (m *DBUserStorage) PutSeeker(seekerInput SeekerReg, id uuid.UUID) bool {
	if _, err := m.DbConn.Exec(
		"UPDATE persons SET email = $1, first_name = $2, second_name = $3, password_hash = $4"+
			" WHERE id = $5;",
		seekerInput.Email, seekerInput.FirstName,
		seekerInput.SecondName, seekerInput.Password, id,
	); err != nil {
		fmt.Printf("PutSeeker: %s\n", err)
		return false
	}
	return true
}
|
package main
import (
"fmt"
"net/http"
"time"
"github.com/gin-contrib/cors"
"github.com/gin-gonic/gin"
)
// main runs a gin server on :8889 whose POST /trap endpoint logs the Cookie
// and Authorization headers it receives; CORS allows credentialed POSTs
// from http://localhost:8888 only.
func main() {
	router := gin.Default()
	router.Use(cors.New(
		cors.Config{
			AllowOrigins:     []string{"http://localhost:8888"},
			AllowMethods:     []string{"POST"},
			AllowHeaders:     []string{"Origin", "Cookie", "Authorization"},
			AllowCredentials: true, // cookies are forwarded cross-origin
			MaxAge:           12 * time.Hour,
		}))
	router.POST("trap", func(c *gin.Context) {
		fmt.Println("Cookie", c.Request.Header["Cookie"])
		fmt.Println("Authorization", c.Request.Header["Authorization"])
		c.JSON(http.StatusOK, gin.H{"code": 0, "msg": "success"})
	})
	router.Run(":8889")
}
|
package dependencyinjection
import (
"log"
"net/http"
)
// MyGreeterHandler greets "world" on every request by delegating to
// OtherGreet (defined elsewhere in this package).
func MyGreeterHandler(w http.ResponseWriter, r *http.Request) {
	OtherGreet(w, "world")
}
// main would serve MyGreeterHandler on :5000.
// NOTE(review): this package is "dependencyinjection", not "main", so this
// function is never an entrypoint — confirm whether it is intentional
// example code.
func main() {
	log.Fatal(http.ListenAndServe(":5000", http.HandlerFunc(MyGreeterHandler)))
}
|
package main
import (
"fmt"
"gopkg.in/vinxi/replay.v0"
"gopkg.in/vinxi/vinxi.v0"
"net/http"
)
// main demonstrates the vinxi replay middleware: a proxy on :3100 forwards
// traffic to httpbin.org while replaying each request to two local servers
// (:3123 and :3124) and logging the replay outcomes.
func main() {
	vs := vinxi.NewServer(vinxi.ServerOptions{Host: "localhost", Port: 3100})
	// Both replay targets share this handler, which just echoes a marker.
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Printf("Replay server reached: %s => %s\n", r.RemoteAddr, r.URL.String())
		w.Write([]byte("replay server"))
	})
	srv1 := &http.Server{Addr: "localhost:3123"}
	srv2 := &http.Server{Addr: "localhost:3124"}
	srv1.Handler = handler
	srv2.Handler = handler
	go srv1.ListenAndServe()
	go srv2.ListenAndServe()
	replayer := replay.New("http://localhost:3123", "http://localhost:3124")
	// Log each replay's error or status code.
	replayer.SetHandler(func(err error, res *http.Response, req *http.Request) {
		if err != nil {
			fmt.Printf("Replay error: %s => %s\n", req.URL.String(), err)
			return
		}
		fmt.Printf("Replay response: %s => %d\n", req.URL.String(), res.StatusCode)
	})
	vs.Use(replayer)
	vs.Forward("http://httpbin.org")
	fmt.Printf("Server listening on port: %d\n", 3100)
	err := vs.Listen()
	if err != nil {
		fmt.Printf("Error: %s\n", err)
	}
}
|
package wallet
import (
"crypto/ecdsa"
"log"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
"golang.org/x/crypto/sha3"
)
// GeneratePrivateKey creates a fresh ECDSA private key via go-ethereum's
// crypto package, terminating the process if generation fails.
func GeneratePrivateKey() *ecdsa.PrivateKey {
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal("error generating private key", err)
	}
	return key
}
// GetPrivateKeyBytes serializes the private key to its raw byte form.
func GetPrivateKeyBytes(privateKey *ecdsa.PrivateKey) []byte {
	return crypto.FromECDSA(privateKey)
}
// Private key to sign transactions
// GetPrivateKeyHex returns the private key as a hex string with hexutil's
// leading "0x" prefix (the first two characters) stripped.
func GetPrivateKeyHex(privateKey *ecdsa.PrivateKey) string {
	privateKeyBytes := GetPrivateKeyBytes(privateKey)
	return hexutil.Encode(privateKeyBytes)[2:]
}
// GetPublicKey returns the key's public half as a generic interface{};
// use GetPublicKeyECDSA to recover the concrete *ecdsa.PublicKey.
func GetPublicKey(privateKey *ecdsa.PrivateKey) interface{} {
	return privateKey.Public()
}
// GetPublicKeyECDSA asserts the generic public key to *ecdsa.PublicKey,
// terminating the process when the value has a different dynamic type.
func GetPublicKeyECDSA(publicKey interface{}) *ecdsa.PublicKey {
	key, ok := publicKey.(*ecdsa.PublicKey)
	if !ok {
		log.Fatal("error casting public key to ECDSA")
	}
	return key
}
// GetPublicKeyBytes serializes the ECDSA public key to its raw byte form.
func GetPublicKeyBytes(publicKeyECDSA *ecdsa.PublicKey) []byte {
	return crypto.FromECDSAPub(publicKeyECDSA)
}
// GetPublicKeyHex returns the public key as hex with the first four
// characters sliced off — the "0x" prefix plus, presumably, the "04"
// uncompressed-point tag emitted by FromECDSAPub (confirm against the
// go-ethereum serialization).
func GetPublicKeyHex(publicKeyECDSA *ecdsa.PublicKey) string {
	publicKeyBytes := GetPublicKeyBytes(publicKeyECDSA)
	return hexutil.Encode(publicKeyBytes)[4:]
}
// Public address that we usually see
// GetPublicAddress derives the hex-encoded account address for the key.
func GetPublicAddress(publicKeyECDSA *ecdsa.PublicKey) string {
	return crypto.PubkeyToAddress(*publicKeyECDSA).Hex()
}
// GetPublicAddressLegacy derives the address by hand: Keccak-256 over the
// public key bytes minus the leading byte, keeping the last 20 bytes of the
// 32-byte digest (Sum(nil)[12:]), hex-encoded with a "0x" prefix.
func GetPublicAddressLegacy(publicKeyECDSA *ecdsa.PublicKey) string {
	publicKeyBytes := GetPublicKeyBytes(publicKeyECDSA)
	hash := sha3.NewLegacyKeccak256()
	hash.Write(publicKeyBytes[1:])
	return hexutil.Encode(hash.Sum(nil)[12:])
}
|
package _137_Single_Number_2
// Times is how many times every non-unique element appears (LeetCode 137).
const Times = 3

// singleNumber returns the element that appears exactly once when all other
// elements appear Times times; it delegates to the bit-manipulation variant.
func singleNumber(nums []int) int {
	//return singleNumberWithCalcu(nums)
	return singleNumberWithBit(nums)
}
// Bit-manipulation solution: a holds the bits seen once (mod 3), b the bits
// seen twice; a bit's third occurrence clears it from both, so a ends up
// holding exactly the unique element.
//
// Bug fix: the original updated b as `b = (^a ^ num) ^ b`, which corrupts
// the state (e.g. []int{2, 2, 2, 3} returned 0 instead of 3). The canonical
// pairing masks the freshly-updated a: `b = (b ^ num) & ^a`.
func singleNumberWithBit(nums []int) int {
	a, b := 0, 0
	for _, num := range nums {
		a = (a ^ num) & ^b
		b = (b ^ num) & ^a
	}
	return a
}
// Arithmetic-sum approach (intentionally not implemented): de-duplicate the
// values, multiply that sum by 3, and subtract the original total — the
// difference is twice the target value.
func singleNumberWithSum(nums []int) int {
	// ...
	return 0
}
// Counting solution: tally how often each value occurs, then return the one
// whose count differs from Times. The original nested forward-scan with a
// parallel mark slice was O(n²); a single-pass map count is O(n) and far
// simpler. Returns 0 when every element occurs exactly Times times.
func singleNumberWithCalcu(nums []int) int {
	counts := make(map[int]int, len(nums))
	for _, num := range nums {
		counts[num]++
	}
	for _, num := range nums {
		if counts[num] != Times {
			return num
		}
	}
	return 0
}
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugins
import (
"reflect"
"testing"
v1 "k8s.io/api/core/v1"
)
// TestTranslateAllowedTopologies checks that translateAllowedTopologies
// rewrites the legacy beta zone label key to the CSI topology key, passes
// through terms already using the CSI key, and rejects unknown keys.
func TestTranslateAllowedTopologies(t *testing.T) {
	testCases := []struct {
		name            string
		topology        []v1.TopologySelectorTerm
		expectedToplogy []v1.TopologySelectorTerm
		expErr          bool
	}{
		{
			// already uses the CSI key: passed through unchanged
			name:     "no translation",
			topology: generateToplogySelectors(GCEPDTopologyKey, []string{"foo", "bar"}),
			expectedToplogy: []v1.TopologySelectorTerm{
				{
					MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
						{
							Key:    GCEPDTopologyKey,
							Values: []string{"foo", "bar"},
						},
					},
				},
			},
		},
		{
			// legacy beta zone label is rewritten to the CSI key
			name: "translate",
			topology: []v1.TopologySelectorTerm{
				{
					MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
						{
							Key:    "failure-domain.beta.kubernetes.io/zone",
							Values: []string{"foo", "bar"},
						},
					},
				},
			},
			expectedToplogy: []v1.TopologySelectorTerm{
				{
					MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
						{
							Key:    GCEPDTopologyKey,
							Values: []string{"foo", "bar"},
						},
					},
				},
			},
		},
		{
			// mixed legacy + CSI requirements: both end up on the CSI key
			name: "combo",
			topology: []v1.TopologySelectorTerm{
				{
					MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
						{
							Key:    "failure-domain.beta.kubernetes.io/zone",
							Values: []string{"foo", "bar"},
						},
						{
							Key:    GCEPDTopologyKey,
							Values: []string{"boo", "baz"},
						},
					},
				},
			},
			expectedToplogy: []v1.TopologySelectorTerm{
				{
					MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
						{
							Key:    GCEPDTopologyKey,
							Values: []string{"foo", "bar"},
						},
						{
							Key:    GCEPDTopologyKey,
							Values: []string{"boo", "baz"},
						},
					},
				},
			},
		},
		{
			// unrecognized keys cannot be translated and must error
			name: "some other key",
			topology: []v1.TopologySelectorTerm{
				{
					MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
						{
							Key:    "test",
							Values: []string{"foo", "bar"},
						},
					},
				},
			},
			expErr: true,
		},
	}
	for _, tc := range testCases {
		t.Logf("Running test: %v", tc.name)
		gotTop, err := translateAllowedTopologies(tc.topology, GCEPDTopologyKey)
		if err != nil && !tc.expErr {
			t.Errorf("Did not expect an error, got: %v", err)
		}
		if err == nil && tc.expErr {
			t.Errorf("Expected an error but did not get one")
		}
		if !reflect.DeepEqual(gotTop, tc.expectedToplogy) {
			t.Errorf("Expected topology: %v, but got: %v", tc.expectedToplogy, gotTop)
		}
	}
}
// TestAddTopology checks that addTopology builds the expected node affinity
// on a PersistentVolume: empty or whitespace-only zone lists error out,
// whitespace entries are dropped, and zones are sorted in the result.
func TestAddTopology(t *testing.T) {
	testCases := []struct {
		name             string
		topologyKey      string
		zones            []string
		expErr           bool
		expectedAffinity *v1.VolumeNodeAffinity
	}{
		{
			name:        "empty zones",
			topologyKey: GCEPDTopologyKey,
			zones:       nil,
			expErr:      true,
		},
		{
			name:        "only whitespace-named zones",
			topologyKey: GCEPDTopologyKey,
			zones:       []string{" ", "\n", "\t", " "},
			expErr:      true,
		},
		{
			// whitespace entries are filtered; the real zone remains
			name:        "including whitespace-named zones",
			topologyKey: GCEPDTopologyKey,
			zones:       []string{" ", "us-central1-a"},
			expErr:      false,
			expectedAffinity: &v1.VolumeNodeAffinity{
				Required: &v1.NodeSelector{
					NodeSelectorTerms: []v1.NodeSelectorTerm{
						{
							MatchExpressions: []v1.NodeSelectorRequirement{
								{
									Key:      GCEPDTopologyKey,
									Operator: v1.NodeSelectorOpIn,
									Values:   []string{"us-central1-a"},
								},
							},
						},
					},
				},
			},
		},
		{
			name:        "unsorted zones",
			topologyKey: GCEPDTopologyKey,
			zones:       []string{"us-central1-f", "us-central1-a", "us-central1-c", "us-central1-b"},
			expErr:      false,
			expectedAffinity: &v1.VolumeNodeAffinity{
				Required: &v1.NodeSelector{
					NodeSelectorTerms: []v1.NodeSelectorTerm{
						{
							MatchExpressions: []v1.NodeSelectorRequirement{
								{
									Key:      GCEPDTopologyKey,
									Operator: v1.NodeSelectorOpIn,
									// Values are expected to be ordered
									Values: []string{"us-central1-a", "us-central1-b", "us-central1-c", "us-central1-f"},
								},
							},
						},
					},
				},
			},
		},
	}
	for _, tc := range testCases {
		t.Logf("Running test: %v", tc.name)
		// Each case starts from a fresh, affinity-free PV.
		pv := &v1.PersistentVolume{
			Spec: v1.PersistentVolumeSpec{},
		}
		err := addTopology(pv, tc.topologyKey, tc.zones)
		if err != nil && !tc.expErr {
			t.Errorf("Did not expect an error, got: %v", err)
		}
		if err == nil && tc.expErr {
			t.Errorf("Expected an error but did not get one")
		}
		if err == nil && !reflect.DeepEqual(pv.Spec.NodeAffinity, tc.expectedAffinity) {
			t.Errorf("Expected affinity: %v, but got: %v", tc.expectedAffinity, pv.Spec.NodeAffinity)
		}
	}
}
|
// Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
package wasmlib
import "encoding/binary"
const (
	// all TYPE_* values should exactly match the counterpart OBJTYPE_* values on the host!
	TYPE_ARRAY int32 = 0x20

	TYPE_ADDRESS     int32 = 1
	TYPE_AGENT_ID    int32 = 2
	TYPE_BYTES       int32 = 3
	TYPE_CHAIN_ID    int32 = 4
	TYPE_COLOR       int32 = 5
	TYPE_CONTRACT_ID int32 = 6
	TYPE_HASH        int32 = 7
	TYPE_HNAME       int32 = 8
	TYPE_INT         int32 = 9
	TYPE_MAP         int32 = 10
	TYPE_STRING      int32 = 11
)

// typeSizes holds the fixed byte size for each TYPE_* id, indexed by that
// id (0 marks variable-size types); GetBytes uses it to fabricate zero
// values for missing entries.
var typeSizes = [...]int{0, 33, 37, 0, 33, 32, 37, 32, 4, 8, 0, 0}
// ScHost is the interface the sandbox host must implement; object, key,
// and type ids mirror the host-side values.
type ScHost interface {
	Exists(objId int32, keyId int32, typeId int32) bool
	GetBytes(objId int32, keyId int32, typeId int32) []byte
	GetKeyIdFromBytes(bytes []byte) int32
	GetKeyIdFromString(key string) int32
	GetObjectId(objId int32, keyId int32, typeId int32) int32
	SetBytes(objId int32, keyId int32, typeId int32, value []byte)
}
// \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\
// host is the currently connected sandbox host backing every package-level
// accessor below.
var host ScHost

// ConnectHost installs h as the active host and returns the previous one so
// callers can restore it later.
func ConnectHost(h ScHost) ScHost {
	oldHost := host
	host = h
	return oldHost
}
// Exists reports whether the host object holds a value for the given key
// and type.
func Exists(objId int32, keyId Key32, typeId int32) bool {
	return host.Exists(objId, int32(keyId), typeId)
}
// GetBytes fetches the raw value from the host; a missing value comes back
// as a zero-filled buffer of the type's fixed size (see typeSizes).
func GetBytes(objId int32, keyId Key32, typeId int32) []byte {
	bytes := host.GetBytes(objId, int32(keyId), typeId)
	if len(bytes) == 0 {
		return make([]byte, typeSizes[typeId])
	}
	return bytes
}
// GetKeyIdFromBytes asks the host to intern a byte key and returns its id.
func GetKeyIdFromBytes(bytes []byte) Key32 {
	return Key32(host.GetKeyIdFromBytes(bytes))
}
// GetKeyIdFromString asks the host to intern a string key and returns its id.
func GetKeyIdFromString(key string) Key32 {
	return Key32(host.GetKeyIdFromString(key))
}
// GetLength reads the object's length property as a little-endian int64
// stored under KeyLength.
func GetLength(objId int32) int32 {
	return int32(binary.LittleEndian.Uint64(GetBytes(objId, KeyLength, TYPE_INT)))
}
// GetObjectId resolves the id of a sub-object of the given type under the
// given key.
func GetObjectId(objId int32, keyId Key32, typeId int32) int32 {
	return host.GetObjectId(objId, int32(keyId), typeId)
}
// SetBytes writes a raw value into the host object under the given key.
func SetBytes(objId int32, keyId Key32, typeId int32, value []byte) {
	host.SetBytes(objId, int32(keyId), typeId, value)
}
// SetClear empties a container object by writing an 8-byte zero length.
func SetClear(objId int32) {
	bytes := make([]byte, 8)
	SetBytes(objId, KeyLength, TYPE_INT, bytes)
}
|
package main
import(
"conn"
)
// main starts a TCP server listening on all interfaces at port 55555.
func main() {
	conn.NewTcpServer("0.0.0.0", 55555)
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2021/12/28 8:51 上午
# @File : lt_38_外观数列.go
# @Description :
# @Attention :
*/
package hot100
import (
"strconv"
"strings"
)
/*
n=5的时候:
1
11
21
1211
111221
一步一步来
给一个数,这个数是1
描述上一步的数,这个数是 1 即一个1,故写作11
描述上一步的数,这个数是11即两个1,故写作21
描述上一步的数,这个数是21即一个2一个1,故写作12-11
描述上一步的数,这个数是1211即一个1一个2两个1,故写作11-12-21
*/
// countAndSay returns the n-th term of the count-and-say sequence
// (LeetCode 38): each term describes the previous one as runs of
// "count + digit", starting from "1".
func countAndSay(n int) string {
	result := "1"
	for round := 2; round <= n; round++ {
		var next strings.Builder
		i := 0
		for i < len(result) {
			// Measure the run of identical digits starting at i.
			digit := result[i]
			runLen := 0
			for i < len(result) && result[i] == digit {
				i++
				runLen++
			}
			// Describe the run: its length followed by the digit itself.
			next.WriteString(strconv.Itoa(runLen))
			next.WriteByte(digit)
		}
		result = next.String()
	}
	return result
}
|
package controllers
import (
"github.com/insisthzr/echo-test/cookbook/twitter/models"
"github.com/insisthzr/echo-test/cookbook/twitter/utils"
"github.com/dgrijalva/jwt-go"
"github.com/labstack/echo"
"gopkg.in/mgo.v2/bson"
)
// Signup registers a new user from the request body.
// Responses: 401 when email or password is missing, 400 when the user
// already exists, 201 with the created user (password blanked) on success.
// NOTE(review): the password is stored as received — no hashing is visible
// here; confirm it is hashed elsewhere before AddUser persists it.
func Signup(c echo.Context) error {
	user := new(models.User)
	err := c.Bind(user)
	if err != nil {
		return err
	}
	if user.Email == "" || user.Password == "" {
		return c.String(401, "email or password is nil")
	}
	exist, err := models.UserExist(user.Email)
	if err != nil {
		return err
	}
	if exist {
		return c.String(400, "user exist")
	}
	user.ID = bson.NewObjectId()
	err = user.AddUser()
	if err != nil {
		return err
	}
	// Never echo the password back to the client.
	user.Password = ""
	return c.JSON(201, user)
}
// Login verifies the credentials in the request body and returns the user
// with a freshly signed JWT in the Token field (password blanked).
// NOTE(review): passwords are compared in plaintext here, which implies
// plaintext storage — confirm, and consider a password hash (e.g. bcrypt).
func Login(c echo.Context) error {
	user := new(models.User)
	err := c.Bind(user)
	if err != nil {
		return err
	}
	if user.Email == "" || user.Password == "" {
		return c.String(400, "email or password is nil")
	}
	existUser, err := models.FindUserByEmail(user.Email)
	if err != nil {
		return err
	}
	if existUser == nil {
		return c.String(400, "user not exist")
	}
	if user.Password != existUser.Password {
		return c.String(400, "email or password is incorrect")
	}
	token := utils.NewToken(existUser.ID.String())
	signedString, err := utils.NewSignedString(token)
	if err != nil {
		return err
	}
	existUser.Token = signedString
	existUser.Password = "" // don't send password
	return c.JSON(200, existUser)
}
// Follow makes the authenticated user (taken from the JWT stored in the
// context) follow the user whose id is in the :to route parameter, and
// echoes both ids back on success.
func Follow(c echo.Context) error {
	from := utils.UserIDFromToken(c.Get("user").(*jwt.Token))
	to := c.Param("to")
	err := models.AddFollower(bson.ObjectIdHex(to), from)
	if err != nil {
		return err
	}
	return c.JSON(200, map[string]string{
		"from": from,
		"to":   to,
	})
}
|
package mars
import (
"math/rand"
"sort"
)
// Compact field types for core instructions; int8 keeps each cell small.
type (
	opcode              int8
	addressMode         int8
	instructionModifier int8
)
// Supported Redcode opcodes.
const (
	insnDAT opcode = iota
	insnMOV
	insnADD
	insnSUB
	insnJMP
	insnJMZ
	insnDJN
	insnCMP
	insnSPL
)

// Operand addressing modes.
const (
	addrIMMEDIATE addressMode = iota
	addrRELATIVE
	addrINDIRECT
	addrDECREMENTINDIRECT
)

// Instruction modifiers selecting which operand fields an opcode acts on.
const (
	modifierAB instructionModifier = iota
	modifierB
	modifierI
	modifierF
)
type location struct {
opcode opcode
modifier instructionModifier
aAddr, bAddr addressMode
aField, bField int
}
type process struct {
redcode *Redcode
index int
nextThread int
threads []int
}
// Core is a Redcode execution core
type Core struct {
size int
minInterval int
cells []location
processes []*process
winnerIndex int
running bool
runningCount int
}
// Redcode is a ready-to-run Redcode program
type Redcode struct {
name, filename, author string
start int
instructions []location
}
// clampValues normalizes both operand fields of the instruction into the
// canonical [0, coreSize) range.
func (loc *location) clampValues(coreSize int) {
	loc.aField = clampValue(coreSize, loc.aField)
	loc.bField = clampValue(coreSize, loc.bField)
}
// Name returns the program's declared name, or "Unknown" when the source
// did not declare one.
func (program *Redcode) Name() string {
	if program.name == "" {
		// Fixed typo: previously returned "Unknonw".
		return "Unknown"
	}
	return program.name
}
// Author returns the program author's name, defaulting to "Anonymous"
// when none was declared.
func (program *Redcode) Author() string {
	if author := program.author; author != "" {
		return author
	}
	return "Anonymous"
}
// PrepareAddresses clamps every instruction's operand fields into the
// valid range for a core of the given size.
func (program *Redcode) PrepareAddresses(coreSize int) {
	for i := range program.instructions {
		program.instructions[i].clampValues(coreSize)
	}
}
// NewCore creates a new Core instance with the specified size
// (all cells zero-valued; minInterval is the guaranteed gap between
// loaded programs; winnerIndex -1 means "no winner yet").
func NewCore(size int) *Core {
	return &Core{
		winnerIndex: -1,
		running: true,
		size: size,
		minInterval: 100,
		cells: make([]location, size),
	}
}
// LoadPrograms places the programs into the core at randomized,
// non-overlapping offsets (each pair separated by at least minInterval
// cells) and creates one single-threaded process per program.
// NOTE(review): rnd.Intn panics when available <= 0 — this assumes the core
// is big enough for all programs plus the mandatory gaps; confirm callers.
func (core *Core) LoadPrograms(programs []*Redcode, rnd *rand.Rand) {
	programCount := len(programs)
	// compute available size
	available := core.size
	available -= core.minInterval * programCount
	for _, program := range programs {
		available -= len(program.instructions)
	}
	// generate random numbers summing to available
	// (sorted cut points; adjacent differences become the extra gap sizes)
	sequence := make([]int, programCount+1)
	for i := 0; i < programCount-1; i++ {
		sequence[i] = rnd.Intn(available)
	}
	sequence[programCount-1] = 0
	sequence[programCount] = available
	sort.Ints(sequence)
	core.processes = make([]*process, 0, programCount)
	for index, program := range programs {
		core.processes = append(core.processes, &process{
			redcode: program,
			index: index,
			nextThread: 0,
			threads: []int{0}, // fixed later
		})
	}
	// Fisher-Yates shuffle
	for i := programCount - 1; i > 0; i-- {
		j := rnd.Intn(i + 1)
		core.processes[i], core.processes[j] = core.processes[j], core.processes[i]
	}
	// Load program instructions into the core
	baseAddress := rnd.Intn(core.size)
	for index, process := range core.processes {
		program := process.redcode
		interval := sequence[index+1] - sequence[index]
		copy(core.cells[baseAddress:], program.instructions)
		// Wrap the tail around to cell 0 when the program crosses the end
		// of the core.
		if baseAddress+len(program.instructions) > core.size {
			copy(core.cells, program.instructions[core.size-baseAddress:])
		}
		process.threads[0] = core.clampValue(baseAddress + program.start)
		baseAddress = (baseAddress + core.minInterval + len(program.instructions) + interval) % core.size
	}
}
// Step executes one instruction for every live process and updates the
// running/winner bookkeeping.
func (core *Core) Step() {
	runningCount := 0
	winnerIndex := 0
	for index, process := range core.processes {
		// A process with no threads has died; skip it.
		if len(process.threads) == 0 {
			continue
		}
		process.step(core)
		if len(process.threads) != 0 {
			runningCount++
			winnerIndex = index
		}
	}
	// Exactly one survivor means a win; zero survivors leaves winnerIndex
	// untouched (a draw, reported as Winner() == nil).
	if runningCount == 1 {
		core.winnerIndex = winnerIndex
	}
	core.running = runningCount > 1
	core.runningCount = runningCount
}
// IsComplete reports whether the battle has ended (one or zero live programs).
func (core *Core) IsComplete() bool {
	return !core.running
}
// Winner returns the winning program, or nil when there is no single winner
// (battle still running, or a draw).
func (core *Core) Winner() *Redcode {
	if core.winnerIndex == -1 {
		return nil
	}
	return core.processes[core.winnerIndex].redcode
}
// RunningCount returns the number of programs alive after the last Step.
func (core *Core) RunningCount() int {
	return core.runningCount
}
// RunningPrograms returns the programs that still have live threads.
func (core *Core) RunningPrograms() []*Redcode {
	result := make([]*Redcode, 0, core.runningCount)
	for _, process := range core.processes {
		if len(process.threads) == 0 {
			continue
		}
		result = append(result, process.redcode)
	}
	return result
}
// RunningProgramIndices returns the load-order indices of live programs.
func (core *Core) RunningProgramIndices() []int {
	result := make([]int, 0, core.runningCount)
	for _, process := range core.processes {
		if len(process.threads) == 0 {
			continue
		}
		result = append(result, process.index)
	}
	return result
}
// Run steps the core until the battle completes or maxCycles is reached.
// It panics when called on an already-finished core.
func (core *Core) Run(maxCycles int) {
	if !core.running {
		panic("Can't call Run() twice")
	}
	for i := 0; i < maxCycles && core.running; i++ {
		core.Step()
	}
}
// address resolves an operand to an absolute core index given the address
// of the executing instruction (base), the operand's mode, and its field
// value. addrDECREMENTINDIRECT mutates the pointed-to cell's B-field, so
// statement order in that branch is significant.
func (core *Core) address(base int, mode addressMode, field int) int {
	if mode == addrRELATIVE {
		address := core.clampValue(base + field)
		return address
	} else if mode == addrINDIRECT {
		pointer := core.clampValue(base + field)
		address := core.clampValue(pointer + core.cells[pointer].bField)
		return address
	} else if mode == addrDECREMENTINDIRECT {
		pointer := core.clampValue(base + field)
		// Pre-decrement semantics: the target is computed from bField-1 and
		// the stored bField is decremented (wrapped) as a side effect.
		address := core.clampValue(pointer + core.cells[pointer].bField - 1)
		core.cells[pointer].bField = core.clampValue(core.cells[pointer].bField - 1)
		return address
	} else if mode == addrIMMEDIATE {
		// Immediate operands refer to the executing instruction itself.
		return base
	}
	panic("Illegal address mode")
}
// clampValue maps an arbitrary (possibly negative) address onto a valid
// core index in [0, size).
func (core *Core) clampValue(address int) int {
	wrapped := address % core.size
	return (wrapped + core.size) % core.size
}
// clampValue maps an arbitrary (possibly negative) address onto the range
// [0, coreSize).
func clampValue(coreSize, address int) int {
	wrapped := address % coreSize
	return (wrapped + coreSize) % coreSize
}
|
package problems
// checkIfExist reports whether arr contains two distinct indices i, j with
// arr[i] == 2*arr[j]. Single pass with a seen-set: for each value we check
// whether its double, or (for even values) its half, was seen earlier.
func checkIfExist(arr []int) bool {
	seen := map[int]bool{}
	for _, v := range arr {
		if seen[v*2] || (v%2 == 0 && seen[v/2]) {
			return true
		}
		seen[v] = true
	}
	return false
}
|
package handler
import (
"backend-github-trending/log"
"backend-github-trending/model"
req "backend-github-trending/model/req"
"backend-github-trending/repository"
"github.com/dgrijalva/jwt-go"
"github.com/google/uuid"
"github.com/labstack/echo/v4"
"net/http"
"strings"
)
// RepoHandler serves GitHub-repo endpoints backed by GithubRepo storage.
type RepoHandler struct {
	GithubRepo repository.GithubRepo
}
// RepoTrending godoc
// @Summary Get all repo Trending on
// @Tags -service
// @Accept json
// @Produce json
// @Success 200 {object} model.Response
// @Success 500 {object} model.Response
// @Router /github/trending [GET]
// @return Repo trending
func (r RepoHandler) RepoTrending(c echo.Context) error {
	token := c.Get("user").(*jwt.Token)
	claims := token.Claims.(*model.JwtCustomClaims)
	// Previously the repository error was silently discarded; surface it.
	repos, err := r.GithubRepo.SelectRepos(c.Request().Context(), claims.UserId, 25)
	if err != nil {
		return c.JSON(http.StatusInternalServerError, model.Response{
			StatusCode: http.StatusInternalServerError,
			Message: err.Error(),
			Data: nil,
		})
	}
	// BuildBy is stored as a comma-separated list; expose it as a slice.
	for i, repo := range repos {
		repos[i].Contributors = strings.Split(repo.BuildBy, ",")
	}
	return c.JSON(http.StatusOK, model.Response{
		StatusCode: http.StatusOK,
		Message: "Xử lý thành công",
		Data: repos,
	})
}
// SelectBookmarks godoc
// @Summary Get all bookmarked repos of the current user
// @Tags -service
// @Accept json
// @Produce json
// @Success 200 {object} model.Response
// @Success 500 {object} model.Response
// @Router /github/trending [GET]
// TODO(review): the @Router above was copy-pasted from RepoTrending —
// confirm the actual bookmark route and update the annotation.
func (r RepoHandler) SelectBookmarks(c echo.Context) error {
	token := c.Get("user").(*jwt.Token)
	claims := token.Claims.(*model.JwtCustomClaims)
	// Previously the repository error was silently discarded; surface it.
	repos, err := r.GithubRepo.SelectAllBookmarks(c.Request().Context(), claims.UserId)
	if err != nil {
		return c.JSON(http.StatusInternalServerError, model.Response{
			StatusCode: http.StatusInternalServerError,
			Message: err.Error(),
			Data: nil,
		})
	}
	// BuildBy is stored as a comma-separated list; expose it as a slice.
	for i, repo := range repos {
		repos[i].Contributors = strings.Split(repo.BuildBy, ",")
	}
	return c.JSON(http.StatusOK, model.Response{
		StatusCode: http.StatusOK,
		Message: "Success!",
		Data: repos,
	})
}
// InSertBookmark godoc
// @Summary Insert Repo to bookmark table
// @Tags -service
// @Accept json
// @Produce json
// @Param data body req.ReqBookmark true "GithubRepo"
// @Success 200 {object} model.Response
// @Success 403 {object} model.Response
// @Success 400 {object} model.Response
// @Success 500 {object} model.Response
// @Router /bookmark/add [POST]
func (r RepoHandler) InsertBookmark(c echo.Context) error {
	req := req.ReqBookmark{}
	if err := c.Bind(&req); err != nil {
		return err
	}
	// validate the submitted payload
	err := c.Validate(req)
	if err != nil {
		return c.JSON(http.StatusBadRequest, model.Response{
			StatusCode: http.StatusBadRequest,
			Message: err.Error(),
		})
	}
	// identify the caller from the JWT claims
	token := c.Get("user").(*jwt.Token)
	claims := token.Claims.(*model.JwtCustomClaims)
	// generate a fresh bookmark id
	bId, err := uuid.NewUUID()
	if err != nil {
		log.Error(err.Error())
		return c.JSON(http.StatusForbidden, model.Response{
			StatusCode: http.StatusForbidden,
			Message: err.Error(),
			Data: nil,
		})
	}
	err = r.GithubRepo.InsertBookmark(
		c.Request().Context(),
		bId.String(),
		req.RepoName,
		claims.UserId)
	if err != nil {
		return c.JSON(http.StatusInternalServerError, model.Response{
			StatusCode: http.StatusInternalServerError,
			Message: err.Error(),
			Data: nil,
		})
	}
	return c.JSON(http.StatusOK, model.Response{
		StatusCode: http.StatusOK,
		Message: "Bookmark thành công",
		Data: nil,
	})
}
// DelBookmark godoc
// @Summary Delete Repo to bookmark table
// @Tags -service
// @Accept json
// @Produce json
// @Param data body req.ReqBookmark true "GithubRepo"
// @Success 200 {object} model.Response
// @Success 400 {object} model.Response
// @Success 500 {object} model.Response
// @Router /bookmark/delete [DELETE]
func (r RepoHandler) DelBookmark(c echo.Context) error {
	req := req.ReqBookmark{}
	if err := c.Bind(&req); err != nil {
		return err
	}
	// validate the submitted payload
	err := c.Validate(req)
	if err != nil {
		return c.JSON(http.StatusBadRequest, model.Response{
			StatusCode: http.StatusBadRequest,
			Message: err.Error(),
		})
	}
	// identify the caller from the JWT claims; only their bookmark is removed
	token := c.Get("user").(*jwt.Token)
	claims := token.Claims.(*model.JwtCustomClaims)
	err = r.GithubRepo.DelBookmark(
		c.Request().Context(),
		req.RepoName, claims.UserId)
	if err != nil {
		return c.JSON(http.StatusInternalServerError, model.Response{
			StatusCode: http.StatusInternalServerError,
			Message: err.Error(),
			Data: nil,
		})
	}
	return c.JSON(http.StatusOK, model.Response{
		StatusCode: http.StatusOK,
		Message: "Xoá bookmark thành công",
		Data: nil,
	})
}
package main
import "fmt"
// BasicColor holds an RGB triple with float32 channels.
type BasicColor struct {
	R float32
	G float32
	B float32
}
// Color combines a BasicColor with an alpha (opacity) channel.
type Color struct {
	Basic BasicColor
	Alpha float32
}
// main builds an opaque yellow color and prints it with field names.
func main() {
	c := Color{
		Basic: BasicColor{R: 1, G: 1, B: 0},
		Alpha: 1,
	}
	fmt.Printf("%+v", c)
}
|
package release
import (
"encoding/json"
"fmt"
"html/template"
"github.com/bosh-io/web/ui/nav"
bprel "github.com/cppforlife/bosh-provisioner/release"
semiver "github.com/cppforlife/go-semi-semantic/version"
"github.com/microcosm-cc/bluemonday"
"github.com/russross/blackfriday"
bhrelsrepo "github.com/bosh-io/web/release/releasesrepo"
)
// Release is the view model for one release version, combining the repo
// record with parsed release metadata for page rendering and the API.
type Release struct {
	relVerRec bhrelsrepo.ReleaseVersionRec
	Source Source
	Name string
	Version semiver.Version
	IsLatest bool
	CommitHash string
	Jobs []Job
	Packages []Package
	Graph Graph
	NavPrimary nav.Link
	// memoized notes
	notesInMarkdown *[]byte
}
// Graph renders the release's dependency graph as inline SVG.
type Graph interface {
	SVG() template.HTML
}
// releaseAPIRecord is the JSON shape emitted by Release.MarshalJSON.
type releaseAPIRecord struct {
	Name string `json:"name"`
	Version string `json:"version"`
	URL string `json:"url"`
	SHA1 string `json:"sha1"`
}
// ReleaseSorting sorts releases ascending by semantic version.
type ReleaseSorting []Release
// NewRelease builds a fully-populated Release view model from a repo
// record and a parsed release manifest.
func NewRelease(relVerRec bhrelsrepo.ReleaseVersionRec, r bprel.Release) Release {
	rel := Release{
		relVerRec: relVerRec,
		Source: NewSource(relVerRec.AsSource()),
		Name: r.Name,
		Version: relVerRec.Version(),
		CommitHash: r.CommitHash,
		IsLatest: false,
	}
	rel.Jobs = NewJobs(r.Jobs, rel)
	rel.Packages = NewPackages(r.Packages, rel)
	return rel
}
// NewIncompleteRelease builds a Release when only the name and repo record
// are known (no parsed manifest: no jobs, packages or commit hash).
func NewIncompleteRelease(relVerRec bhrelsrepo.ReleaseVersionRec, name string) Release {
	return Release{
		relVerRec: relVerRec,
		Source: NewSource(relVerRec.AsSource()),
		Name: name,
		Version: relVerRec.Version(),
	}
}
// BuildNavigation assembles the sidebar tree for this release — the global
// releases nav plus an "All Versions" link and this version's sub-tree —
// and activates the entry matching the given title.
func (r Release) BuildNavigation(active string) nav.Link {
	root := Navigation()
	relnav := nav.Link{Title: r.Name}
	relnav.Add(nav.Link{
		Title: "All Versions",
		URL: r.AllVersionsURL(),
	})
	relnav.Add(r.Navigation())
	root.Add(relnav)
	root.Activate(active)
	return root
}
// AllURL returns the listing page for all releases.
func (r Release) AllURL() string { return "/releases" }
// AllVersionsURL returns the page listing every version of this release.
func (r Release) AllVersionsURL() string {
	return fmt.Sprintf("/releases/%s?all=1", r.Source)
}
// AvatarURL returns the repo owner's avatar image URL.
func (r Release) AvatarURL() string { return r.relVerRec.AvatarURL() }
// URL returns the page for this specific release version.
func (r Release) URL() string {
	return fmt.Sprintf("/releases/%s?version=%s", r.Source, r.Version)
}
// DownloadURL returns the site-relative tarball download path.
func (r Release) DownloadURL() string {
	return fmt.Sprintf("/d/%s?v=%s", r.Source, r.Version)
}
// UserVisibleDownloadURL returns the absolute tarball URL shown to users.
func (r Release) UserVisibleDownloadURL() string {
	// todo make domain configurable
	return fmt.Sprintf("https://bosh.io/d/%s?v=%s", r.Source, r.Version)
}
// UserVisibleLatestDownloadURL returns the absolute URL of the latest tarball.
func (r Release) UserVisibleLatestDownloadURL() string {
	// todo make domain configurable
	return fmt.Sprintf("https://bosh.io/d/%s", r.Source)
}
// GraphURL returns this version's page with the dependency graph enabled.
func (r Release) GraphURL() string { return r.URL() + "&graph=1" }
// HasGithubURL reports whether the release source is a GitHub repo.
func (r Release) HasGithubURL() bool { return r.Source.FromGithub() }
// GithubURL returns the GitHub tree URL at this release's commit.
func (r Release) GithubURL() string {
	return r.GithubURLForPath("", "")
}
// GithubURLOnMaster returns the GitHub tree URL on the master branch.
func (r Release) GithubURLOnMaster() string {
	return r.GithubURLForPath("", "master")
}
// GithubURLForPath builds a GitHub tree URL for path at ref; when ref is
// empty it falls back to the release's commit hash, then to "<missing>".
func (r Release) GithubURLForPath(path, ref string) string {
	if len(ref) > 0 {
		// nothing
	} else if len(r.CommitHash) > 0 {
		ref = r.CommitHash
	} else {
		// Some releases might not have CommitHash
		ref = "<missing>"
	}
	// e.g. https://github.com/cloudfoundry/cf-release/tree/1c96107/jobs/hm9000
	return fmt.Sprintf("%s/tree/%s/%s", r.Source.GithubURL(), ref, path)
}
// IsBOSH reports whether this is the BOSH release itself.
func (r Release) IsBOSH() bool { return r.Source.IsBOSH() }
// IsCPI reports whether this release is a CPI.
func (r Release) IsCPI() bool { return r.Source.IsCPI() }
// CPIDocsLink returns the documentation link for a known CPI, or empty HTML.
func (r Release) CPIDocsLink() template.HTML {
	cpi, found := KnownCPIs.FindByShortName(r.Source.ShortName())
	if found {
		return template.HTML(cpi.DocPageLink())
	}
	return template.HTML("")
}
// TarballSHA1 returns the SHA1 digest of the release tarball.
func (r Release) TarballSHA1() (string, error) {
	relTarRec, err := r.relVerRec.Tarball()
	if err != nil {
		return "", err
	}
	return relTarRec.SHA1, nil
}
// NotesInMarkdown renders the release notes from Markdown to sanitized
// HTML, memoizing the result on the receiver (pointer receiver required).
// NOTE(review): the memoization is unsynchronized — confirm this is only
// called from a single goroutine per Release.
func (r *Release) NotesInMarkdown() (template.HTML, error) {
	if r.notesInMarkdown == nil {
		// Do not care about found -> no UI indicator
		noteRec, _, err := r.relVerRec.Notes()
		if err != nil {
			return template.HTML(""), err
		}
		unsafeMarkdown := blackfriday.MarkdownCommon([]byte(noteRec.Content))
		// Sanitize before caching so the memoized bytes are always safe.
		safeMarkdown := bluemonday.UGCPolicy().SanitizeBytes(unsafeMarkdown)
		r.notesInMarkdown = &safeMarkdown
	}
	// todo sanitized markdown
	return template.HTML(*r.notesInMarkdown), nil
}
// MarshalJSON serializes the release as the public API record
// (name, version, download URL, tarball SHA1).
func (r Release) MarshalJSON() ([]byte, error) {
	sha1, err := r.TarballSHA1()
	if err != nil {
		return nil, err
	}
	record := releaseAPIRecord{
		Name: r.Source.Full(),
		Version: r.Version.AsString(),
		URL: r.UserVisibleDownloadURL(),
		SHA1: sha1,
	}
	return json.Marshal(record)
}
// Navigation builds this version's nav sub-tree: an Overview entry plus
// one child link per job and per package.
func (r Release) Navigation() nav.Link {
	releaseNav := nav.Link{
		Title: fmt.Sprintf("%s", r.Version),
		URL: r.URL(),
	}
	releaseNav.Add(nav.Link{
		Title: "Overview",
		URL: r.URL(),
	})
	{
		jobsNav := nav.Link{Title: "Jobs"}
		for _, job := range r.Jobs {
			jobsNav.Add(nav.Link{
				Title: job.Name,
				URL: job.URL(),
			})
		}
		releaseNav.Add(jobsNav)
	}
	{
		pkgsNav := nav.Link{Title: "Packages"}
		for _, pkg := range r.Packages {
			pkgsNav.Add(nav.Link{
				Title: pkg.Name,
				URL: pkg.URL(),
			})
		}
		releaseNav.Add(pkgsNav)
	}
	return releaseNav
}
// sort.Interface implementation: ascending by semantic version.
func (s ReleaseSorting) Len() int { return len(s) }
func (s ReleaseSorting) Less(i, j int) bool { return s[i].Version.IsLt(s[j].Version) }
func (s ReleaseSorting) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// parseVersion parses a semantic version string, panicking on invalid
// input (used only for trusted, hard-coded versions).
func parseVersion(versionRaw string) semiver.Version {
	ver, err := semiver.NewVersionFromString(versionRaw)
	if err != nil {
		panic(fmt.Sprintf("Version '%s' is not valid: %s", versionRaw, err))
	}
	return ver
}
|
package main
import (
"flag"
"fmt"
"os"
log "github.com/Sirupsen/logrus"
"github.com/gin-gonic/gin"
)
var argDebug = flag.Bool("debug", false, "run in debug mode")
var argLogJSON = flag.Bool("logjson", false, "output log in JSON format")
// Info is the info logger
var Info *log.Logger
// Error is the error logger
var Error *log.Logger
const logo = `
___. .__ __ .__ __ .__
\_ |__ |__|/ |_ ____ | | ____ ______ _____/ |_ _____ ______ |__| ____ ____
| __ \| \ __\/ ___\| | / _ \/ ___// __ \ __\ ______ \__ \ \____ \| | ______ / ___\ / _ \
| \_\ \ || | \ \___| |_( <_> )___ \\ ___/| | /_____/ / __ \| |_> > | /_____/ / /_/ > <_> )
|___ /__||__| \___ >____/\____/____ >\___ >__| (____ / __/|__| \___ / \____/
\/ \/ \/ \/ \/|__| /_____/
`
// BitClosetIndex handles GET on the bitcloset resource root and replies
// with a plain-text greeting.
func BitClosetIndex(c *gin.Context) {
	const greeting = "hello\n"
	c.String(200, greeting)
}
// main wires up logging per the command-line flags and serves the
// bitcloset API on gin's default port.
func main() {
	flag.Parse()
	// Print logo.
	fmt.Print(logo)
	// Initialize the Loggers. First Info Logger
	Info = log.New()
	Info.Out = os.Stdout
	// Initialize Error Logger
	Error = log.New()
	Error.Out = os.Stderr
	if *argDebug {
		log.SetLevel(log.DebugLevel)
	}
	// Honor the -logjson flag: previously the JSON formatter was installed
	// unconditionally, making the flag a no-op.
	if *argLogJSON {
		log.SetFormatter(&log.JSONFormatter{})
	}
	router := gin.Default()
	v1 := router.Group("/api/v1/bitcloset")
	{
		v1.GET("/", BitClosetIndex)
	}
	// Surface startup failures (e.g. port in use) instead of ignoring them.
	if err := router.Run(); err != nil {
		Error.Fatal(err)
	}
}
|
package main
// main is an empty entry point; makeSmallestPalindrome below is the
// exercised function.
func main() {
}
// makeSmallestPalindrome returns the lexicographically smallest palindrome
// obtainable from s by byte replacements: each mirrored pair is set to the
// smaller of its two bytes.
func makeSmallestPalindrome(s string) string {
	buf := []byte(s)
	i, j := 0, len(buf)-1
	for i < j {
		if buf[i] < buf[j] {
			buf[j] = buf[i]
		} else if buf[j] < buf[i] {
			buf[i] = buf[j]
		}
		i++
		j--
	}
	return string(buf)
}
|
package adutils
import (
"fmt"
"log"
"os"
"strconv"
"time"
)
// Exists reports whether the given file or directory exists. A non-nil
// error is returned only for stat failures other than "not exist".
func Exists(path string) (bool, error) {
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}
	return true, nil
}
// Unlink removes file, retrying up to five times and waiting ten seconds
// between failed attempts. Returns nil on success, otherwise the last
// removal error.
// FIX: the retry delay was `time.Sleep(10)` — 10 nanoseconds, not the ten
// seconds the original comment promised; the duration now carries a unit.
func Unlink(file string) error {
	var ret error
	for i := 0; i < 5; i++ {
		ret = os.Remove(file)
		if ret == nil {
			return nil
		}
		time.Sleep(10 * time.Second)
	}
	return ret
}
// Rename renames oldfile to newfile, retrying up to five times with a
// ten-second delay between failed attempts.
// FIXES: (1) the delay was `time.Sleep(10)` — 10 nanoseconds, not ten
// seconds; (2) the function previously returned nil even when every
// attempt failed, hiding the error — it now returns the last error.
func Rename(oldfile string, newfile string) error {
	var err error
	for i := 0; i < 5; i++ {
		err = os.Rename(oldfile, newfile)
		if err == nil {
			return nil
		}
		log.Println(err)
		log.Println("Rename error!")
		time.Sleep(10 * time.Second)
	}
	return err
}
// GetAdvFilename maps a numeric index (given as a string) onto one of the
// configured display entries, wrapping modulo the entry count. Returns
// (link, 1, nil) for "link"-type entries, otherwise a local static file
// path as (path, 0, nil); on parse/config errors returns (" ", 0, err).
func GetAdvFilename(index string) (string, int, error) {
	i, err := strconv.Atoi(index)
	if err != nil {
		// handle error
		fmt.Println(err)
		return " ", 0, err
	}
	fmt.Println(index)
	page, err := ContentParse()
	if err != nil {
		// handle error
		fmt.Println(err)
		return " ", 0, err
	}
	disLen := len(page.Display)
	// NOTE(review): divides by zero when page.Display is empty — confirm
	// ContentParse guarantees at least one entry.
	findex := i % disLen
	if page.Display[findex].Type == "link" {
		return page.Display[findex].Link, 1, nil
	}
	filename := "../static/" + page.Display[findex].Link
	ret, _ := Exists(filename)
	if ret != true {
		// NOTE(review): message lacks a space after the filename.
		fmt.Println(filename + "not exist in the file")
	}
	return filename, 0, nil
}
|
package main
import (
"encoding/json"
"fmt"
"log"
"net/http"
)
// main fetches httpbin's /html endpoint and tries to decode the body as a
// JSON object, printing each key/value pair.
// NOTE(review): /html returns an HTML document, so the JSON decode below
// will always fail and hit log.Fatal — presumably /json (or another JSON
// endpoint) was intended; confirm.
// NOTE(review): http.Get uses the default client, which has no timeout.
func main() {
	resp, err := http.Get("http://httpbin.org/html")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	result := make(map[string]interface{})
	dec := json.NewDecoder(resp.Body)
	if err := dec.Decode(&result); err != nil {
		log.Fatal(err)
	}
	for key, value := range result {
		fmt.Println("Key:", key, "Value:", value)
	}
}
|
package trie
import (
"testing"
"github.com/gioui/uax/internal/tracing"
)
// TestEnterSimple checks that two words differing only in the last byte get
// consecutive positions, and extending a word yields a distinct position.
func TestEnterSimple(t *testing.T) {
	tracing.SetTestingLog(t)
	//
	trie, _ := NewTinyHashTrie(139, 46)
	p1 := trie.AllocPositionForWord([]byte{13, 20})
	t.Logf("p=%d", p1)
	p2 := trie.AllocPositionForWord([]byte{13, 21})
	t.Logf("p=%d", p2)
	if p2 != p1+1 {
		t.Errorf("expected to be p1 and p2 consecutive, aren't: %d / %d", p1, p2)
	}
	p3 := trie.AllocPositionForWord([]byte{13, 21, 10})
	t.Logf("p=%d", p3)
	if p3 == p2 {
		t.Errorf("expected p2 and p3 to be different, aren't")
	}
}
// TestEnterZero ensures the all-zero word still maps to a non-zero position.
func TestEnterZero(t *testing.T) {
	tracing.SetTestingLog(t)
	//
	trie, _ := NewTinyHashTrie(139, 46)
	p1 := trie.AllocPositionForWord([]byte{0, 0})
	t.Logf("p=%d", p1)
	if p1 == 0 {
		t.Errorf("no entry for [0,0]")
	}
}
// TestIterator verifies that, after Freeze, both a direct lookup and a
// byte-by-byte iteration reach the position of the original allocation.
func TestIterator(t *testing.T) {
	tracing.SetTestingLog(t)
	//
	trie, _ := NewTinyHashTrie(139, 46)
	word := []byte{13, 20}
	p := trie.AllocPositionForWord(word)
	t.Logf("p=%d\nFreeze----", p)
	trie.Freeze()
	q := trie.AllocPositionForWord(word)
	t.Logf("lookup p=%d", q)
	if p != q {
		t.Fatalf("expected to find word again in trie, couldn't: %d != %d", q, p)
	}
	it := trie.Iterator()
	for i, w := range word {
		p = it.Next(int8(w))
		if p == 0 {
			t.Errorf("no position for byte #%d=%v", i, w)
		}
		t.Logf("p=%d", p)
	}
	if p != q {
		t.Fatalf("expected to iterate to word in trie, couldn't: %d != %d", q, p)
	}
}
|
package main
import (
"encoding/json"
"github.com/valyala/fasthttp"
)
// GetItems will return our items object as JSON.
// Previously the Encode error was ignored; a failed encode now yields 500.
func GetItems(ctx *fasthttp.RequestCtx) {
	items := ReadItems()
	if err := json.NewEncoder(ctx).Encode(&items); err != nil {
		ctx.SetStatusCode(fasthttp.StatusInternalServerError)
		return
	}
	ctx.SetStatusCode(fasthttp.StatusOK)
}
// AddItems modifies our array by appending the "item" path value;
// responds 400 when the value is absent or not a string.
func AddItems(ctx *fasthttp.RequestCtx) {
	name, ok := ctx.UserValue("item").(string)
	if !ok {
		ctx.SetStatusCode(fasthttp.StatusBadRequest)
		return
	}
	AddItem(name)
	ctx.SetStatusCode(fasthttp.StatusOK)
}
|
package tbhandler
import (
"fmt"
"io/ioutil"
"os"
"regexp"
"strings"
"github.com/narrowizard/tinysql"
)
// isNeedImport tracks whether the generated model needs `import "time"`
// (set while parsing column types, consumed and reset in replaceTbName).
var isNeedImport bool
// CreateModelAll generates model structs for every table in a database.
// dbName: database name
func CreateModelAll(dbName string) {
	dbName = strings.Trim(dbName, " ")
	var tbVal = getAllTable(dbName)
	var val = strings.Split(tbVal, "|||")
	fmt.Println(dbName, " 数据库共有", len(val), "表")
	for _, v := range val {
		CreateModelOne(v, dbName)
	}
}
// CreateModelOne generates the Go model file for one table: it ensures the
// output directory exists, reads the table's columns and comment from the
// database, fills the struct template, and writes ../tablemodel/<db>/<table>.go.
// tableName: table name; dbName: database name
func CreateModelOne(tableName, dbName string) {
	tableName = strings.Trim(tableName, " ")
	dbName = strings.Trim(dbName, " ")
	fmt.Print(tableName, " 表 ")
	var dir = "../tablemodel/" + dbName
	var directory, err = os.Open(dir)
	if os.IsNotExist(err) { // does the output directory exist?
		fmt.Println("文件夹不存在!")
		err = os.MkdirAll(dir, 0777) // create it
		if err != nil {
			// Previously this error was silently dropped; report it and stop,
			// since the file write below would fail anyway.
			fmt.Println("mkdir_err创建目录错误:---------", err)
			return
		}
	} else {
		directory.Close()
	}
	// target .go file path
	var path = dir + "/" + tableName + ".go"
	// fetch the table's column metadata from the database
	var val = getTableData(tableName, dbName)
	// parse it into struct-field lines
	var res = parseValue(val)
	// fetch the table comment
	var tbComment = getTableComment(tableName, dbName)
	// substitute the values into the template
	var strData = replaceTbName(tableName, tbComment, getStructModel())
	strData = replaceStructValue(res, strData)
	// write the result to the .go file
	var data = []byte(strData)
	er := ioutil.WriteFile(path, data, 0666)
	if er != nil {
		fmt.Println("writefile__err数据写入文件错误:---------", er)
	}
}
// initTableName converts a snake_case table name to CamelCase: uppercase
// the first letter and each letter that follows an underscore, then strip
// all underscores.
func initTableName(tableName string) string {
	upper := regexp.MustCompile(`^([a-z])|(_[a-z])+`).ReplaceAllStringFunc(tableName, strings.ToUpper)
	return regexp.MustCompile(`(_)+`).ReplaceAllString(upper, "")
}
// getTableData queries information_schema for the table's columns and
// returns them as one string: fields separated by "|||", each being
// `name *,* type *,* // comment`.
// tableName: table name; dbName: database name
func getTableData(tableName, dbName string) string {
	var builder = tinysql.Open(dbschemas).NewBuilder()
	var sql = `select group_concat(column_name," *,* ",data_type,"*,* // ",replace(column_comment,char(10),"") SEPARATOR '|||')from columns where table_Name = ? and table_schema=?;`
	var value = ""
	_, err := builder.QuerySql(sql, tableName, dbName).Scan(&value)
	if err != nil {
		fmt.Println("sql_err查询出错:-------------", err)
	}
	return value
}
// getTableComment queries the database for the table's comment, with
// embedded newlines removed.
// tableName: table name; dbName: database name
func getTableComment(tableName, dbName string) string {
	var builder = tinysql.Open(dbschemas).NewBuilder()
	var sql = `select replace(table_comment,char(10),"") from tables where table_name=? and table_schema=?;`
	var val = ""
	_, er := builder.QuerySql(sql, tableName, dbName).Scan(&val)
	if er != nil {
		fmt.Println("sql___er获取表描述出错!")
	}
	return val
}
// getAllTable returns the names of all tables in the database as a single
// "|||"-separated string.
// dbName: database name
func getAllTable(dbName string) string {
	var builder = tinysql.Open(dbschemas).NewBuilder()
	var sql = `select group_concat(table_name separator '|||') from information_schema.tables where table_schema=?;`
	var value = ""
	_, err := builder.QuerySql(sql, dbName).Scan(&value)
	if err != nil {
		fmt.Println("sql_err查询出错:-------------", err)
	}
	return value
}
// getStructModel returns the struct template text used for code generation.
func getStructModel() string {
	return structTemplate
}
// replaceTbName substitutes the table name and table comment into the
// struct template, and injects `import "time"` when a time column was
// seen (consuming and resetting the isNeedImport flag).
// NOTE(review): regexp's ReplaceAllString expands `$` in the replacement
// text — a table comment containing `$1` would be mangled; a literal
// string replacement would be safer here.
// tbName: table name; strModel: struct template text
// return: the substituted template
func replaceTbName(tbName, tbComment string, strModel string) string {
	var tName = initTableName(tbName)
	if tbComment == "" {
		tbComment = tName + " 表"
	} else {
		tbComment = tName + " " + tbComment
	}
	var reg = regexp.MustCompile(`tb_comment`)
	strModel = reg.ReplaceAllString(strModel, tbComment)
	reg = regexp.MustCompile(`import`)
	if isNeedImport {
		strModel = reg.ReplaceAllString(strModel, ` import "time" `)
		isNeedImport = false
	} else {
		strModel = reg.ReplaceAllString(strModel, ` `)
	}
	reg = regexp.MustCompile(`tb_name`)
	return reg.ReplaceAllString(strModel, tName)
}
// replaceStructValue substitutes the generated field lines for the `value`
// placeholder in the struct template.
// value: field content; strModel: struct template text
// return: the substituted template
func replaceStructValue(value, strModel string) string {
	var reg = regexp.MustCompile(`value`)
	return reg.ReplaceAllString(strModel, initTableName(value))
}
// parseValue converts the "|||"-separated column description produced by
// getTableData into Go struct-field lines, mapping SQL column types onto
// Go types (int, time.Time, string, float64).
// value: raw column data
// return: the generated field lines
func parseValue(value string) string {
	var result = ""
	var columnVal = strings.Split(value, "|||")
	fmt.Println("共有", len(columnVal), "个字段!")
	for _, v := range columnVal {
		var val = strings.Split(v, "*,*")
		result += " \r\n "
		for k1, v1 := range val {
			switch k1 { // part 0 = name, 1 = SQL type, 2 = comment
			case 0:
				result += initTableName(v1)
			case 1:
				v1 = strings.ToLower(v1)
				if strings.Contains(v1, "int") {
					result += " int "
				}
				if strings.Contains(v1, "time") || strings.Contains(v1, "date") {
					// time columns require `import "time"` in the output file
					isNeedImport = true
					result += " time.Time"
				}
				if strings.Contains(v1, "char") || strings.Contains(v1, "text") || strings.Contains(v1, "blob") {
					result += " string "
				}
				if strings.Contains(v1, "decimal") || strings.Contains(v1, "float") || strings.Contains(v1, "double") {
					result += " float64 "
				}
			case 2:
				result += v1
			}
		}
	}
	return result
}
|
package main
wss
import (
"fmt"
"keepassapi/handler"
"keepassapi/helper"
"net/http"
"os"
"github.com/gorilla/mux"
)
// main starts the keepassapi HTTP server. Port and database path come from
// the command line (keepassapi <port> <dbpath>) or, as a fallback, the
// KEEPASS_PORT / KEEPASS_DBPATH environment variables.
func main() {
	var port string
	if len(os.Args) >= 3 {
		port = os.Args[1]
		helper.Keepassdbpath = os.Args[2]
	} else {
		port = os.Getenv("KEEPASS_PORT")
		helper.Keepassdbpath = os.Getenv("KEEPASS_DBPATH")
	}
	if len(port) == 0 || len(helper.Keepassdbpath) == 0 {
		fmt.Println("Usage: keepassapi <port> <dbpath>")
		os.Exit(1)
	}
	r := mux.NewRouter()
	r.Handle("/{path:.*}", handler.NewSimpleFilter(handler.Get)).Methods("GET")
	fmt.Println("running at port:", port)
	fmt.Println("keepass db path:", helper.Keepassdbpath)
	// Previously the ListenAndServe error (e.g. port already in use) was
	// silently discarded; report it and exit non-zero.
	if err := http.ListenAndServe("0.0.0.0:"+port, r); err != nil {
		fmt.Println("server error:", err)
		os.Exit(1)
	}
}
|
package controllers
import (
	"strconv"

	"github.com/revel/revel"
	"goblog/app/models"
	"golang.org/x/crypto/bcrypt"
)
// User handles account pages: login/logout and profile editing.
type User struct {
	App
}
// CheckUser is an interceptor: it lets Login/CreateSession through and
// redirects any other action to the login page when nobody is signed in.
func (c User) CheckUser() revel.Result {
	switch c.MethodName {
	case "Login", "CreateSession":
		return nil
	}
	if c.CurrentUser == nil {
		c.Flash.Error("Please log in first")
		return c.Redirect(User.Login)
	}
	return nil
}
// Edit renders the edit-profile page for the signed-in user.
// NOTE: revel derives template variable names from the Render call's
// argument names, so `user` is deliberately bound to a local first.
func (c User) Edit() revel.Result {
	user := c.CurrentUser
	return c.Render(user)
}
// Update changes the current user's name and, when both new-password
// fields are non-empty and match, the password (bcrypt-hashed).
// Redirects back to Edit on validation failure, home on success.
func (c User) Update(name, oldPassword, newPassword, newPasswordConfirm string) revel.Result {
	if err := bcrypt.CompareHashAndPassword(c.CurrentUser.Password, []byte(oldPassword)); err != nil {
		c.Flash.Error("Old password isn't valid.")
		return c.Redirect(User.Edit)
	}
	var user models.User
	c.Txn.First(&user, c.CurrentUser.Id)
	user.Name = name
	if newPassword != "" && newPasswordConfirm != "" {
		if newPassword == newPasswordConfirm {
			// NOTE(review): the bcrypt error is discarded here; consider
			// handling GenerateFromPassword failures.
			bcryptPassword, _ := bcrypt.GenerateFromPassword([]byte(newPassword), bcrypt.DefaultCost)
			user.Password = bcryptPassword
		} else {
			c.Flash.Error("Password doesn't match the confirmation.")
			return c.Redirect(User.Edit)
		}
	}
	c.Txn.Save(&user)
	return c.Redirect(Home.Index)
}
// Login renders the login form.
func (c User) Login() revel.Result {
	return c.Render()
}
// CreateSession authenticates username/password and, on success, populates
// the session (auth key, username, user id, admin flag) and redirects to
// the posts index. On failure the session is cleared and the user is sent
// home with a flash error.
func (c User) CreateSession(username, password string) revel.Result {
	var user models.User
	c.Txn.Where(&models.User{Username: username}).First(&user)
	err := bcrypt.CompareHashAndPassword(user.Password, []byte(password))
	if err == nil {
		authKey := revel.Sign(user.Username)
		c.Session["authKey"] = authKey
		c.Session["username"] = user.Username
		// BUG FIX: string(user.Id) interpreted the numeric id as a Unicode
		// code point (id 65 -> "A"); format it as a decimal string instead.
		c.Session["userId"] = strconv.Itoa(int(user.Id))
		if user.Role == "admin" {
			c.Session["isAdmin"] = "true"
		}
		c.Flash.Success("Welcome, " + user.Name)
		return c.Redirect(Post.Index)
	}
	// clear session
	for k := range c.Session {
		delete(c.Session, k)
	}
	c.Flash.Out["username"] = username
	c.Flash.Error("Login failed")
	return c.Redirect(Home.Index)
}
// DestroySession logs the user out by wiping every session key, then
// redirects to the home page.
func (c User) DestroySession() revel.Result {
	for key := range c.Session {
		delete(c.Session, key)
	}
	return c.Redirect(Home.Index)
}
|
/*
*
* pk.go
* schemas
*
* Created by lintao on 2020/5/18 3:39 下午
* Copyright © 2020-2020 LINTAO. All rights reserved.
*
*/
package schemas
import (
"bytes"
"encoding/gob"
"github.com/5xxxx/pie/utils"
)
// PK is a composite primary-key value: one element per key column.
type PK []interface{}
// NewPK wraps the given key parts into a *PK.
func NewPK(pks ...interface{}) *PK {
	p := PK(pks)
	return &p
}
// IsZero reports whether ANY component of the key is its zero value
// (note: not "all components are zero").
func (p *PK) IsZero() bool {
	for _, k := range *p {
		if utils.IsZero(k) {
			return true
		}
	}
	return false
}
// ToString gob-encodes the key into a string (inverse of FromString).
func (p *PK) ToString() (string, error) {
	buf := new(bytes.Buffer)
	enc := gob.NewEncoder(buf)
	err := enc.Encode(*p)
	return buf.String(), err
}
// FromString decodes a gob-encoded key produced by ToString.
func (p *PK) FromString(content string) error {
	dec := gob.NewDecoder(bytes.NewBufferString(content))
	err := dec.Decode(p)
	return err
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package extension_test
import (
"context"
"encoding/binary"
"sort"
"strings"
"testing"
"time"
"github.com/pingcap/tidb/extension"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/auth"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/server"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/sessionstates"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/types"
"github.com/stretchr/testify/require"
)
// stmtEventRecord is a snapshot of one statement event delivered to the
// extension's OnStmtEvent hook.
type stmtEventRecord struct {
	tp extension.StmtEventTp
	user *auth.UserIdentity
	originalText string
	redactText string
	params []types.Datum
	connInfo *variable.ConnectionInfo
	sessionAlias string
	err string
	tables []stmtctx.TableEntry
	affectedRows uint64
	stmtNode ast.StmtNode
	executeStmtNode *ast.ExecuteStmt
	preparedNode ast.StmtNode
}
// sessionHandler records every statement event it receives, for later
// assertions in the test.
type sessionHandler struct {
	records []stmtEventRecord
}
// OnStmtEvent copies the event payload into a stmtEventRecord and appends
// it to the handler's record list.
func (h *sessionHandler) OnStmtEvent(tp extension.StmtEventTp, info extension.StmtEventInfo) {
	// Copy the table list so the record is independent of any slice reuse.
	tables := make([]stmtctx.TableEntry, len(info.RelatedTables()))
	copy(tables, info.RelatedTables())
	redactText, _ := info.SQLDigest()
	r := stmtEventRecord{
		tp: tp,
		user: info.User(),
		originalText: info.OriginalText(),
		redactText: redactText,
		params: info.PreparedParams(),
		connInfo: info.ConnectionInfo(),
		sessionAlias: info.SessionAlias(),
		tables: tables,
		affectedRows: info.AffectedRows(),
		stmtNode: info.StmtNode(),
		executeStmtNode: info.ExecuteStmtNode(),
		preparedNode: info.ExecutePreparedStmt(),
	}
	if err := info.GetError(); err != nil {
		r.err = err.Error()
	}
	h.records = append(h.records, r)
}
// Reset discards all recorded events.
func (h *sessionHandler) Reset() {
	h.records = nil
}
// GetHandler adapts sessionHandler to extension.SessionHandler.
func (h *sessionHandler) GetHandler() *extension.SessionHandler {
	return &extension.SessionHandler{
		OnStmtEvent: h.OnStmtEvent,
	}
}
// registerHandler registers a fresh sessionHandler as the "test" extension
// and returns it.
func registerHandler(t *testing.T) *sessionHandler {
	h := &sessionHandler{}
	err := extension.Register(
		"test",
		extension.WithSessionHandlerFactory(h.GetHandler),
	)
	require.NoError(t, err)
	return h
}
// getPreparedID extracts the most recent prepared-statement id from the
// session's encoded state.
func getPreparedID(t *testing.T, sctx sessionctx.Context) uint32 {
	sessStates := &sessionstates.SessionStates{}
	require.NoError(t, sctx.GetSessionVars().EncodeSessionStates(context.Background(), sessStates))
	return sessStates.PreparedStmtID
}
// stmtEventCase describes one scenario checked by TestExtensionStmtEvents.
type stmtEventCase struct {
	sql string
	binaryExecute uint32
	executeParams []paramInfo
	err string
	originalText string
	redactText string
	affectedRows uint64
	tables []stmtctx.TableEntry
	parseError bool
	prepareNotFound bool
	multiQueryCases []stmtEventCase
	dispatchData []byte
	sessionAlias string
}
// TestExtensionStmtEvents drives a mock server connection through plain
// queries, prepared statements (both text and binary protocol), multi-
// statement queries and raw commands, and asserts that the registered
// sessionHandler receives exactly one correctly-populated event per
// statement.
func TestExtensionStmtEvents(t *testing.T) {
	defer extension.Reset()
	extension.Reset()
	h := registerHandler(t)
	require.NoError(t, extension.Setup())

	// Bring up a mock server and a client connection against a mock store.
	store := testkit.CreateMockStore(t)
	serv := server.CreateMockServer(t, store)
	defer serv.Close()
	conn := server.CreateMockConn(t, serv)
	defer conn.Close()

	// Fixture schema and session settings.
	require.NoError(t, conn.HandleQuery(context.Background(), "SET tidb_enable_non_prepared_plan_cache=0")) // sctx.InMultiStmts cannot be set correctly in this UT.
	require.NoError(t, conn.HandleQuery(context.Background(), "SET tidb_multi_statement_mode='ON'"))
	require.NoError(t, conn.HandleQuery(context.Background(), "use test"))
	require.NoError(t, conn.HandleQuery(context.Background(), "create table t1(a int, b int)"))
	require.NoError(t, conn.HandleQuery(context.Background(), "create table t2(id int primary key)"))
	require.NoError(t, conn.HandleQuery(context.Background(), "create database test2"))
	require.NoError(t, conn.HandleQuery(context.Background(), "create table test2.t1(c int, d int)"))
	require.NoError(t, conn.HandleQuery(context.Background(), "set @a=1"))
	require.NoError(t, conn.HandleQuery(context.Background(), "set @b=2"))

	// Prepare four statements over the binary protocol, recording each id.
	cmd := append([]byte{mysql.ComStmtPrepare}, []byte("select ?")...)
	require.NoError(t, conn.Dispatch(context.Background(), cmd))
	stmtID1 := getPreparedID(t, conn.Context())

	cmd = append(
		[]byte{mysql.ComStmtPrepare},
		[]byte("select a, b from t1 left join test2.t1 as t2 on t2.c = t1.a where t1.a = 3 and t1.b = ? and t2.d = ?")...)
	require.NoError(t, conn.Dispatch(context.Background(), cmd))
	stmtID2 := getPreparedID(t, conn.Context())

	// stmtID3 is prepared against a table that is dropped afterwards, to
	// exercise the schema-change error path on execute.
	require.NoError(t, conn.HandleQuery(context.Background(), "create table tnoexist(n int)"))
	cmd = append([]byte{mysql.ComStmtPrepare}, []byte("select * from tnoexist where n=?")...)
	require.NoError(t, conn.Dispatch(context.Background(), cmd))
	stmtID3 := getPreparedID(t, conn.Context())
	require.NoError(t, conn.HandleQuery(context.Background(), "drop table tnoexist"))

	cmd = append([]byte{mysql.ComStmtPrepare}, []byte("insert into t2 values(?)")...)
	require.NoError(t, conn.Dispatch(context.Background(), cmd))
	stmtID4 := getPreparedID(t, conn.Context())

	connID := conn.Context().Session.GetSessionVars().ConnectionID
	require.NotEqual(t, uint64(0), connID)

	cases := []stmtEventCase{
		{
			sql:        "select 1",
			redactText: "select ?",
		},
		{
			sql:        "invalid sql",
			parseError: true,
			err:        "[parser:1064]You have an error in your SQL syntax; check the manual that corresponds to your TiDB version for the right syntax to use line 1 column 7 near \"invalid sql\" ",
		},
		{
			binaryExecute: stmtID1,
			executeParams: []paramInfo{
				{value: 7},
			},
			originalText: "select ?",
			redactText:   "select ?",
		},
		{
			sql:        "select a, b from t1 where a > 1 and b < 2",
			redactText: "select `a` , `b` from `t1` where `a` > ? and `b` < ?",
			tables: []stmtctx.TableEntry{
				{DB: "test", Table: "t1"},
			},
		},
		{
			sql:          "insert into t2 values(1)",
			redactText:   "insert into `t2` values ( ? )",
			affectedRows: 1,
			tables: []stmtctx.TableEntry{
				{DB: "test", Table: "t2"},
			},
		},
		{
			binaryExecute: stmtID2,
			executeParams: []paramInfo{
				{value: 3},
				{value: 4},
			},
			originalText: "select a, b from t1 left join test2.t1 as t2 on t2.c = t1.a where t1.a = 3 and t1.b = ? and t2.d = ?",
			redactText:   "select `a` , `b` from `t1` left join `test2` . `t1` as `t2` on `t2` . `c` = `t1` . `a` where `t1` . `a` = ? and `t1` . `b` = ? and `t2` . `d` = ?",
			tables: []stmtctx.TableEntry{
				{DB: "test", Table: "t1"},
				{DB: "test2", Table: "t1"},
			},
		},
		{
			// Executing against the dropped table must still report the
			// table entry along with the schema-change error.
			binaryExecute: stmtID3,
			executeParams: []paramInfo{
				{value: 5},
			},
			originalText: "select * from tnoexist where n=?",
			redactText:   "select * from `tnoexist` where `n` = ?",
			tables: []stmtctx.TableEntry{
				{DB: "test", Table: "tnoexist"},
			},
			err: "select * from tnoexist where n=? [arguments: 5]: [planner:8113]Schema change caused error: [schema:1146]Table 'test.tnoexist' doesn't exist",
		},
		{
			binaryExecute: stmtID4,
			executeParams: []paramInfo{
				{value: 3},
			},
			originalText: "insert into t2 values(?)",
			redactText:   "insert into `t2` values ( ? )",
			affectedRows: 1,
			tables: []stmtctx.TableEntry{
				{DB: "test", Table: "t2"},
			},
		},
		{
			sql:        "prepare s from 'select * from t1 where a=1 and b>? and b<?'",
			redactText: "prepare `s` from ?",
		},
		{
			// Text-protocol EXECUTE reports the underlying statement text.
			sql:          "execute s using @a, @b",
			originalText: "select * from t1 where a=1 and b>? and b<?",
			redactText:   "select * from `t1` where `a` = ? and `b` > ? and `b` < ?",
			executeParams: []paramInfo{
				{value: 1},
				{value: 2},
			},
			tables: []stmtctx.TableEntry{
				{DB: "test", Table: "t1"},
			},
		},
		{
			sql:        "execute sn using @a, @b",
			redactText: "execute `sn` using @a , @b",
			executeParams: []paramInfo{
				{value: 1},
				{value: 2},
			},
			prepareNotFound: true,
			err:             "[planner:8111]Prepared statement not found",
		},
		{
			sql:          "insert into t1 values(1, 10), (2, 20)",
			redactText:   "insert into `t1` values ( ... )",
			affectedRows: 2,
			tables: []stmtctx.TableEntry{
				{DB: "test", Table: "t1"},
			},
		},
		{
			sql:          "insert into t2 values(1)",
			redactText:   "insert into `t2` values ( ? )",
			affectedRows: 0,
			err:          "[kv:1062]Duplicate entry '1' for key 't2.PRIMARY'",
			tables: []stmtctx.TableEntry{
				{DB: "test", Table: "t2"},
			},
		},
		{
			// Multi-statement query: one event per inner statement.
			sql: "select 1;select * from t1 where a > 1",
			multiQueryCases: []stmtEventCase{
				{
					originalText: "select 1;",
					redactText:   "select ?",
				},
				{
					originalText: "select * from t1 where a > 1",
					redactText:   "select * from `t1` where `a` > ?",
					tables: []stmtctx.TableEntry{
						{DB: "test", Table: "t1"},
					},
				},
			},
		},
		{
			binaryExecute: stmtID4,
			executeParams: []paramInfo{
				{value: 3},
			},
			err:          "insert into t2 values(?) [arguments: 3]: [kv:1062]Duplicate entry '3' for key 't2.PRIMARY'",
			originalText: "insert into t2 values(?)",
			redactText:   "insert into `t2` values ( ? )",
			affectedRows: 0,
			tables: []stmtctx.TableEntry{
				{DB: "test", Table: "t2"},
			},
		},
		{
			sql:        "create database db1",
			redactText: "create database `db1`",
			tables: []stmtctx.TableEntry{
				{DB: "db1", Table: ""},
			},
		},
		{
			sql:        "kill query 1",
			redactText: "kill query ?",
		},
		{
			sql:        "create placement policy p1 followers=1",
			redactText: "create placement policy `p1` followers = ?",
		},
		{
			// Raw COM_INIT_DB commands also produce events.
			dispatchData: append([]byte{mysql.ComInitDB}, []byte("db1")...),
			originalText: "use `db1`",
			redactText:   "use `db1`",
		},
		{
			dispatchData: append([]byte{mysql.ComInitDB}, []byte("noexistdb")...),
			originalText: "use `noexistdb`",
			redactText:   "use `noexistdb`",
			err:          "[schema:1049]Unknown database 'noexistdb'",
		},
		{
			sql:          "set @@tidb_session_alias='alias123'",
			redactText:   "set @@tidb_session_alias = ?",
			sessionAlias: "alias123",
		},
		{
			sql:          "select 123",
			redactText:   "select ?",
			sessionAlias: "alias123",
		},
		{
			sql:          "set @@tidb_session_alias=''",
			redactText:   "set @@tidb_session_alias = ?",
			sessionAlias: "",
		},
		{
			sql:          "select 123",
			redactText:   "select ?",
			sessionAlias: "",
		},
	}

	for i, c := range cases {
		h.Reset()
		conn.Context().SetProcessInfo("", time.Now(), mysql.ComSleep, 0)

		// Send the statement via whichever protocol the case specifies.
		var err error
		switch {
		case c.sql != "":
			err = conn.HandleQuery(context.Background(), c.sql)
			if c.originalText == "" {
				c.originalText = c.sql
			}
			if c.redactText == "" {
				c.redactText = c.sql
			}
		case c.binaryExecute != 0:
			err = conn.Dispatch(context.Background(), getExecuteBytes(c.binaryExecute, false, true, c.executeParams...))
		case c.dispatchData != nil:
			err = conn.Dispatch(context.Background(), c.dispatchData)
		}

		if c.err != "" {
			require.EqualError(t, err, c.err)
		} else {
			require.NoError(t, err)
		}

		// Single statements are verified through the same path as
		// multi-statement queries: one sub-case per expected event.
		subCases := c.multiQueryCases
		if subCases == nil {
			subCases = []stmtEventCase{c}
		}

		require.Equal(t, len(subCases), len(h.records), "%d", i)
		for j, subCase := range subCases {
			record := h.records[j]
			if subCase.err != "" {
				require.Equal(t, subCase.err, record.err)
				require.Equal(t, extension.StmtError, record.tp)
			} else {
				require.Empty(t, record.err)
				require.Equal(t, extension.StmtSuccess, record.tp)
			}

			require.NotNil(t, record.connInfo)
			// AST expectations: parse errors carry no nodes; EXECUTE
			// statements carry both the execute node and (when found)
			// the distinct prepared node.
			if subCase.parseError {
				require.Nil(t, record.stmtNode)
				require.Nil(t, record.executeStmtNode)
				require.Nil(t, record.preparedNode)
			} else {
				require.NotNil(t, record.stmtNode)
				if subCase.binaryExecute != 0 || strings.HasPrefix(strings.ToLower(subCase.sql), "execute ") {
					require.NotNil(t, record.executeStmtNode)
					require.Equal(t, record.stmtNode, record.executeStmtNode)
					if c.prepareNotFound {
						require.Nil(t, record.preparedNode)
					} else {
						require.NotNil(t, record.preparedNode)
						require.NotEqual(t, record.preparedNode, record.executeStmtNode)
					}
				} else {
					require.Nil(t, record.executeStmtNode)
					require.Nil(t, record.preparedNode)
				}
			}

			require.Equal(t, connID, record.connInfo.ConnectionID)
			require.Equal(t, "root", record.user.Username)
			require.Equal(t, "localhost", record.user.Hostname)
			require.Equal(t, "root", record.user.AuthUsername)
			require.Equal(t, "%", record.user.AuthHostname)
			require.Equal(t, subCase.sessionAlias, record.sessionAlias)
			require.Equal(t, subCase.originalText, record.originalText)
			require.Equal(t, subCase.redactText, record.redactText)
			require.Equal(t, subCase.affectedRows, record.affectedRows)

			// Compare table sets order-insensitively.
			if subCase.tables == nil {
				subCase.tables = []stmtctx.TableEntry{}
			}
			sort.Slice(subCase.tables, func(i, j int) bool {
				l := subCase.tables[i]
				r := subCase.tables[j]
				return l.DB < r.DB || (l.DB == r.DB && l.Table < r.Table)
			})
			sort.Slice(record.tables, func(i, j int) bool {
				l := record.tables[i]
				r := record.tables[j]
				return l.DB < r.DB || (l.DB == r.DB && l.Table < r.Table)
			})
			require.Equal(t, subCase.tables, record.tables)

			require.Equal(t, len(subCase.executeParams), len(record.params))
			for k, param := range subCase.executeParams {
				require.Equal(t, uint64(param.value), record.params[k].GetUint64())
			}
		}
	}
}
// paramInfo is one parameter of a COM_STMT_EXECUTE built by
// getExecuteBytes. Only the int type is supported for convenience.
type paramInfo struct {
	value  uint32 // little-endian 4-byte value; ignored when isNull
	isNull bool   // set the corresponding bit in the null bitmap instead
}
// create bytes for COM_STMT_EXECUTE. It only supports int type for convenience.
//
// Wire layout produced (MySQL binary protocol):
//
//	[1]  command byte (COM_STMT_EXECUTE)
//	[4]  statement id, little-endian
//	[1]  flags (1 = CURSOR_TYPE_READ_ONLY when useCursor)
//	[4]  iteration count, always 1
//	[n]  null bitmap, one bit per parameter
//	[1]  new-params-bound flag
//	[2k] per-parameter type bytes (TypeLong, unsigned flag 0) when newParam
//	[4k] little-endian values for each non-null parameter
func getExecuteBytes(stmtID uint32, useCursor bool, newParam bool, params ...paramInfo) []byte {
	// One null-bitmap bit per parameter, rounded up to whole bytes.
	nullBitmapLen := (len(params) + 7) >> 3
	// Worst-case size: 11 fixed bytes + bitmap + 6 bytes per parameter
	// (2 type bytes + 4 value bytes); trimmed to pos at the end.
	buf := make([]byte, 11+nullBitmapLen+len(params)*6)
	pos := 0
	buf[pos] = mysql.ComStmtExecute
	pos++
	binary.LittleEndian.PutUint32(buf[pos:], stmtID)
	pos += 4
	if useCursor {
		buf[pos] = 1
	}
	pos++
	// Iteration count (always 1 in the protocol).
	binary.LittleEndian.PutUint32(buf[pos:], 1)
	pos += 4
	// Null bitmap: bit i set means parameter i is NULL.
	for i, param := range params {
		if param.isNull {
			buf[pos+(i>>3)] |= 1 << (i % 8)
		}
	}
	pos += nullBitmapLen
	if newParam {
		// new-params-bound = 1, followed by (type, flag) pairs.
		buf[pos] = 1
		pos++
		for i := 0; i < len(params); i++ {
			buf[pos] = mysql.TypeLong
			pos++
			buf[pos] = 0 // unsigned flag: signed
			pos++
		}
	} else {
		buf[pos] = 0
		pos++
	}
	// Values: only non-null parameters are serialized.
	for _, param := range params {
		if !param.isNull {
			binary.LittleEndian.PutUint32(buf[pos:], param.value)
			pos += 4
		}
	}
	return buf[:pos]
}
|
// +build !release,!nodebug
package errutil
import (
"fmt"
"testing"
)
// TestBug verifies that Bug panics (captured via recovered) and that the
// resulting error message is the formatted input prefixed with "BUG: ".
func TestBug(t *testing.T) {
	format := "horrendous error %d %d"
	data := []interface{}{5, 7}
	err := recovered(func() { Bug(format, data...) })
	if err == nil {
		t.Fatal("expected Bug() to happen, but it hasn't")
	}
	if want, got := fmt.Sprintf("BUG: "+format, data...), err.Error(); want != got {
		t.Errorf("unexpected error value, want = %q, got = %q", want, got)
	}
}
// TestBugOn verifies that BugOn fires only when its condition is true,
// and that the resulting message matches the formatted input with the
// "BUG: " prefix.
func TestBugOn(t *testing.T) {
	var tests = []struct {
		cond   bool
		format string
		data   []interface{}
	}{
		{false, "this one should pass", nil},
		{true, "dreadful situation %v", []interface{}{"boo"}},
		{true, "w/o arguments", nil},
	}
	for _, test := range tests {
		err := recovered(func() { BugOn(test.cond, test.format, test.data...) })
		if !test.cond {
			// cond is false: BugOn must NOT fire. The original message
			// here was inverted ("expected ... to happen, but it hasn't")
			// even though this branch means it fired when it should not.
			if err != nil {
				t.Fatalf("expected BugOn(%v, %v, %v) not to happen, but it did: %v",
					test.cond, test.format, test.data, err)
			}
			continue
		}
		if want, got := fmt.Sprintf("BUG: "+test.format, test.data...), err.Error(); want != got {
			t.Errorf("unexpected error value, want = %q, got = %q", want, got)
		}
	}
}
|
package tkapi
// Taobao flash-sale ("Ju Tqg") API.
import (
"bytes"
"encoding/json"
"errors"
"github.com/mrxiaojie/taobaoke"
)
// JuTqg wraps the taobao.tbk.ju.tqg.get API call; fill ReqParam (see
// Init for defaults) and call Run to execute the request.
type JuTqg struct {
	ReqParam JuTqgParam
}
// JuTqgParam holds the request parameters for taobao.tbk.ju.tqg.get.
type JuTqgParam struct {
	AdZoneId  int    // ad zone (pid) id, required
	Fields    string // comma-separated response fields
	StartTime string // earliest sale start time, e.g. "2016-08-09 09:00:00"
	EndTime   string // latest sale start time, e.g. "2016-08-09 16:00:00"
	PageNo    int    // page number, 1-100
	PageSize  int    // page size, 1-40
}
// Init resets ReqParam to its documented defaults; callers then override
// the fields they need before Run.
func (t *JuTqg) Init() {
	t.ReqParam.AdZoneId = 0 // ad zone id, created in the Taobao Union console; the trailing segment of e.g. mm_400001_1111111_22222 (22222) under the site (siteid) registered for the appkey
	t.ReqParam.Fields = "click_url,pic_url,reserve_price,zk_final_price,total_amount,sold_num,title,category_name,start_time,end_time" // response fields to return
	t.ReqParam.StartTime = "" // earliest sale start time, e.g. "2016-08-09 09:00:00"
	t.ReqParam.EndTime = ""   // latest sale start time, e.g. "2016-08-09 16:00:00"
	t.ReqParam.PageNo = 1     // page number, default 1, range 1-100
	t.ReqParam.PageSize = 40  // page size, default 40, range 1-40
}
// GetParam flattens ReqParam into the key/value map expected by the TOP
// API client.
func (t *JuTqg) GetParam() map[string]interface{} {
	paramMap := make(map[string]interface{})
	paramMap["adzone_id"] = t.ReqParam.AdZoneId
	paramMap["fields"] = t.ReqParam.Fields
	paramMap["start_time"] = t.ReqParam.StartTime
	paramMap["end_time"] = t.ReqParam.EndTime
	paramMap["page_no"] = t.ReqParam.PageNo
	// BUG FIX: page_size was previously populated from PageNo, so the
	// configured page size never reached the API.
	paramMap["page_size"] = t.ReqParam.PageSize
	return paramMap
}
// ApiName reports the TOP method name this request maps to.
func (t *JuTqg) ApiName() string {
	return "taobao.tbk.ju.tqg.get"
}
func (t *JuTqg) Run(appKey string,appSecret string,resIsMap bool) (interface{},error) {
if t.ReqParam.AdZoneId <1 {
return make(map[string]string) , errors.New("推广位ID未设置")
}
if t.ReqParam.Fields == "" {
return make(map[string]string) , errors.New("返回字段fields未设置")
}
if t.ReqParam.StartTime == "" {
return make(map[string]string) , errors.New("最早开团时间未设置")
}
if t.ReqParam.EndTime == "" {
return make(map[string]string) , errors.New("最晚开团时间未设置")
}
TopClient := taobaoke.TopClient{}
TopClient.SetConf(appKey,appSecret)
respByte,err :=TopClient.Exec(t)
if resIsMap {
mapDate := make(map[string]interface{})
json.NewDecoder(bytes.NewBuffer(respByte)).Decode(&mapDate)
return mapDate,err
}
return respByte,err
} |
package main
import "fmt"
// main demonstrates that return exits immediately: the Println below the
// return is unreachable and never executes.
func main() {
	return
	fmt.Println("hello")
}
//return
// `return` exits the enclosing function or method; used in main it ends the program.
|
package main
import "fmt"
// Demonstrates initialization of fields inside a struct =======
type Stu struct {
	Name string
	Age  int
}
// String implements fmt.Stringer so fmt.Println prints "Stu{Name=…,Age=…}"
// when given a *Stu.
func (s *Stu) String() string {
	str := fmt.Sprintf("Stu{Name=%v,Age=%v}", s.Name, s.Age)
	return str
}
// main demonstrates the different ways to construct Stu values and
// pointers, and how String() affects their printing.
func main() {
	// Positional and named field initialization of value types.
	var stu1 = Stu{"tom", 18}
	stu2 := Stu{"mark", 28}
	var stu3 = Stu{Name: "any", Age: 38}
	stu4 := Stu{Name: "jack", Age: 48}
	// &stu4 prints via String(): Stu{Name=jack,Age=48}
	fmt.Println(stu1, stu2, stu3, &stu4)
	// Pointer-to-struct construction with direct initialization -----------------------------------------
	var stu5 *Stu = &Stu{"aaa", 10}
	stu6 := &Stu{"bbb", 20}
	var stu7 = &Stu{"ccc", 30}
	stu8 := &Stu{"ddd", 40}
	// NOTE: if String() were not implemented, "*" would be needed to print the pointed-to value.
	fmt.Println(stu5, stu6, stu7, stu8)
}
|
package main
import(
"manager"
"manager/stmanager"
"manager/accmanager"
"fmt"
"time"
"flag"
)
// Call runs the selected manager's processing entry point.
func Call(m manager.Manager) {
	m.Process()
}
// main selects a manager implementation from the -t flag, runs it and
// reports the elapsed wall-clock time.
func main() {
	var m manager.Manager
	start := time.Now()
	t := flag.String("t", "list", "stock manager type")
	flag.Parse()
	fmt.Println(*t, t)
	switch *t {
	case "list": //get stock list
		m = manager.NewStockListManager()
	case "nlist": //get new stock list
		m = stmanager.NewListManager()
	case "compdata": //get company data
		m = stmanager.NewCompanyManager()
	case "strtdata": //get stock daily data
		m = manager.NewStockRtDataManager()
	case "sthdata": //get stock historical data
		m = manager.NewStockHistDataManager()
	case "nsthdata": //get new stock historical data
		m = stmanager.NewStockHistDataManager()
	case "accdata": //get account data
		m = accmanager.NewAccountManager()
	default:
		// BUG FIX: an unrecognized -t value previously left m nil and
		// Call(m) panicked on a nil interface; fail with a message instead.
		fmt.Printf("unknown stock manager type %q\n", *t)
		return
	}
	Call(m)
	duration := time.Since(start)
	fmt.Printf("Complete to process: %v s\n", duration.Seconds())
}
|
package primitives_test
import (
"github.com/plandem/xlsx/internal/ml/primitives"
"github.com/stretchr/testify/require"
"testing"
)
// TestCelRef checks CellRef construction from zero-based column/row
// indexes (negative indexes yield the empty ref) and the round trip back
// through ToIndexes.
func TestCelRef(t *testing.T) {
	require.Equal(t, primitives.CellRef(""), primitives.CellRefFromIndexes(-1, -1))
	require.Equal(t, primitives.CellRef(""), primitives.CellRefFromIndexes(0, -1))
	require.Equal(t, primitives.CellRef(""), primitives.CellRefFromIndexes(-1, 0))
	require.Equal(t, primitives.CellRef("A1"), primitives.CellRefFromIndexes(0, 0))
	// Column 100 is "CW", row index 100 is the 1-based row 101.
	ref := primitives.CellRefFromIndexes(100, 100)
	require.Equal(t, primitives.CellRef("CW101"), ref)
	col, row := ref.ToIndexes()
	require.Equal(t, 100, col)
	require.Equal(t, 100, row)
}
|
// Copyright (c) 2015 Andrea Masi. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE.txt file.
// Package middle exposes functions useful
// building http services.
package middle
import (
"log"
"math/rand"
"net/http"
"sync"
"time"
)
const (
	// authCookieName is the cookie that carries the session id issued by Auth.
	authCookieName = "session-id"
)
// FIXME drop sessions every x time or memory leak here.
// sessions is the set of currently valid session ids; guarded by sessionsMut.
var sessions map[string]struct{} = make(map[string]struct{})
var sessionsMut sync.RWMutex
// Authorizer models credentials verification
// to permit different backends and hash algorithms.
// Implementation MUST be concurrency safe.
type Authorizer interface {
	// Verify uses its backend to verify password
	// for a given username. It reports true when the
	// credentials are valid.
	Verify(user, passw string) bool
}
// CORS adds necessary headers to response
// to permit GET/POST CORS requests.
// It currently allows any origin ("*").
func CORS(next http.Handler) http.HandlerFunc {
	// BUG(eraclitux) fully implement CORS.
	return func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Access-Control-Allow-Origin", "*")
		next.ServeHTTP(w, r)
	}
}
// Log calls Println on logger
// with following arguments:
//
// <http method> <remote addr> <requested url>
//
// If X-Real-IP is found in headers it is used as <remote addr>
// with (X-Real-IP) added.
func Log(logger *log.Logger, next http.Handler) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
remoteAddr := r.Header.Get("X-Real-IP")
if remoteAddr == "" {
remoteAddr = r.RemoteAddr
} else {
// FIXME write a benchmark to check if is worth string concatenation
// optimization using []byte.
remoteAddr += " (X-Real-IP)"
}
logger.Println(r.Method, remoteAddr, r.URL)
next.ServeHTTP(w, r)
}
}
// Auth checks if request is authenticated with basic auth verifying
// that its cookie is present in registered sessions.
// If request if from browser it will prompt for credentials
// with no valid session.
//
// BUG(eraclitux) session storage leaks.
func Auth(authorizer Authorizer, next http.Handler) http.HandlerFunc {
	// Heavily inspired by:
	// https://github.com/syncthing/syncthing/blob/161326c5489d000972a6846564f0ce12779bd8f2/cmd/syncthing/gui_auth.go
	return func(w http.ResponseWriter, r *http.Request) {
		cookie, err := r.Cookie(authCookieName)
		if err == nil && cookie != nil {
			// FIX: sessionsMut is a sync.RWMutex, so this read-only
			// lookup takes the read lock (the previous code used the
			// exclusive Lock, serializing all authenticated requests).
			sessionsMut.RLock()
			_, ok := sessions[cookie.Value]
			sessionsMut.RUnlock()
			if ok {
				next.ServeHTTP(w, r)
				return
			}
		}
		// unauthorized replies 401 after a randomized delay; renamed from
		// `error`, which shadowed the builtin error type.
		unauthorized := func() {
			// Mitigate risk of timing attacks.
			// https://en.wikipedia.org/wiki/Timing_attack
			// FIXME use crypto/rand
			time.Sleep(time.Duration(rand.Intn(100)+100) * time.Millisecond)
			w.Header().Set("WWW-Authenticate", "Basic realm=\"Authorization Required\"")
			http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
		}
		username, passwd, ok := r.BasicAuth()
		if !ok {
			unauthorized()
			return
		}
		if !authorizer.Verify(username, passwd) {
			unauthorized()
			return
		}
		// Credentials verified: register a new session id and hand it back
		// as a cookie. Writing to the map requires the exclusive lock.
		sessionid := randomString(32)
		sessionsMut.Lock()
		sessions[sessionid] = struct{}{}
		sessionsMut.Unlock()
		http.SetCookie(w, &http.Cookie{
			Name:   authCookieName,
			Value:  sessionid,
			MaxAge: 0,
		})
		next.ServeHTTP(w, r)
	}
}
|
package accountstable
import (
"bytes"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"database/sql"
"encoding/hex"
"os"
"reflect"
"testing"
"github.com/SIGBlockchain/project_aurum/internal/accountinfo"
"github.com/SIGBlockchain/project_aurum/internal/constants"
"github.com/SIGBlockchain/project_aurum/internal/contracts"
"github.com/SIGBlockchain/project_aurum/internal/hashing"
"github.com/SIGBlockchain/project_aurum/internal/publickey"
"github.com/SIGBlockchain/project_aurum/internal/sqlstatements"
_ "github.com/mattn/go-sqlite3"
)
// TestInsertAccountIntoAccountBalanceTable inserts a new account and then
// reads the table back to check its balance is stored and its nonce is 0.
func TestInsertAccountIntoAccountBalanceTable(t *testing.T) {
	somePrivateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	dbName := constants.AccountsTable
	dbc, _ := sql.Open("sqlite3", dbName)
	// Close and delete the on-disk database when the test finishes.
	defer func() {
		err := dbc.Close()
		if err != nil {
			t.Errorf("Failed to remove database: %s", err)
		}
		err = os.Remove(dbName)
		if err != nil {
			t.Errorf("Failed to remove database: %s", err)
		}
	}()
	statement, _ := dbc.Prepare(sqlstatements.CREATE_ACCOUNT_BALANCES_TABLE)
	statement.Exec()
	type args struct {
		dbConnection *sql.DB
		pkhash       []byte
		value        uint64
	}
	encodedSomePublicKey, _ := publickey.Encode(&somePrivateKey.PublicKey)
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			args: args{
				dbc,
				hashing.New(encodedSomePublicKey),
				1000,
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := InsertAccountIntoAccountBalanceTable(tt.args.dbConnection, tt.args.pkhash, tt.args.value); (err != nil) != tt.wantErr {
				t.Errorf("InsertAccountIntoAccountBalanceTable() error = %v, wantErr %v", err, tt.wantErr)
			}
			// Verify the inserted row: balance as given, nonce zero.
			var pkhash string
			var balance uint64
			var nonce uint64
			rows, err := dbc.Query(sqlstatements.GET_PUB_KEY_HASH_BALANCE_NONCE_FROM_ACCOUNT_BALANCES)
			if err != nil {
				t.Errorf("Failed to acquire rows from table")
			}
			for rows.Next() {
				err = rows.Scan(&pkhash, &balance, &nonce)
				if err != nil {
					t.Errorf("failed to scan rows: %s", err)
				}
				decodedPkhash, err := hex.DecodeString(pkhash)
				if err != nil {
					t.Errorf("failed to decode public key hash")
				}
				if bytes.Equal(decodedPkhash, hashing.New(encodedSomePublicKey)) {
					if balance != 1000 {
						t.Errorf("Invalid balance: %d", balance)
					}
					if nonce != 0 {
						t.Errorf("Invalid nonce: %d", nonce)
					}
				}
			}
		})
	}
}
// TestExchangeAndUpdateAccounts seeds a sender and recipient with 1000
// each, applies a 250-value contract and verifies the resulting balances
// (750 / 1250) and nonces (1 / 1).
func TestExchangeAndUpdateAccounts(t *testing.T) {
	senderPrivateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	encodedSenderPublicKey, _ := publickey.Encode(&senderPrivateKey.PublicKey)
	spkh := hashing.New(encodedSenderPublicKey)
	recipientPrivateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	encodedRecipientPublicKey, _ := publickey.Encode(&recipientPrivateKey.PublicKey)
	rpkh := hashing.New(encodedRecipientPublicKey)
	dbName := constants.AccountsTable
	dbc, _ := sql.Open("sqlite3", dbName)
	// Close and delete the on-disk database when the test finishes.
	defer func() {
		err := dbc.Close()
		if err != nil {
			t.Errorf("Failed to remove database: %s", err)
		}
		err = os.Remove(dbName)
		if err != nil {
			t.Errorf("Failed to remove database: %s", err)
		}
	}()
	statement, _ := dbc.Prepare(sqlstatements.CREATE_ACCOUNT_BALANCES_TABLE)
	statement.Exec()
	err := InsertAccountIntoAccountBalanceTable(dbc, spkh, 1000)
	if err != nil {
		t.Errorf("failed to insert sender account")
	}
	err = InsertAccountIntoAccountBalanceTable(dbc, rpkh, 1000)
	if err != nil {
		t.Errorf("failed to insert recipient account")
	}
	c, err := contracts.New(1, senderPrivateKey, rpkh, 250, 1)
	if err != nil {
		t.Errorf("failed to create new contract")
	}
	err = c.Sign(senderPrivateKey)
	if err != nil {
		t.Errorf("failed to sign contract")
	}
	type args struct {
		dbConnection *sql.DB
		contract     *contracts.Contract
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			args: args{
				dbConnection: dbc,
				contract:     c,
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := ExchangeAndUpdateAccounts(tt.args.dbConnection, tt.args.contract); (err != nil) != tt.wantErr {
				t.Errorf("ExchangeAndUpdateAccounts() error = %v, wantErr %v", err, tt.wantErr)
			}
			// BUG FIX: this verification block was previously nested
			// inside the error branch above, so balances and nonces were
			// never checked when the call succeeded. It now runs always.
			var pkhash string
			var balance uint64
			var nonce uint64
			rows, err := dbc.Query(sqlstatements.GET_PUB_KEY_HASH_BALANCE_NONCE_FROM_ACCOUNT_BALANCES)
			if err != nil {
				t.Errorf("Failed to acquire rows from table")
			}
			for rows.Next() {
				err = rows.Scan(&pkhash, &balance, &nonce)
				if err != nil {
					t.Errorf("failed to scan rows: %s", err)
				}
				decodedPkhash, err := hex.DecodeString(pkhash)
				if err != nil {
					t.Errorf("failed to decode public key hash")
				}
				if bytes.Equal(decodedPkhash, spkh) {
					if balance != 750 {
						t.Errorf("Invalid sender balance: %d", balance)
					}
					if nonce != 1 {
						t.Errorf("Invalid sender nonce: %d", nonce)
					}
				} else if bytes.Equal(decodedPkhash, rpkh) {
					if balance != 1250 {
						t.Errorf("Invalid recipient balance: %d", balance)
					}
					if nonce != 1 {
						t.Errorf("Invalid recipient nonce: %d", nonce)
					}
				}
			}
		})
	}
}
// TestMintAurumUpdateAccountBalanceTable mints 1500 into an account seeded
// with 1000 and verifies the balance becomes 2500 and the nonce 1.
func TestMintAurumUpdateAccountBalanceTable(t *testing.T) {
	somePrivateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	encodedSomePublicKey, _ := publickey.Encode(&somePrivateKey.PublicKey)
	spkh := hashing.New(encodedSomePublicKey)
	dbName := constants.AccountsTable
	dbc, _ := sql.Open("sqlite3", dbName)
	// Close and delete the on-disk database when the test finishes.
	defer func() {
		err := dbc.Close()
		if err != nil {
			t.Errorf("Failed to remove database: %s", err)
		}
		err = os.Remove(dbName)
		if err != nil {
			t.Errorf("Failed to remove database: %s", err)
		}
	}()
	statement, _ := dbc.Prepare(sqlstatements.CREATE_ACCOUNT_BALANCES_TABLE)
	statement.Exec()
	err := InsertAccountIntoAccountBalanceTable(dbc, spkh, 1000)
	if err != nil {
		t.Errorf("failed to insert account into balance table")
	}
	type args struct {
		dbConnection *sql.DB
		pkhash       []byte
		value        uint64
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			args: args{
				dbConnection: dbc,
				pkhash:       spkh,
				value:        1500,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := MintAurumUpdateAccountBalanceTable(tt.args.dbConnection, tt.args.pkhash, tt.args.value); (err != nil) != tt.wantErr {
				t.Errorf("MintAurumUpdateAccountBalanceTable() error = %v, wantErr %v", err, tt.wantErr)
			}
			// BUG FIX: this verification block was previously nested
			// inside the error branch above, so the minted balance and
			// nonce were never checked on success. It now runs always.
			var pkhash string
			var balance uint64
			var nonce uint64
			rows, err := dbc.Query(sqlstatements.GET_PUB_KEY_HASH_BALANCE_NONCE_FROM_ACCOUNT_BALANCES)
			if err != nil {
				t.Errorf("Failed to acquire rows from table")
			}
			for rows.Next() {
				err = rows.Scan(&pkhash, &balance, &nonce)
				if err != nil {
					t.Errorf("failed to scan rows: %s", err)
				}
				decodedPkhash, err := hex.DecodeString(pkhash)
				if err != nil {
					t.Errorf("failed to decode public key hash")
				}
				if bytes.Equal(decodedPkhash, spkh) {
					if balance != 2500 {
						t.Errorf("Invalid balance: %d", balance)
					}
					if nonce != 1 {
						t.Errorf("Invalid nonce: %d", nonce)
					}
				}
			}
		})
	}
}
// TestGetBalance checks that GetBalance returns the stored balance for an
// existing account and errors for an unknown public key hash.
func TestGetBalance(t *testing.T) {
	somePrivateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	encodedSomePublicKey, _ := publickey.Encode(&somePrivateKey.PublicKey)
	spkh := hashing.New(encodedSomePublicKey)
	dbName := constants.AccountsTable
	dbc, _ := sql.Open("sqlite3", dbName)
	// Close and delete the on-disk database when the test finishes.
	defer func() {
		err := dbc.Close()
		if err != nil {
			t.Errorf("Failed to remove database: %s", err)
		}
		err = os.Remove(dbName)
		if err != nil {
			t.Errorf("Failed to remove database: %s", err)
		}
	}()
	statement, _ := dbc.Prepare(sqlstatements.CREATE_ACCOUNT_BALANCES_TABLE)
	statement.Exec()
	err := InsertAccountIntoAccountBalanceTable(dbc, spkh, 1000)
	if err != nil {
		t.Errorf("failed to insert sender account")
	}
	type args struct {
		pkhash []byte
	}
	tests := []struct {
		name    string
		args    args
		want    uint64
		wantErr bool
	}{
		{
			args:    args{spkh},
			want:    1000,
			wantErr: false,
		},
		{
			args:    args{[]byte("doesn't exist in table")},
			want:    0,
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := GetBalance(dbc, tt.args.pkhash)
			if (err != nil) != tt.wantErr {
				t.Errorf("GetBalance() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if got != tt.want {
				t.Errorf("GetBalance() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestGetStateNonce checks that a freshly inserted account has nonce 0 and
// that looking up an unknown public key hash errors.
func TestGetStateNonce(t *testing.T) {
	somePrivateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	encodedSomePublicKey, _ := publickey.Encode(&somePrivateKey.PublicKey)
	spkh := hashing.New(encodedSomePublicKey)
	dbName := constants.AccountsTable
	dbc, _ := sql.Open("sqlite3", dbName)
	// Close and delete the on-disk database when the test finishes.
	defer func() {
		err := dbc.Close()
		if err != nil {
			t.Errorf("Failed to remove database: %s", err)
		}
		err = os.Remove(dbName)
		if err != nil {
			t.Errorf("Failed to remove database: %s", err)
		}
	}()
	statement, _ := dbc.Prepare(sqlstatements.CREATE_ACCOUNT_BALANCES_TABLE)
	statement.Exec()
	err := InsertAccountIntoAccountBalanceTable(dbc, spkh, 1000)
	if err != nil {
		t.Errorf("failed to insert sender account")
	}
	type args struct {
		pkhash []byte
	}
	tests := []struct {
		name    string
		args    args
		want    uint64
		wantErr bool
	}{
		{
			args:    args{spkh},
			want:    0,
			wantErr: false,
		},
		{
			args:    args{[]byte("doesn't exist in table")},
			want:    0,
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := GetStateNonce(dbc, tt.args.pkhash)
			if (err != nil) != tt.wantErr {
				t.Errorf("GetStateNonce() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if got != tt.want {
				t.Errorf("GetStateNonce() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestGetAccountInfo checks that GetAccountInfo returns the full
// balance/nonce record for an existing account and errors (with a nil
// result) for an unknown public key hash.
func TestGetAccountInfo(t *testing.T) {
	somePrivateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	encodedSomePublicKey, _ := publickey.Encode(&somePrivateKey.PublicKey)
	spkh := hashing.New(encodedSomePublicKey)
	dbName := constants.AccountsTable
	dbc, _ := sql.Open("sqlite3", dbName)
	// Close and delete the on-disk database when the test finishes.
	defer func() {
		err := dbc.Close()
		if err != nil {
			t.Errorf("Failed to remove database: %s", err)
		}
		err = os.Remove(dbName)
		if err != nil {
			t.Errorf("Failed to remove database: %s", err)
		}
	}()
	statement, _ := dbc.Prepare(sqlstatements.CREATE_ACCOUNT_BALANCES_TABLE)
	statement.Exec()
	err := InsertAccountIntoAccountBalanceTable(dbc, spkh, 1000)
	if err != nil {
		t.Errorf("failed to insert sender account")
	}
	type args struct {
		pkhash []byte
	}
	tests := []struct {
		name    string
		args    args
		want    *accountinfo.AccountInfo
		wantErr bool
	}{
		{
			args:    args{spkh},
			want:    &accountinfo.AccountInfo{1000, 0},
			wantErr: false,
		},
		{
			args:    args{[]byte("this account doesn't exit")},
			want:    nil,
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := GetAccountInfo(dbc, tt.args.pkhash)
			if (err != nil) != tt.wantErr {
				t.Errorf("GetAccountInfo() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("GetAccountInfo() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
/*
Links
* http://afecymog.github.com/1.html
* http://afecymog.github.com/2.html
* http://afecymog.github.com/3.html
* http://afecymog.github.com/4.html
* http://afecymog.github.com/5.html
* http://afecymog.github.com/6.html
* http://afecymog.github.com/7.html
* http://afecymog.github.com/8.html
* http://afecymog.github.com/9.html
* http://afecymog.github.com/10.html
*/
package abc
|
package main
import (
"github.com/spf13/cobra"
"os"
"github.com/alejandroEsc/maas-cli/pkg/cli"
"github.com/spf13/viper"
"github.com/alejandroEsc/golang-maas-client/pkg/api"
"github.com/alejandroEsc/golang-maas-client/pkg/api/v2"
"encoding/json"
"fmt"
"net/url"
)
// versionCmd builds the `version` cobra command, wiring the MAAS
// connection flags (defaulting from viper config) into VersionOptions.
func versionCmd() *cobra.Command {
	vo := &cli.VersionOptions{}
	cmd := &cobra.Command{
		Use:   "version",
		Short: "Get Version info",
		Long:  "",
		Run: func(cmd *cobra.Command, args []string) {
			if err := runVersionCmd(vo); err != nil {
				logger.Criticalf(err.Error())
				os.Exit(1)
			}
		},
	}
	// Flags fall back to the viper-configured values when not set.
	fs := cmd.Flags()
	fs.StringVar(&vo.APIKey, "api-key", viper.GetString(keyAPIKey), "maas apikey")
	fs.StringVar(&vo.MAASURLKey, "maas-url", viper.GetString(keyMAASURL), "maas url")
	fs.StringVar(&vo.MAASAPIVersionKey, "api-version", viper.GetString(keyMAASAPIVersion), "maas api version")
	return cmd
}
// runVersionCmd connects to the MAAS endpoint, fetches the "version"
// resource, decodes it and prints version and sub-version.
func runVersionCmd(o *cli.VersionOptions) error {
	var err error
	// NOTE(review): NewMASS looks like a typo for NewMAAS, but it is the
	// upstream client's exported name — confirm against the library.
	maas, err := api.NewMASS(o.MAASURLKey, o.MAASAPIVersionKey, o.APIKey)
	if err != nil {
		return err
	}
	versionBytes, err := maas.Get("version", "", url.Values{})
	if err != nil {
		return err
	}
	var version v2.Version
	err = json.Unmarshal(versionBytes, &version)
	if err != nil {
		return err
	}
	fmt.Printf("Version: %s\nSubVersion %s\n", version.Version, version.SubVersion)
	return nil
}
|
package main
import (
"fmt"
"strconv"
)
// define person struct
type Person struct {
	// firstName string
	// lastName string
	// age int
	// gender string
	firstName, lastName, gender string // gender is "M" or "F" (see getMarried)
	age                         int
}
// greet returns a self-introduction string (value receiver).
func (p Person) greet() string {
	return fmt.Sprintf("Hello, my name is %s %s and I am %d years old.", p.firstName, p.lastName, p.age)
}
// hasBday increments the person's age in place (pointer receiver).
func (p *Person) hasBday() {
	p.age = p.age + 1
}
// getMarried assigns the spouse's last name, but only when gender is "F";
// everyone else keeps their name (pointer receiver).
func (p *Person) getMarried(spouseLastName string) {
	if p.gender != "F" {
		return
	}
	p.lastName = spouseLastName
}
// main demonstrates value vs. pointer receivers on Person.
func main() {
	// init person struct
	p1 := Person{"Abhinav", "Robinson", "M", 20}
	fmt.Println(p1)
	// get single field
	fmt.Println(p1.firstName)
	// change value with ptr reciever
	p1.hasBday()
	// create another person
	p2 := Person{"Samantha", "Smith", "F", 20}
	fmt.Println(p2.greet())
	// value reciever
	fmt.Println(p1.greet())
	// getMarried only changes the last name when gender is "F",
	// so p1 keeps "Robinson" while p2 becomes "Robinson".
	p1.getMarried("Smith")
	p2.getMarried("Robinson")
	fmt.Println(p2.greet())
}
|
package main
import (
"github.com/walln/flurry2/flurry"
)
// main initializes the flurry server and blocks serving requests.
func main() {
	server := flurry.Initialize()
	server.ListenAndServe()
}
|
package couchdb
import (
)
// DatabaseCreate issues a PUT for the named database and parses the
// generic CouchDB response, returning any transport or API error.
func (c *CouchDb) DatabaseCreate(name string) error {
	c.CountCalls++
	buf, err := c.call("PUT", name, nil, nil)
	if err != nil {
		return err
	}
	_, err = parseGenericReturn(buf)
	return err
}
// DatabaseDelete issues a DELETE for the named database and parses the
// generic CouchDB response, returning any transport or API error.
func (c *CouchDb) DatabaseDelete(name string) error {
	c.CountCalls++
	buf, err := c.call("DELETE", name, nil, nil)
	if err != nil {
		return err
	}
	_, err = parseGenericReturn(buf)
	return err
}
|
package main
import (
"launchpad.net/xmlpath"
"log"
"sync"
"strings"
//"fmt"
//"text/template"
)
// front_process seeds c_front_urls with the given listing URLs and starts
// one front_worker goroutine per seed URL, then blocks until all workers
// (including workers they spawn for "next page" links) have finished.
//
// NOTE(review): the send on c_front_urls happens before the matching
// worker is started, so c_front_urls must be buffered (or already have
// idle receivers) or this loop deadlocks — confirm with the caller that
// creates the channel.
func front_process(list_urls []string, c_front_urls chan string, c_doc_urls chan string) {
	var wg sync.WaitGroup
	// fetched pages travel from curl to the parsers over this buffered channel
	c_front_page := make(chan []byte, 1000)
	for _, url := range list_urls {
		c_front_urls <- url
		wg.Add(1)
		go front_worker(c_front_urls, c_front_page, c_doc_urls, &wg)
	}
	wg.Wait()
	log.Println("Front Process Done")
}
// front_worker takes one listing URL from c_front_urls, fetches it with
// curl (result arrives on c_front_page), and hands the page to
// front_parse, which may spawn further workers for pagination.
func front_worker(c_front_urls chan string, c_front_page chan []byte, c_doc_urls chan string, wg *sync.WaitGroup) {
	defer wg.Done() //call at front_worker func exit
	front_url := <-c_front_urls
	curl(front_url, c_front_page)
	front_parse(c_front_urls, c_front_page, c_doc_urls, wg)
}
// front_parse consumes one fetched listing page from c_front_page,
// extracts document URLs (sent to c_doc_urls) and, if a "next page" link
// exists, enqueues it on c_front_urls and spawns a new front_worker to
// crawl it; the recursion ends when no next link is found.
func front_parse(c_front_urls chan string, c_front_page chan []byte, c_doc_urls chan string, wg *sync.WaitGroup) {
	front_page := <-c_front_page
	//fmt.Printf("%s\n", string(front_page))
	//path := xmlpath.MustCompile("/html/body/div/div[2]/div/div[3]/div/div[1]/div[1]/h1/text()") //title
	// XPath for the per-document links inside the listing body
	doc_urls_xpath := xmlpath.MustCompile("/html/body/div[@id=\"page_align\"]/div[@id=\"page_width\"]/div[@id=\"ContainerMain\"]/div[@class=\"content-border list\"]/div[@class=\"content-color\"]/div[@class=\"list-lbc\"]//a/@href") //doc urls
	// XPath for the pagination entries; the last matching node is "next"
	next_front_urls_xpath := xmlpath.MustCompile("/html/body/div[@id=\"page_align\"]/div[@id=\"page_width\"]/div[@id=\"ContainerMain\"]/nav/ul[@id=\"paging\"]/li[@class=\"page\"]") //next url
	/*
	   front_page_noscript := remove_noscript(front_page)
	   fix_html := fix_broken_html(front_page_noscript)
	   utf8_reader := decode_utf8(fix_html)
	   root, err := xmlpath.ParseHTML(utf8_reader)*/
	// clean up the raw HTML (encoding, <noscript>, malformed markup) before parsing
	utf8_reader := decode_utf8( string(front_page) )
	doc_page_noscript := remove_noscript( utf8_reader )
	fix_html := fix_broken_html(doc_page_noscript)
	//fmt.Println(string(fix_html))
	root, err := xmlpath.ParseHTML( strings.NewReader(fix_html) )
	if err != nil {
		// a page that cannot be parsed aborts the whole crawl
		log.Fatal("FRONT PAGE",err)
	}
	doc_urls := doc_urls_xpath.Iter(root)
	for doc_urls.Next() {
		doc_url := doc_urls.Node().String()
		c_doc_urls <- doc_url
		//log.Println( "Doc URL:", doc_url) //<-- DOC URL
	}
	// keep only the last pagination node — presumably the "next page"
	// entry; verify this matches the site's markup
	prev_next_front_urls := next_front_urls_xpath.Iter(root)
	var node *xmlpath.Node
	for prev_next_front_urls.Next() {
		node = prev_next_front_urls.Node()
	}
	href_xpath := xmlpath.MustCompile("a/@href")
	if next_front_url, ok := href_xpath.String(node); ok {
		c_front_urls <- next_front_url
		log.Println("Next Front URL:", next_front_url)
		wg.Add(1)
		go front_worker(c_front_urls, c_front_page, c_doc_urls, wg)
	} else {
		log.Println("No Next Front URL")
		log.Println("Front DONE")
		return
	}
}
|
package main
import "fmt"
// main prints the classic 9x9 multiplication triangle, one row per
// multiplier, with entries separated by spaces and rows by newlines.
func main() {
	for row := 1; row <= 9; row++ {
		for col := 1; col <= row; col++ {
			sep := " "
			if col == row {
				sep = "\n"
			}
			fmt.Printf("%d*%d=%d%s", col, row, col*row, sep)
		}
	}
}
|
package model
import "github.com/SDkie/metric_collector/db"
//go:generate easytags metric_pg.go json
//go:generate easytags metric_pg.go sql
// MetricPg is the Postgres-backed metric row: an auto-increment primary
// key plus the shared metric fields embedded from MetricStruct.
type MetricPg struct {
	Id int64 `sql:"id" gorm:"primary_key" json:"id"`
	MetricStruct
}
// InitPg initializes the Postgres connection and ensures the metric
// table exists.
func InitPg() {
	db.InitPg()
	client := db.GetPg()
	client.CreateTable(&MetricPg{})
}
// Insert persists m as a new row and reports any database error.
func (m *MetricPg) Insert() error {
	result := db.GetPg().Create(m)
	return result.Error
}
|
/*
Given the heads of two singly linked-lists headA and headB, return the node at which the two lists intersect. If the two linked lists have no intersection at all, return null.
For example, the following two linked lists begin to intersect at node c1:
The test cases are generated such that there are no cycles anywhere in the entire linked structure.
Note that the linked lists must retain their original structure after the function returns.
Custom Judge:
The inputs to the judge are given as follows (your program is not given these inputs):
intersectVal - The value of the node where the intersection occurs. This is 0 if there is no intersected node.
listA - The first linked list.
listB - The second linked list.
skipA - The number of nodes to skip ahead in listA (starting from the head) to get to the intersected node.
skipB - The number of nodes to skip ahead in listB (starting from the head) to get to the intersected node.
The judge will then create the linked structure based on these inputs and pass the two heads, headA and headB to your program. If you correctly return the intersected node, then your solution will be accepted.
Example 1:
Input: intersectVal = 8, listA = [4,1,8,4,5], listB = [5,6,1,8,4,5], skipA = 2, skipB = 3
Output: Intersected at '8'
Explanation: The intersected node's value is 8 (note that this must not be 0 if the two lists intersect).
From the head of A, it reads as [4,1,8,4,5]. From the head of B, it reads as [5,6,1,8,4,5]. There are 2 nodes before the intersected node in A; There are 3 nodes before the intersected node in B.
- Note that the intersected node's value is not 1 because the nodes with value 1 in A and B (2nd node in A and 3rd node in B) are different node references. In other words, they point to two different locations in memory, while the nodes with value 8 in A and B (3rd node in A and 4th node in B) point to the same location in memory.
Example 2:
Input: intersectVal = 2, listA = [1,9,1,2,4], listB = [3,2,4], skipA = 3, skipB = 1
Output: Intersected at '2'
Explanation: The intersected node's value is 2 (note that this must not be 0 if the two lists intersect).
From the head of A, it reads as [1,9,1,2,4]. From the head of B, it reads as [3,2,4]. There are 3 nodes before the intersected node in A; There are 1 node before the intersected node in B.
Example 3:
Input: intersectVal = 0, listA = [2,6,4], listB = [1,5], skipA = 3, skipB = 2
Output: No intersection
Explanation: From the head of A, it reads as [2,6,4]. From the head of B, it reads as [1,5]. Since the two lists do not intersect, intersectVal must be 0, while skipA and skipB can be arbitrary values.
Explanation: The two lists do not intersect, so return null.
Constraints:
The number of nodes of listA is m.
The number of nodes of listB is n.
1 <= m, n <= 3 * 104
1 <= Node.val <= 105
0 <= skipA < m
0 <= skipB < n
intersectVal is 0 if listA and listB do not intersect.
intersectVal == listA[skipA] == listB[skipB] if listA and listB intersect.
Follow up: Could you write a solution that runs in O(m + n) time and use only O(1) memory?
*/
package main
import "fmt"
// main runs three intersection scenarios: shared tail of length 3,
// shared tail of length 2, and no intersection at all.
func main() {
	cases := []struct{ a, b, c []int }{
		{[]int{4, 1}, []int{5, 6, 1}, []int{8, 4, 5}},
		{[]int{1, 9, 1}, []int{3}, []int{2, 4}},
		{[]int{2, 6, 4}, []int{1, 5}, nil},
	}
	for _, cs := range cases {
		test(cs.a, cs.b, cs.c)
	}
}
// assert panics when the supplied condition does not hold; used by the
// scenarios in test to validate intersection results.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// test builds two lists sharing tail c (a+c and b+c), checks that
// intersect finds the head of the shared tail, and prints the result.
func test(a, b, c []int) {
	headA, tailA := newlist(a...)
	headB, tailB := newlist(b...)
	shared, _ := newlist(c...)
	tailA.next = shared
	tailB.next = shared
	got := intersect(headA, headB)
	printlist(got)
	assert(got == shared)
}
// List is a minimal singly linked list node.
type List struct {
	val  int   // payload value
	next *List // nil marks the end of the list
}
// newlist builds a singly linked list from vals in order and returns its
// head and tail (both nil for empty input). The loop variable no longer
// shadows the variadic parameter, which the original did with `v`.
func newlist(vals ...int) (h, t *List) {
	for _, val := range vals {
		n := &List{val: val}
		if h == nil {
			// first node becomes both head and tail
			h, t = n, n
		} else {
			t.next, t = n, n
		}
	}
	return
}
// printlist writes the list as "v1 -> v2 -> ..." followed by a newline,
// or "(nil)" for an empty list.
func printlist(l *List) {
	if l == nil {
		fmt.Println("(nil)")
		return
	}
	for n := l; n != nil; n = n.next {
		fmt.Printf("%d", n.val)
		if n.next != nil {
			fmt.Printf(" -> ")
		}
	}
	fmt.Println()
}
// https://www.geeksforgeeks.org/write-a-function-to-get-the-intersection-point-of-two-linked-lists/
// intersect returns the first node shared by lists a and b, or nil when
// they do not intersect. Two-pointer technique, O(m+n) time and O(1)
// space: each cursor walks its own list then restarts on the other, so
// both travel exactly len(a)+len(b) steps before either meeting at the
// intersection or becoming nil on the same step.
func intersect(a, b *List) *List {
	p, q := a, b
	if p == nil || q == nil {
		return nil
	}
	for p != q {
		p, q = p.next, q.next
		// equality is tested before the nil-reset below: with no
		// intersection both cursors reach nil simultaneously (after
		// len(a)+len(b) steps) and nil is returned here
		if p == q {
			return p
		}
		// a cursor that ran off its list continues from the other head
		if p == nil {
			p = b
		}
		if q == nil {
			q = a
		}
	}
	return p
}
|
package kinetic
import (
"runtime"
"sync/atomic"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/firehose"
. "github.com/smartystreets/goconvey/convey"
)
// TestFireHose sends 100 messages through a producer backed by the
// fakefirehose mock, then asserts both the mock's delivery counter and
// the producer's internal message count reached 100.
//
// NOTE(review): the fixed 10s sleep makes this slow and timing
// dependent — confirm whether the producer exposes a flush/drain hook.
func TestFireHose(t *testing.T) {
	producer, _ := new(Firehose).InitC("your-stream", "", "", "accesskey", "secretkey", "us-east-1", 4)
	producer.NewEndpoint("localhost", "your-stream")
	producer.(*Firehose).client = new(fakefirehose)
	producer.ReInit()
	Convey("Given a running firehose producer", t, func() {
		Convey("it should send data to the firehose stream", func() {
			for i := 0; i < 100; i++ {
				producer.Send(new(Message).Init([]byte("this is a message"), ""))
				runtime.Gosched()
			}
			// allow asynchronous batch flushing to finish before asserting
			time.Sleep(10 * time.Second)
			So(atomic.LoadInt64(&(producer.(*Firehose).client.(*fakefirehose).count)), ShouldEqual, 100)
			So(producer.(*Firehose).getMsgCount(), ShouldEqual, 100)
		})
	})
}
// TestFireHoseSendSync sends a single message and asserts the mock saw
// exactly one PutRecordBatch and the producer counted one message.
func TestFireHoseSendSync(t *testing.T) {
	producer, _ := new(Firehose).InitC("your-stream", "", "", "accesskey", "secretkey", "us-east-1", 4)
	producer.NewEndpoint("localhost", "your-stream")
	producer.(*Firehose).client = new(fakefirehose)
	producer.ReInit()
	Convey("Given a running firehose producer", t, func() {
		Convey("it should send data to the firehose stream", func() {
			producer.Send(new(Message).Init([]byte("this is a message"), ""))
			runtime.Gosched()
			// give the async sender time to deliver the single batch
			time.Sleep(1 * time.Second)
			So(atomic.LoadInt64(&(producer.(*Firehose).client.(*fakefirehose).count)), ShouldEqual, 1)
			So(producer.(*Firehose).getMsgCount(), ShouldEqual, 1)
		})
	})
}
// Mocks for aws Firehose.
// This implements github.com/aws/aws-sdk-go/service/firehose/firehoseiface.FirehoseAPI
type fakefirehose struct {
	// count tracks PutRecordBatch invocations; always accessed via sync/atomic.
	count int64
}
// The following methods are no-op stubs that exist only to satisfy the
// FirehoseAPI interface; they return nil for both values.
func (f *fakefirehose) CreateDeliveryStreamRequest(*firehose.CreateDeliveryStreamInput) (*request.Request, *firehose.CreateDeliveryStreamOutput) {
	return nil, nil
}
func (f *fakefirehose) CreateDeliveryStream(*firehose.CreateDeliveryStreamInput) (*firehose.CreateDeliveryStreamOutput, error) {
	return nil, nil
}
func (f *fakefirehose) DeleteDeliveryStreamRequest(*firehose.DeleteDeliveryStreamInput) (*request.Request, *firehose.DeleteDeliveryStreamOutput) {
	return nil, nil
}
func (f *fakefirehose) DeleteDeliveryStream(*firehose.DeleteDeliveryStreamInput) (*firehose.DeleteDeliveryStreamOutput, error) {
	return nil, nil
}
func (f *fakefirehose) DescribeDeliveryStreamRequest(*firehose.DescribeDeliveryStreamInput) (*request.Request, *firehose.DescribeDeliveryStreamOutput) {
	return nil, nil
}
func (f *fakefirehose) DescribeDeliveryStream(*firehose.DescribeDeliveryStreamInput) (*firehose.DescribeDeliveryStreamOutput, error) {
	return nil, nil
}
func (f *fakefirehose) ListDeliveryStreamsRequest(*firehose.ListDeliveryStreamsInput) (*request.Request, *firehose.ListDeliveryStreamsOutput) {
	return nil, nil
}
func (f *fakefirehose) ListDeliveryStreams(*firehose.ListDeliveryStreamsInput) (*firehose.ListDeliveryStreamsOutput, error) {
	return nil, nil
}
func (f *fakefirehose) PutRecordRequest(*firehose.PutRecordInput) (*request.Request, *firehose.PutRecordOutput) {
	return nil, nil
}
func (f *fakefirehose) PutRecord(*firehose.PutRecordInput) (*firehose.PutRecordOutput, error) {
	return nil, nil
}
func (f *fakefirehose) PutRecordBatchRequest(*firehose.PutRecordBatchInput) (*request.Request, *firehose.PutRecordBatchOutput) {
	return nil, nil
}
// PutRecordBatch records the call by atomically incrementing the mock's
// counter and returns a minimal successful response with one entry.
func (f *fakefirehose) PutRecordBatch(*firehose.PutRecordBatchInput) (*firehose.PutRecordBatchOutput, error) {
	atomic.AddInt64(&f.count, 1)
	return &firehose.PutRecordBatchOutput{
		// `{{}}`: go vet flags the redundant element type inside a typed
		// slice literal, so it is omitted here
		RequestResponses: []*firehose.PutRecordBatchResponseEntry{{}},
	}, nil
}
// UpdateDestination* are no-op interface stubs like the ones above.
func (f *fakefirehose) UpdateDestinationRequest(*firehose.UpdateDestinationInput) (*request.Request, *firehose.UpdateDestinationOutput) {
	return nil, nil
}
func (f *fakefirehose) UpdateDestination(*firehose.UpdateDestinationInput) (*firehose.UpdateDestinationOutput, error) {
	return nil, nil
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//792. Number of Matching Subsequences
//Given string S and a dictionary of words words, find the number of words[i] that is a subsequence of S.
//Example :
//Input:
//S = "abcde"
//words = ["a", "bb", "acd", "ace"]
//Output: 3
//Explanation: There are three words in words that are a subsequence of S: "a", "acd", "ace".
//Note:
//All words in words and S will only consists of lowercase letters.
//The length of S will be in the range of [1, 50000].
//The length of words will be in the range of [1, 5000].
//The length of words[i] will be in the range of [1, 50].
//func numMatchingSubseq(S string, words []string) int {
//}
// Time Is Money |
/*
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package consumer
import (
	"math"
	"testing"
	"time"
)
// almostEqual reports whether a and b differ by less than 1% of |a|.
// Using math.Abs on the denominator fixes the original sign bug: for a
// negative reference value, diff/a was negative and therefore always
// satisfied < 0.01 regardless of b. A zero reference yields +Inf and
// thus returns false for any b != a, which suits the duration checks here.
func almostEqual(a, b float64) bool {
	diff := math.Abs(a - b)
	return diff/math.Abs(a) < 0.01
}
// abs returns the absolute value of a.
func abs(a float64) float64 {
	if a < 0 {
		return -a
	}
	return a
}
// TestNextMinuteTime checks that nextMinutesTime returns an instant
// roughly one minute from now (within almostEqual's 1% tolerance).
func TestNextMinuteTime(t *testing.T) {
	nextMinute := nextMinutesTime()
	// time.Until is the idiomatic spelling of t.Sub(time.Now())
	minuteElapse := time.Until(nextMinute).Minutes()
	if !almostEqual(minuteElapse, 1.0) {
		t.Errorf("wrong next one minute. want=%f, got=%f", 1.0, minuteElapse)
	}
}
// TestNextHourTime checks that nextHourTime returns an instant roughly
// one hour from now (within almostEqual's 1% tolerance).
func TestNextHourTime(t *testing.T) {
	nextHour := nextHourTime()
	// time.Until is the idiomatic spelling of t.Sub(time.Now())
	hourElapse := time.Until(nextHour).Hours()
	if !almostEqual(hourElapse, 1.0) {
		t.Errorf("wrong next one hour. want=%f, got=%f", 1.0, hourElapse)
	}
}
// TestIncreasePullRTGetPullRT verifies that repeated pull-RT increments
// accumulate in the per-second sample snapshot (sum caps at 6 because
// the sampling window holds a bounded number of entries).
func TestIncreasePullRTGetPullRT(t *testing.T) {
	mgr := NewStatsManager()
	mgr.ShutDownStat()
	cases := []struct {
		rt      int64
		wantSum int64
	}{
		{1, 0}, {1, 1}, {1, 2}, {1, 3}, {1, 4}, {1, 5}, {1, 6}, {1, 6},
	}
	for _, tc := range cases {
		mgr.increasePullRT("rocketmq", "default", tc.rt)
		mgr.topicAndGroupPullRT.samplingInSeconds()
		snapshot := mgr.getPullRT("rocketmq", "default")
		if snapshot.sum != tc.wantSum {
			t.Errorf("wrong Pull RT sum. want=%d, got=%d", tc.wantSum, snapshot.sum)
		}
	}
}
//func TestIncreaseConsumeRTGetConsumeRT(t *testing.T) {
// ShutDownStat()
// tests := []struct {
// RT int64
// ExpectSum int64
// }{
// {1, 0},
// {1, 1},
// {1, 2},
// {1, 3},
// {1, 4},
// {1, 5},
// {1, 6},
// {1, 6},
// }
// for _, tt := range tests {
// increaseConsumeRT("rocketmq", "default", tt.RT)
// topicAndGroupConsumeRT.samplingInMinutes()
// snapshot := getConsumeRT("rocketmq", "default")
// if snapshot.sum != tt.ExpectSum {
// t.Errorf("wrong consume RT sum. want=%d, got=%d", tt.ExpectSum, snapshot.sum)
// }
// }
//}
// TestIncreasePullTPSGetPullTPS verifies that repeated pull-TPS
// increments accumulate in the per-second sample snapshot.
func TestIncreasePullTPSGetPullTPS(t *testing.T) {
	mgr := NewStatsManager()
	mgr.ShutDownStat()
	cases := []struct {
		rt      int
		wantSum int64
	}{
		{1, 0}, {1, 1}, {1, 2}, {1, 3}, {1, 4}, {1, 5}, {1, 6}, {1, 6},
	}
	for _, tc := range cases {
		mgr.increasePullTPS("rocketmq", "default", tc.rt)
		mgr.topicAndGroupPullTPS.samplingInSeconds()
		snapshot := mgr.getPullTPS("rocketmq", "default")
		if snapshot.sum != tc.wantSum {
			t.Errorf("wrong Pull TPS sum. want=%d, got=%d", tc.wantSum, snapshot.sum)
		}
	}
}
// TestIncreaseConsumeOKTPSGetConsumeOKTPS verifies that consume-OK TPS
// increments accumulate in the per-second sample snapshot.
func TestIncreaseConsumeOKTPSGetConsumeOKTPS(t *testing.T) {
	mgr := NewStatsManager()
	mgr.ShutDownStat()
	cases := []struct {
		rt      int
		wantSum int64
	}{
		{1, 0}, {1, 1}, {1, 2}, {1, 3}, {1, 4}, {1, 5}, {1, 6}, {1, 6},
	}
	for _, tc := range cases {
		mgr.increaseConsumeOKTPS("rocketmq", "default", tc.rt)
		mgr.topicAndGroupConsumeOKTPS.samplingInSeconds()
		snapshot := mgr.getConsumeOKTPS("rocketmq", "default")
		if snapshot.sum != tc.wantSum {
			t.Errorf("wrong Consume OK TPS sum. want=%d, got=%d", tc.wantSum, snapshot.sum)
		}
	}
}
// TestIncreaseConsumeFailedTPSGetConsumeFailedTPS verifies that
// consume-failed TPS increments accumulate in the sample snapshot.
func TestIncreaseConsumeFailedTPSGetConsumeFailedTPS(t *testing.T) {
	mgr := NewStatsManager()
	mgr.ShutDownStat()
	cases := []struct {
		rt      int
		wantSum int64
	}{
		{1, 0}, {1, 1}, {1, 2}, {1, 3}, {1, 4}, {1, 5}, {1, 6}, {1, 6},
	}
	for _, tc := range cases {
		mgr.increaseConsumeFailedTPS("rocketmq", "default", tc.rt)
		mgr.topicAndGroupConsumeFailedTPS.samplingInSeconds()
		snapshot := mgr.getConsumeFailedTPS("rocketmq", "default")
		if snapshot.sum != tc.wantSum {
			t.Errorf("wrong Consume Failed TPS sum. want=%d, got=%d", tc.wantSum, snapshot.sum)
		}
	}
}
// TestGetConsumeStatus feeds all five stat streams, samples each, and
// checks that the aggregated ConsumeFailedMsgs matches the expectation.
func TestGetConsumeStatus(t *testing.T) {
	mgr := NewStatsManager()
	mgr.ShutDownStat()
	group, topic := "rocketmq", "default"
	cases := []struct {
		rt       int
		wantFail int64
	}{
		{1, 0}, {1, 1}, {1, 2}, {1, 3}, {1, 4},
	}
	for _, tc := range cases {
		mgr.increasePullRT(group, topic, int64(tc.rt))
		mgr.increasePullTPS(group, topic, tc.rt)
		mgr.increaseConsumeRT(group, topic, int64(tc.rt))
		mgr.increaseConsumeOKTPS(group, topic, tc.rt)
		mgr.increaseConsumeFailedTPS(group, topic, tc.rt)
		mgr.topicAndGroupPullRT.samplingInSeconds()
		mgr.topicAndGroupPullTPS.samplingInSeconds()
		mgr.topicAndGroupConsumeRT.samplingInMinutes()
		mgr.topicAndGroupConsumeOKTPS.samplingInSeconds()
		mgr.topicAndGroupConsumeFailedTPS.samplingInMinutes()
		status := mgr.GetConsumeStatus(group, topic)
		if status.ConsumeFailedMsgs != tc.wantFail {
			t.Errorf("wrong ConsumeFailedMsg. want=0, got=%d", status.ConsumeFailedMsgs)
		}
	}
}
// TestNewStatsManager exercises a live StatsManager's background
// sampling by feeding pull-TPS increments for five minutes, then shuts
// it down. NOTE: this is a five-minute wall-clock test by design.
func TestNewStatsManager(t *testing.T) {
	stats := NewStatsManager()
	start := time.Now()
	for {
		stats.increasePullTPS("rocketmq", "default", 1)
		time.Sleep(500 * time.Millisecond)
		// time.Since is the idiomatic spelling of time.Now().Sub(start)
		if time.Since(start) > 5*time.Minute {
			break
		}
	}
	stats.ShutDownStat()
}
|
package base
import (
"context"
"github.com/thoohv5/template/internal/ent"
"github.com/thoohv5/template/pkg/log"
)
// Base bundles the shared dependencies (transaction, logger, client)
// configured through the functional options below.
type Base struct {
	options
}
// New constructs a Base with the supplied options applied in order.
func New(opts ...Option) *Base {
	var cfg options
	for _, opt := range opts {
		opt.apply(&cfg)
	}
	return &Base{options: cfg}
}
// options holds the configurable dependencies of a Base.
type options struct {
	tx *ent.Tx
	log log.ILog
	client *ent.Client
}
// Option configures a Base during construction.
type Option interface {
	apply(*options)
}
// optionFunc adapts a plain function to the Option interface.
type optionFunc func(*options)
func (f optionFunc) apply(opts *options) {
	f(opts)
}
// WithTx sets the transaction handle used by the Base.
func WithTx(tx *ent.Tx) Option {
	return optionFunc(func(o *options) { o.tx = tx })
}

// WithLog sets the logger used by the Base.
func WithLog(log log.ILog) Option {
	return optionFunc(func(o *options) { o.log = log })
}

// WithClient sets the ent client used by the Base.
func WithClient(client *ent.Client) Option {
	return optionFunc(func(o *options) { o.client = client })
}
// GetTx returns the configured transaction (may be nil).
func (b *Base) GetTx() *ent.Tx {
	return b.tx
}

// GetLogger returns the configured logger (may be nil).
func (b *Base) GetLogger() log.ILog {
	return b.log
}

// GetClient returns the configured ent client (may be nil).
func (b *Base) GetClient() *ent.Client {
	return b.client
}
// Exist reports whether the identity exists.
// NOTE(review): this is a stub that always returns (true, nil).
func (b *Base) Exist(ctx context.Context, identity string) (bool, error) {
	return true, nil
}
// Create creates a record for the identity with extra data.
// NOTE(review): this is a stub that always succeeds without doing anything.
func (b *Base) Create(ctx context.Context, identity string, extra string) error {
	return nil
}
|
package layers
import (
"encoding/base64"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"grm-service/common"
"grm-service/geoserver"
"grm-service/log"
. "grm-service/util"
"data-manager/types"
"github.com/emicklei/go-restful"
//"grm-service/log"
)
// getLayers lists all layers of the dataset identified by the data-id
// path parameter, scoped to the user resolved from the auth session.
func (s DataLayerSvc) getLayers(req *restful.Request, res *restful.Response) {
	userId, err := s.DynamicDB.GetUserId(req.HeaderParameter("auth-session"))
	if err != nil {
		ResWriteError(res, err)
		return
	}
	dataId := req.PathParameter("data-id")
	// fetch the dataset's display name
	name, _, err := s.MetaDB.GetDataPath(dataId)
	if err != nil {
		ResWriteError(res, err)
		return
	}
	ret, err := s.SysDB.GetDataLayers(dataId, userId, name)
	if err != nil {
		ResWriteError(res, err)
		return
	}
	// an empty result is written as an explicit nil body
	if len(ret) > 0 {
		ResWriteHeaderEntity(res, &ret)
	} else {
		ResWriteHeaderEntity(res, nil)
	}
}
// getLayer returns a single layer of a dataset, enriched with the
// dataset's display name.
func (s DataLayerSvc) getLayer(req *restful.Request, res *restful.Response) {
	dataId := req.PathParameter("data-id")
	layerId := req.PathParameter("layer-id")
	// fetch the dataset's display name
	name, _, err := s.MetaDB.GetDataPath(dataId)
	if err != nil {
		ResWriteError(res, err)
		return
	}
	ret, err := s.SysDB.GetDataLayer(dataId, layerId)
	if err != nil {
		ResWriteError(res, err)
		return
	}
	ret.DataName = name
	ResWriteHeaderEntity(res, ret)
}
// addLayer creates a new layer for a dataset: it publishes the shapefile
// layer to GeoServer, applies the requested style, and persists the
// layer record in the system database.
func (s DataLayerSvc) addLayer(req *restful.Request, res *restful.Response) {
	userId, err := s.DynamicDB.GetUserId(req.HeaderParameter("auth-session"))
	if err != nil {
		ResWriteError(res, err)
		return
	}
	var args addLayerReq
	if err := req.ReadEntity(&args); err != nil {
		ResWriteError(res, err)
		return
	}
	if len(args.Name) == 0 || len(userId) == 0 {
		ResWriteError(res, fmt.Errorf(TR("Invalid dataset name or user id")))
		return
	}
	dataId := req.PathParameter("data-id")
	layer := common.DataLayer{
		Layer:       NewUUID(),
		Name:        args.Name,
		Data:        dataId,
		User:        userId,
		Style:       args.Style,
		Description: args.Description,
		IsDefault:   args.IsDefault,
	}
	// publish the layer to GeoServer
	geoStorage, err := s.MetaDB.GetDeviceGeoStorage(dataId)
	if err != nil {
		ResWriteError(res, err)
		return
	}
	srs, wmsUrl, wmtsUrl, wms, wfs, wmts, err := s.GeoServer.AddShpLayer(geoserver.GeoWorkSpace,
		geoStorage, dataId, layer.Layer)
	if err != nil {
		ResWriteError(res, err)
		return
	}
	layer.Srs = srs
	layer.WmsUrl = wmsUrl
	layer.WmtsUrl = wmtsUrl
	layer.WMS = wms
	layer.Wfs = wfs
	layer.Wmts = wmts
	// apply the layer style: "1"/"2"/"3" select the built-in point/line/
	// polygon styles; anything else names a user-defined style
	layerId := fmt.Sprintf("lyr-%s_%s", dataId, layer.Layer)
	var style string
	switch layer.Style {
	case "1":
		style = "point"
	case "2":
		style = "line"
	case "3":
		style = "polygon"
	default:
		style = userId + "_" + layer.Style
	}
	if len(layer.Style) > 0 {
		fmt.Println("layer:", layerId, ",style:", style)
		if err := s.GeoServer.SetLayerStyle(layerId, style); err != nil {
			ResWriteError(res, err)
			return
		}
	}
	ret, err := s.SysDB.AddDataLayer(&layer)
	if err != nil {
		ResWriteError(res, err)
		return
	}
	ResWriteHeaderEntity(res, ret)
}
// delLayer removes a layer record from the system database and then
// unpublishes the corresponding GeoServer shapefile layer.
func (s DataLayerSvc) delLayer(req *restful.Request, res *restful.Response) {
	userId, err := s.DynamicDB.GetUserId(req.HeaderParameter("auth-session"))
	if err != nil {
		ResWriteError(res, err)
		return
	}
	if err := s.SysDB.DelDataLayer(req.PathParameter("layer-id"), userId); err != nil {
		ResWriteError(res, err)
		return
	}
	// GeoServer layer names are "lyr-<dataId>_<layerId>"
	name := fmt.Sprintf("lyr-%s_%s", req.PathParameter("data-id"), req.PathParameter("layer-id"))
	if err := s.GeoServer.DeleteShpLayer(name); err != nil {
		ResWriteError(res, err)
		return
	}
	ResWriteHeaderEntity(res, nil)
}
// updateLayer updates a layer's style in GeoServer (when a style is
// provided) and then persists the changed fields to the database.
func (s DataLayerSvc) updateLayer(req *restful.Request, res *restful.Response) {
	userId, err := s.DynamicDB.GetUserId(req.HeaderParameter("auth-session"))
	if err != nil {
		ResWriteError(res, err)
		return
	}
	dataId := req.PathParameter("data-id")
	layerId := req.PathParameter("layer-id")
	layer := fmt.Sprintf("lyr-%s_%s", dataId, layerId)
	var args types.UpdateLayerReq
	if err := req.ReadEntity(&args); err != nil {
		ResWriteError(res, err)
		return
	}
	// apply the new style in GeoServer first
	if len(args.Style) > 0 {
		if err := s.GeoServer.SetLayerStyle(layer, args.Style); err != nil {
			ResWriteError(res, err)
			return
		}
	}
	// then update the Postgres record
	if err := s.SysDB.UpdateDataLayer(dataId, layerId, userId, &args); err != nil {
		ResWriteError(res, err)
		return
	}
	ResWriteHeaderEntity(res, nil)
}
// updateDataSnapShot stores a snapshot (thumbnail) image for a layer.
// The image arrives either as a multipart file upload or as a base64
// data URI, selected by the "type" query parameter; the decoded bytes
// are written under <ConfigDir>/data/<dataId> and the resulting URL is
// saved on the layer record.
func (s DataLayerSvc) updateDataSnapShot(req *restful.Request, res *restful.Response) {
	// target directory for this dataset's assets
	dataId := req.PathParameter("data-id")
	layerId := req.PathParameter("layer-id")
	dataDir := filepath.Join(s.ConfigDir, "data", dataId)
	if err := CheckDir(dataDir); err != nil {
		log.Error("Failed to create data dir :", dataDir)
		ResWriteError(res, err)
		return
	}
	var thumbFile string
	var pic []byte
	var err error
	uploadType := req.QueryParameter("type")
	fmt.Println(uploadType)
	if uploadType == common.FileUpload {
		// NOTE(review): `:=` here shadows the outer err; harmless because
		// every error in this branch is handled locally
		file, fh, err := req.Request.FormFile("snapshot")
		if err != nil {
			ResWriteError(res, err)
			return
		}
		defer file.Close()
		pic, err = ioutil.ReadAll(file)
		if err != nil {
			ResWriteError(res, err)
			return
		}
		thumbFile = filepath.Join(dataDir, fh.Filename)
	} else if uploadType == common.Base64Upload {
		var args types.SnapshotReq
		if err := req.ReadEntity(&args); err != nil {
			ResWriteError(res, err)
			return
		}
		if len(args.Image) == 0 {
			ResWriteError(res, fmt.Errorf(TR("Invalid snapshot image")))
			return
		}
		// strip the "data:image/...;base64," prefix before decoding
		picbase64 := args.Image[strings.Index(args.Image, ",")+1:]
		pic, err = base64.StdEncoding.DecodeString(picbase64)
		if err != nil {
			ResWriteError(res, err)
			return
		}
		if len(args.FileName) > 0 {
			thumbFile = fmt.Sprintf("%s/%s", dataDir, args.FileName)
		} else {
			// derive the extension from the data URI's MIME subtype
			ext := args.Image[strings.Index(args.Image, "/")+1 : strings.Index(args.Image, ";")]
			thumbFile = fmt.Sprintf("%s/%s.%s", dataDir, layerId, ext)
		}
	}
	// NOTE(review): an unknown upload type leaves thumbFile empty, so the
	// WriteFile below fails and reports the error to the client
	fmt.Println(thumbFile)
	if err := ioutil.WriteFile(thumbFile, pic, os.ModePerm); err != nil {
		ResWriteError(res, err)
		return
	}
	// expose the stored file via the public file prefix
	url := strings.Replace(thumbFile, s.ConfigDir, common.FilePre, -1)
	if err := s.SysDB.UpdateLayerSnap(layerId, url); err != nil {
		ResWriteError(res, err)
		return
	}
	ResWriteHeaderEntity(res, nil)
}
|
// +build dev

package bundle
import "net/http"
// SqlMap serves SQL map fixtures from the local test directory in dev
// builds (see the build constraint at the top of this file).
var SqlMap http.FileSystem = http.Dir("test/sqlmap")
|
package main
import "fmt"
// sayHi builds a greeting for the given name.
func sayHi(name string) string {
	return fmt.Sprintf("hello, %s!", name)
}
// init runs before main, demonstrating Go's package initialization order.
func init() {
	fmt.Println("function init will be executed first.")
}
// main demonstrates constants, Printf verbs for integer bases and
// widths, and two bitwise operators.
func main() {
	fmt.Println(sayHi("vivia"))
	const PI = 3.14
	fmt.Printf("PI=%v\n", PI)
	fmt.Printf("%d\n", 100)
	// 0x100 is 256 in decimal
	fmt.Printf("%d\n", 0x100)
	fmt.Printf("%x\n", 0x100)
	fmt.Printf("%X\n", 0x100)
	fmt.Printf("%X\n", 256)
	// %5d right-aligns in a 5-character field
	fmt.Printf("%5d\n", 256)
	fmt.Println("===运算符===")
	// NOTE: in Go ^ is bitwise XOR, not exponentiation: 2 ^ 3 prints 1
	fmt.Printf("%d\n", 2 ^ 3)
	// unary ^ is bitwise NOT: ^2 prints -3
	fmt.Printf("%d\n", ^ 2)
}
|
package core
import (
"encoding/json"
"er"
"fwb"
"hlf"
"sgs"
)
// playerImp is the default PlayerAgent implementation: it identifies a
// connected client and relays commands between client and game app.
type playerImp struct {
	app fwb.FwApp // owning application, used for session and game access
	id int // client identifier
	name string // display name
	lg hlf.Logger // logger shared with the app
}
// ID returns the player's client identifier.
func (me *playerImp) ID() int { return me.id }

// Name returns the player's display name.
func (me *playerImp) Name() string { return me.name }
// SendCommand routes a command addressed to this player:
//   - client-bound commands are forwarded to the client session;
//   - CMD_FORWARD_TO_APP carries a JSON-encoded sgs.Command in its
//     payload, which is decoded and delivered to the game;
//   - anything else is rejected as not executable.
func (me *playerImp) SendCommand(command sgs.Command) *er.Err {
	if command.InCategory(sgs.CMD_C_APP_TO_CLIENT) {
		return me.app.GetSession().ForwardToClient(me.id, command)
	}
	if command.ID == sgs.CMD_FORWARD_TO_APP {
		actualCmd := sgs.Command{}
		cmdBytes, ok := command.Payload.([]byte)
		if !ok {
			return er.Throw(fwb.E_INVALID_CMD, er.EInfo{
				"details": "payload to forward is not bytes",
				"payload": command.Payload,
			}).To(me.lg)
		}
		e := json.Unmarshal(cmdBytes, &actualCmd)
		if e != nil {
			return er.Throw(fwb.E_CMD_PAYLOAD_NOT_DECODABLE, er.EInfo{
				"details": "payload with command sent to app is not able to be decoded to sgs.Command",
				"payload": command.Payload,
			}).To(me.lg)
		}
		return me.app.SendToGame(actualCmd)
	}
	// NOTE(review): unlike the errors above, this throw is not routed to
	// the logger via .To(me.lg) — confirm whether that is intentional.
	return er.Throw(fwb.E_CMD_NOT_EXECUTABLE, er.EInfo{
		"details": "playerImp is not supposed to receive the command",
		"command": command.HexID(),
	})
}
// makePlayer initializes a new player agent bound to the given app,
// client id, and display name.
func makePlayer(app fwb.FwApp, id int, name string) fwb.PlayerAgent {
	app.GetLogger().Dbg("Make player: client ID 0x%x, name %v", id, name)
	return &playerImp{
		app:  app,
		id:   id,
		name: name,
		lg:   app.GetLogger(),
	}
}
|
package domain
import (
"log"
"testing"
"github.com/matryer/is"
)
// TestGetMessage checks message reconstruction: each input slice is a
// partial transmission ("" marks a missed word) and GetMessage merges
// the three into the original sentence.
func TestGetMessage(t *testing.T) {
	t.Run("When get encoded message with symmetrical messages", func(t *testing.T) {
		is := is.New(t)
		kenobi := []string{"este", "", "", "mensaje", ""}
		skywalker := []string{"", "es", "", "", "secreto"}
		sato := []string{"este", "", "un", "", ""}
		decodedMessage, _ := GetMessage(kenobi, skywalker, sato)
		log.Print(decodedMessage, decodedMessage == "este es un mensaje secreto")
		is.Equal("este es un mensaje secreto", decodedMessage)
	})
	t.Run("When get encoded message with asymmetrical messages", func(t *testing.T) {
		is := is.New(t)
		// slices of different lengths: leading offsets must be tolerated
		kenobi := []string{"", "este", "es", "un", "mensaje"}
		skywalker := []string{"este", "", "un", "mensaje"}
		sato := []string{"", "", "es", "", "mensaje"}
		decodedMessage, _ := GetMessage(kenobi, skywalker, sato)
		is.Equal("este es un mensaje", decodedMessage)
	})
}
|
package utils
import (
"regexp"
"time"
)
const (
	// emailPattern follows the common HTML5-style email validation pattern.
	emailPattern = "^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$"
	// format is the ISO-8601 / RFC-3339 style layout used by GetTime.
	format = "2006-01-02T15:04:05Z"
	// mySQLDbFormat is the DATETIME layout used by GetDBTime.
	mySQLDbFormat = "2006-01-02 15:04:05"
)

// emailRegex is compiled once at package init; the original recompiled
// the pattern on every IsValidEmail call, which is wasted work.
var emailRegex = regexp.MustCompile(emailPattern)

// IsValidEmail reports whether email matches emailPattern.
func IsValidEmail(email string) bool {
	return emailRegex.MatchString(email)
}
// GetDBTime returns the current UTC time formatted for MySQL storage.
func GetDBTime() string {
	now := time.Now().UTC()
	return now.Format(mySQLDbFormat)
}

// GetTime returns the current UTC time in the ISO-8601 style layout.
func GetTime() string {
	now := time.Now().UTC()
	return now.Format(format)
}
|
package main
import (
"io"
"os"
)
// main copies input.txt to output.txt, panicking on any I/O failure.
// Fixes over the original: the variable `error` (which shadowed the
// builtin error type) is renamed, and the manual 1 KiB read/write loop
// is replaced by io.Copy, which streams in constant memory and handles
// io.EOF itself.
func main() {
	// open input file
	src, err := os.Open("input.txt")
	if err != nil {
		panic(err)
	}
	// close file on exit and check for its returned error
	defer func() {
		if err := src.Close(); err != nil {
			panic(err)
		}
	}()
	// open output file
	dst, err := os.Create("output.txt")
	if err != nil {
		panic(err)
	}
	// close output file on exit and check for its returned error
	defer func() {
		if err := dst.Close(); err != nil {
			panic(err)
		}
	}()
	// stream the whole file across
	if _, err := io.Copy(dst, src); err != nil {
		panic(err)
	}
}
|
package controllor
import (
"encoding/json"
"os"
"strconv"
"strings"
"xiaodaimeng/public"
)
// XDM mirrors data/menu.json: the menu entries, the about text, and the
// update changelog.
type XDM struct {
	Menu []string `json:"menu"`
	About string `json:"about"`
	Update Update `json:"update"`
}
// Update holds the current version number plus per-version changelog entries.
type Update struct {
	Version int `json:"version"` // current version
	Info []UpdateInfo `json:"info"`
}
// UpdateInfo is one changelog entry.
type UpdateInfo struct {
	Version int `json:"version"`
	Msg string `json:"msg"`
}
// Menu-function state.
// Menu holds the parsed contents of data/menu.json.
var Menu = new(XDM)
// menuFunc maps md5(upper-cased alias, name, or index) to its handler.
var menuFunc = map[string]func(msg Msg){}
// MenuText is the rendered menu shown to users by Help.
var MenuText = ""
// init loads data/menu.json and registers each menu entry under three
// lookup keys (its name, its alias, and its numeric index, each
// md5-hashed after upper-casing) while building the MenuText shown by Help.
func init() {
	// NOTE(review): the os.Open error is discarded — a missing file only
	// surfaces as a decode failure on the nil *os.File below.
	menuPtr, _ := os.Open("data/menu.json")
	defer menuPtr.Close()
	decoder := json.NewDecoder(menuPtr)
	err := decoder.Decode(&Menu)
	if err != nil {
		public.Printf("菜单文件解码失败,", err.Error())
		return
	}
	public.Debug("about : ", Menu.About)
	for i, f := range Menu.Menu {
		// each entry looks like "name/alias", e.g.:
		funcNames := strings.Split(f, "/")
		ff := func(msg Msg) {}
		//"帮助/h",
		// "成语接龙/cyjl",
		// "自定义问题答案/pa",
		// "日记本/diary",
		// "待办事项/todo"
		if len(funcNames) == 2 {
			public.Debug(funcNames[0])
			// map the entry name to its handler; the explicit breaks are
			// redundant in Go (cases never fall through), so the missing
			// break after the "抽签" case is harmless
			switch funcNames[0] {
			case "帮助":
				ff = Help
				break
			case "关于":
				ff = About
				break
			case "成语接龙":
				ff = Cyjl
				break
			case "自定义问题答案":
				ff = Pa
				break
			case "日记":
				ff = Diary
				break
			case "待办事项":
				ff = WTodo
				break
			case "更新信息":
				ff = GetUpdateInfo
				break
			case "抽签":
				ff = Draw
			case "解签":
				ff = UnDraw
				break
			case "新闻":
				ff = News
				break
			case "小灰":
				ff = OffCatch
				break
			case "小呆萌":
				ff = OnCatch
				break
			}
			key := strconv.Itoa(i)
			// register under all three keys so users can type any of them
			menuFunc[md5V(strings.ToUpper(funcNames[0]))] = ff
			menuFunc[md5V(strings.ToUpper(funcNames[1]))] = ff
			menuFunc[md5V(key)] = ff
			MenuText += key + ". " + f + "\n"
		}
	}
	if MenuText != "" {
		MenuText += "\n直接回复序号或文字获取功能"
	}
}
// IsMenuFunc returns the handler registered for the given menu alias,
// name, or index, or nil when the text names no menu function. Lookup
// is by md5 of the upper-cased input, matching the registration in init.
func IsMenuFunc(ff string) func(msg Msg) {
	// single map lookup replaces the original check-then-index pair
	if fn, ok := menuFunc[md5V(strings.ToUpper(ff))]; ok {
		return fn
	}
	return nil
}
// Help replies with the rendered menu text, falling back to the generic
// answer engine when no menu is configured.
func Help(msg Msg) {
	if MenuText != "" {
		SendMsg(GetReceiver(msg), MenuText, TXT_MSG)
	} else {
		// no menu configured
		GetAnswer(msg)
	}
}
// About replies with the "about" text loaded from the menu file.
func About(msg Msg) {
	public.Debug("About")
	SendMsg(GetReceiver(msg), Menu.About, TXT_MSG)
}
// Cyjl starts the idiom-chain game by rewriting the message content
// before passing it to the answer engine.
func Cyjl(msg Msg) {
	public.Debug("Cyjl")
	msg.Content = "成语接龙"
	GetAnswer(msg)
}
// Pa (custom Q&A) is not implemented yet; replies with NotDoneText.
func Pa(msg Msg) {
	public.Debug("Pa")
	SendMsg(GetReceiver(msg), NotDoneText, TXT_MSG)
}
// Diary is not implemented yet; replies with NotDoneText.
func Diary(msg Msg) {
	public.Debug("Diary")
	SendMsg(GetReceiver(msg), NotDoneText, TXT_MSG)
}
// WTodo (to-do list) is not implemented yet; replies with NotDoneText.
func WTodo(msg Msg) {
	public.Debug("Todo")
	SendMsg(GetReceiver(msg), NotDoneText, TXT_MSG)
}
// GetUpdateInfo replies with the changelog entry matching the current
// version, falling back to the first entry when no exact match exists.
// Fixed: an empty Info list previously panicked on Info[0]; it now
// replies with just the version number.
func GetUpdateInfo(msg Msg) {
	public.Debug("GetUpdateInfo")
	if len(Menu.Update.Info) == 0 {
		SendMsg(GetReceiver(msg), "当前版本:"+strconv.Itoa(Menu.Update.Version), TXT_MSG)
		return
	}
	updateInfo := Menu.Update.Info[0].Msg
	for _, info := range Menu.Update.Info {
		if info.Version == Menu.Update.Version {
			updateInfo = info.Msg
			break
		}
	}
	updateInfo = "当前版本:" + strconv.Itoa(Menu.Update.Version) + "\n" + updateInfo
	SendMsg(GetReceiver(msg), updateInfo, TXT_MSG)
}
// News forwards the message to the answer engine for a news lookup.
func News(msg Msg) {
	public.Debug("News")
	GetAnswer(msg)
}
|
package main
import (
"database/sql"
"fmt"
"log"
"net/http"
"github.com/julienschmidt/httprouter"
)
var db *sql.DB
// main wires the HTTP routes and serves on :8081 until a fatal error.
func main() {
	fmt.Println("Building router..")
	mux := httprouter.New()
	mux.GET("/", Index)
	mux.GET("/plot", Plot)
	mux.GET("/split", Split)
	mux.GET("/train", Train)
	mux.GET("/neural", ExecuteNN)
	fmt.Println("Listening and Serving..")
	log.Fatal(http.ListenAndServe(":8081", mux))
}
// initDb opens the package-level Postgres handle.
// NOTE(review): sql.Open only validates arguments — it does not connect;
// a db.Ping() would confirm connectivity. The "postgres" driver must be
// registered via a blank import elsewhere (not visible in this file),
// and a connection error here is only printed, leaving db unusable.
func initDb() {
	var err error
	fmt.Println("initializing database")
	db, err = sql.Open("postgres", "host=localhost port=32772 user=admin password=admin dbname=postgres sslmode=disable")
	if err != nil {
		fmt.Println("error connection to db", err.Error())
	}
}
|
//Package glog 学习自FLogger https://github.com/cyfonly/FLogger.git
package glog
import (
"fmt"
"runtime"
"runtime/debug"
"time"
"github.com/dalixu/glogger"
)
// import (
// "log"
// "os"
// )
// Properties carries LogEvent attributes, making it easy to attach
// custom fields to an event.
type Properties map[string]interface{}
// LogEvent is the concrete payload of a single log record.
type LogEvent struct {
	Properties
	Level LogLevel
	LevelDesc string // textual description of Level, e.g. "INFO"
	Name string
	Format string // format string, or the plain message when used without Args formatting
	Args []interface{}
	StackTrace string
	Time string
}
// Logger is the logging interface used by this package so a third-party
// logger can be substituted.
type Logger interface {
	glogger.GLogger
	WriteEvent(e LogEvent) // perhaps this should take *LogEvent (original author's note)
}
// newLogger builds the default Logger implementation bound to the given
// manager and logger name.
func newLogger(mr Manager, name string) Logger {
	l := &logger{Manager: mr, name: name}
	return l
}
// logger is the default Logger implementation: it embeds a Manager and
// tags every event with its name.
type logger struct {
	Manager
	name string
}
// WriteEvent forwards the event to the embedded Manager.
func (lr *logger) WriteEvent(e LogEvent) {
	lr.Manager.WriteEvent(e)
}
// Trace implements Logger at trace level.
func (lr *logger) Trace(v ...interface{}) {
	lr.write(TraceLevel, "TRACE", v...)
}
// Tracef implements Logger at trace level with a format string.
func (lr *logger) Tracef(format string, v ...interface{}) {
	lr.writef(TraceLevel, "TRACE", format, v...)
}
// Debug implements Logger at debug level.
func (lr *logger) Debug(v ...interface{}) {
	lr.write(DebugLevel, "DEBUG", v...)
}
// Debugf implements Logger at debug level with a format string.
func (lr *logger) Debugf(format string, v ...interface{}) {
	lr.writef(DebugLevel, "DEBUG", format, v...)
}
// Info implements Logger at info level.
func (lr *logger) Info(v ...interface{}) {
	lr.write(InfoLevel, "INFO", v...)
}
// Infof implements Logger at info level with a format string.
func (lr *logger) Infof(format string, v ...interface{}) {
	lr.writef(InfoLevel, "INFO", format, v...)
}
// Warn implements Logger at warn level.
func (lr *logger) Warn(v ...interface{}) {
	lr.write(WarnLevel, "WARN", v...)
}
// Warnf implements Logger at warn level with a format string.
func (lr *logger) Warnf(format string, v ...interface{}) {
	lr.writef(WarnLevel, "WARN", format, v...)
}
// Error implements Logger at error level.
func (lr *logger) Error(v ...interface{}) {
	lr.write(ErrorLevel, "ERROR", v...)
}
// Errorf implements Logger at error level with a format string.
func (lr *logger) Errorf(format string, v ...interface{}) {
	lr.writef(ErrorLevel, "ERROR", format, v...)
}
// Fatal implements Logger at fatal level. (Original comment said
// "Critical" — the method is Fatal.)
func (lr *logger) Fatal(v ...interface{}) {
	lr.write(FatalLevel, "FATAL", v...)
}
// Fatalf implements Logger at fatal level with a format string.
func (lr *logger) Fatalf(format string, v ...interface{}) {
	lr.writef(FatalLevel, "FATAL", format, v...)
}
// callerStack returns the diagnostic location for a log event: a full
// stack dump for Error and above, otherwise the file:line of the user
// code that invoked the public logging method. The stack-capture logic
// was previously duplicated in write and writef.
func (lr *logger) callerStack(level LogLevel) string {
	if level >= ErrorLevel {
		return string(debug.Stack())
	}
	// Skip 3 frames: callerStack -> write/writef -> public method
	// (Trace/Debug/...), landing on the user's call site — the same
	// frame the old inline runtime.Caller(2) reported.
	if _, file, line, ok := runtime.Caller(3); ok {
		return fmt.Sprintf("%s:%d", file, line)
	}
	return ""
}

// write builds a LogEvent for the variadic-argument logging methods and
// hands it to the embedded Manager.
func (lr *logger) write(level LogLevel, desc string, args ...interface{}) {
	lr.WriteEvent(LogEvent{
		Level:      level,
		LevelDesc:  desc,
		Name:       lr.name,
		Args:       args,
		StackTrace: lr.callerStack(level),
		Time:       time.Now().Format("2006-01-02 15:04:05.0000"),
	})
}

// writef builds a LogEvent for the format-string logging methods and
// hands it to the embedded Manager.
func (lr *logger) writef(level LogLevel, desc string, format string, args ...interface{}) {
	lr.WriteEvent(LogEvent{
		Level:      level,
		LevelDesc:  desc,
		Name:       lr.name,
		Format:     format,
		Args:       args,
		StackTrace: lr.callerStack(level),
		Time:       time.Now().Format("2006-01-02 15:04:05.0000"),
	})
}
|
package cmd
import (
"os"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/Zenika/marcel/config"
)
// preRunForServer builds a cobra pre-run hook that prepares logging and
// loads configuration into cfg before a server command executes.
func preRunForServer(cfg *config.Config) func(*cobra.Command, []string) error {
	return func(_ *cobra.Command, _ []string) error {
		log.SetOutput(os.Stdout)
		bindLogLevel(cfg)
		err := cfg.Read(configFile)
		if err != nil {
			return err
		}
		config.SetDefault(cfg)
		setLogLevel(cfg)
		cfg.Debug()
		return nil
	}
}
|
package main
import (
"fmt"
"net/http"
"strconv"
"github.com/gin-gonic/gin"
)
// messages is the in-memory message store shared by all handlers.
var messages []string
// func GetMessages(c *gin.Context) {
// version := c.Param("version")
// fmt.Println("Version", version)
// c.JSON(http.StatusOK, gin.H{"messages": messages})
// }
// OptionMessage answers CORS preflight (OPTIONS) requests for the v2
// message endpoint, allowing the local dev origin, the methods the API
// serves, and the custom X-Token request header.
func OptionMessage(c *gin.Context) {
	c.Header("Access-Control-Allow-Origin", "http://localhost:8080")
	c.Header("Access-Control-Allow-Methods", "GET, OPTIONS, POST, PUT")
	c.Header("Access-Control-Allow-Headers", "X-Token")
}
// GetMessages returns the full message list as JSON. For API v2 the
// response additionally carries a CORS allow-origin header.
func GetMessages(c *gin.Context) {
	v := c.Param("version")
	fmt.Println("Version", v)
	if v == "v2" {
		c.Header("Access-Control-Allow-Origin", "http://localhost:8080")
	}
	c.JSON(http.StatusOK, gin.H{"messages": messages})
}
// PutMessage replaces the message at :id with the "puttext" form value
// and returns the full list. For API v2 it also sets CORS headers so
// browsers may read the custom X-Custom response header.
func PutMessage(c *gin.Context) {
	version := c.Param("version")
	id, err := strconv.Atoi(c.Param("id"))
	// Reject non-numeric or out-of-range ids instead of panicking on
	// the slice index below (the error was previously discarded).
	if err != nil || id < 0 || id >= len(messages) {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid message id"})
		return
	}
	text := c.PostForm("puttext")
	messages[id] = text
	if version == "v2" {
		c.Header("Access-Control-Allow-Origin", "http://localhost:8080")
		c.Header("Access-Control-Expose-Headers", "X-Custom")
	}
	c.Header("X-Custom", "123456789")
	c.JSON(http.StatusOK, gin.H{"messages": messages})
}
// main seeds the store and serves the versioned message API on :8000.
// NOTE(review): the error returned by r.Run is discarded — consider
// logging it so startup failures are visible.
func main() {
	messages = append(messages, "Hello CORS!")
	r := gin.Default()
	r.GET("/api/:version/messages", GetMessages)
	r.PUT("/api/:version/messages/:id", PutMessage)
	r.OPTIONS("/api/v2/messages/:id", OptionMessage)
	r.Run(":8000")
}
package server
import (
"net/http"
"reflect"
"github.com/ItsJimi/casa/logger"
"github.com/ItsJimi/casa/utils"
"github.com/labstack/echo"
)
// addHomeReq is the request payload accepted by AddHome and UpdateHome.
// Only Name is required (enforced by the MissingFields check in
// AddHome; UpdateHome treats empty fields as "keep current value").
type addHomeReq struct {
	Name string
	Address string
	WifiSSID string
}
// AddHome route create and add user to an home.
//
// It inserts a new home row owned by the authenticated user, then
// grants that user full permissions on it. On success the response
// carries the new home's id.
func AddHome(c echo.Context) error {
	req := new(addHomeReq)
	if err := c.Bind(req); err != nil {
		logger.WithFields(logger.Fields{"code": "CSHAH001"}).Errorf("%s", err.Error())
		// NOTE(review): response code "CSRAR001" does not match the
		// logged "CSHAH001" — looks like a copy/paste slip, but clients
		// may match on it, so it is left unchanged; confirm.
		return c.JSON(http.StatusBadRequest, ErrorResponse{
			Code: "CSRAR001",
			Message: "Wrong parameters",
		})
	}
	if err := utils.MissingFields(c, reflect.ValueOf(req).Elem(), []string{"Name"}); err != nil {
		logger.WithFields(logger.Fields{"code": "CSHAH002"}).Errorf("%s", err.Error())
		return c.JSON(http.StatusBadRequest, ErrorResponse{
			Code: "CSHAH002",
			Message: err.Error(),
		})
	}
	user := c.Get("user").(User)
	row, err := DB.Query("INSERT INTO homes (id, name, address, creator_id) VALUES (generate_ulid(), $1, $2, $3) RETURNING id;", req.Name, req.Address, user.ID)
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSHAH003"}).Errorf("%s", err.Error())
		return c.JSON(http.StatusBadRequest, ErrorResponse{
			Code: "CSHAH003",
			Message: "Home can't be added",
		})
	}
	// Previously the rows were never closed, leaking the connection on
	// every call; close them on all paths.
	defer row.Close()
	var homeID string
	// Previously row.Next()'s result was ignored, making the following
	// Scan fail obscurely when no row came back.
	if !row.Next() {
		logger.WithFields(logger.Fields{"code": "CSHAH004"}).Errorf("INSERT returned no id row")
		return c.JSON(http.StatusBadRequest, ErrorResponse{
			Code: "CSHAH004",
			Message: "Home can't be added",
		})
	}
	err = row.Scan(&homeID)
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSHAH004"}).Errorf("%s", err.Error())
		return c.JSON(http.StatusBadRequest, ErrorResponse{
			Code: "CSHAH004",
			Message: "Home can't be added",
		})
	}
	// Grant the creator full rights on the new home.
	newPermission := Permission{
		UserID: user.ID,
		Type: "home",
		TypeID: homeID,
		Read: true,
		Write: true,
		Manage: true,
		Admin: true,
	}
	_, err = DB.NamedExec("INSERT INTO permissions (id, user_id, type, type_id, read, write, manage, admin) VALUES (generate_ulid(), :user_id, :type, :type_id, :read, :write, :manage, :admin)", newPermission)
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSHAH005"}).Errorf("%s", err.Error())
		return c.JSON(http.StatusInternalServerError, ErrorResponse{
			Code: "CSHAH005",
			Message: "Home can't be added",
		})
	}
	return c.JSON(http.StatusCreated, MessageResponse{
		Message: homeID,
	})
}
// UpdateHome route update home.
//
// Updates name/address/wifi_ssid of the home in :homeId; empty request
// fields keep their current value via COALESCE with a SQL NULL
// (utils.NewNullString).
// NOTE(review): unlike AddHome there is no required-field validation
// here — confirm that is intended.
func UpdateHome(c echo.Context) error {
	req := new(addHomeReq)
	if err := c.Bind(req); err != nil {
		logger.WithFields(logger.Fields{"code": "CSHUH001"}).Errorf("%s", err.Error())
		return c.JSON(http.StatusBadRequest, ErrorResponse{
			Code: "CSHUH001",
			Message: "Wrong parameters",
		})
	}
	_, err := DB.Exec("UPDATE homes SET name=COALESCE($1, name), address=COALESCE($2, address), wifi_ssid=COALESCE($3, wifi_ssid) WHERE id=$4", utils.NewNullString(req.Name), utils.NewNullString(req.Address), utils.NewNullString(req.WifiSSID), c.Param("homeId"))
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSHUH005"}).Errorf("%s", err.Error())
		return c.JSON(http.StatusInternalServerError, ErrorResponse{
			Code: "CSHUH005",
			Message: "Home can't be updated",
		})
	}
	return c.JSON(http.StatusOK, MessageResponse{
		Message: "Home updated",
	})
}
// DeleteHome route delete home.
//
// Removes the home row and then every "home"-type permission pointing
// at it.
// NOTE(review): the two deletes are not wrapped in a transaction — if
// the second fails, orphaned permissions remain; confirm acceptable.
func DeleteHome(c echo.Context) error {
	_, err := DB.Exec("DELETE FROM homes WHERE id=$1", c.Param("homeId"))
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSHDH001"}).Errorf("%s", err.Error())
		return c.JSON(http.StatusInternalServerError, ErrorResponse{
			Code: "CSHDH001",
			Message: "Home can't be deleted",
		})
	}
	_, err = DB.Exec("DELETE FROM permissions WHERE type=$1 AND type_id=$2", "home", c.Param("homeId"))
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSHDH002"}).Errorf("%s", err.Error())
		return c.JSON(http.StatusInternalServerError, ErrorResponse{
			Code: "CSHDH002",
			Message: "Home can't be deleted",
		})
	}
	return c.JSON(http.StatusOK, MessageResponse{
		Message: "Home deleted",
	})
}
// permissionHome is the join row produced by the GetHome/GetHomes
// queries: a Permission, the home's creator User, plus the home columns
// aliased with an h_ prefix.
type permissionHome struct {
	Permission
	User
	HomeID string `db:"h_id"`
	HomeName string `db:"h_name"`
	HomeAddress string `db:"h_address"`
	HomeCreatedAt string `db:"h_createdat"`
}
// homeRes is the API representation of a home together with the
// requesting user's permissions on it.
type homeRes struct {
	ID string `json:"id"`
	Name string `json:"name"`
	Address string `json:"address"`
	CreatedAt string `json:"created_at"`
	Creator User `json:"creator"`
	Read bool `json:"read"`
	Write bool `json:"write"`
	Manage bool `json:"manage"`
	Admin bool `json:"admin"`
}
// GetHomes route get list of user homes.
//
// Joins the caller's "home" permissions with the home rows and each
// home's creator, and returns one homeRes per accessible home.
func GetHomes(c echo.Context) error {
	user := c.Get("user").(User)
	rows, err := DB.Queryx(`
	SELECT permissions.*, users.*, homes.id as h_id, homes.name AS h_name, homes.address AS h_address, homes.created_at AS h_createdat FROM permissions
	JOIN homes ON permissions.type_id = homes.id
	JOIN users ON homes.creator_id = users.id
	WHERE permissions.type=$1 AND permissions.user_id=$2
	`, "home", user.ID)
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSHGHS001"}).Errorf("%s", err.Error())
		return c.JSON(http.StatusInternalServerError, ErrorResponse{
			Code: "CSHGHS001",
			Message: "Homes can't be retrieved",
		})
	}
	defer rows.Close()
	var homes []homeRes
	for rows.Next() {
		var permission permissionHome
		err := rows.StructScan(&permission)
		if err != nil {
			logger.WithFields(logger.Fields{"code": "CSHGHS002"}).Errorf("%s", err.Error())
			return c.JSON(http.StatusInternalServerError, ErrorResponse{
				Code: "CSHGHS002",
				Message: "Homes can't be retrieved",
			})
		}
		homes = append(homes, homeRes{
			ID: permission.HomeID,
			Name: permission.HomeName,
			Address: permission.HomeAddress,
			CreatedAt: permission.HomeCreatedAt,
			Creator: permission.User,
			Read: permission.Permission.Read,
			Write: permission.Permission.Write,
			Manage: permission.Permission.Manage,
			Admin: permission.Permission.Admin,
		})
	}
	// Previously iteration errors were silently dropped, which could
	// return a truncated list as success.
	if err := rows.Err(); err != nil {
		logger.WithFields(logger.Fields{"code": "CSHGHS002"}).Errorf("%s", err.Error())
		return c.JSON(http.StatusInternalServerError, ErrorResponse{
			Code: "CSHGHS002",
			Message: "Homes can't be retrieved",
		})
	}
	return c.JSON(http.StatusOK, DataReponse{
		Data: homes,
	})
}
// GetHome route get specific home with id.
//
// Returns the home in :homeId together with the requesting user's
// permissions on it.
// NOTE(review): sqlx's QueryRowx never returns nil, so the row == nil
// branch below is dead; a missing row surfaces as a StructScan error
// (sql.ErrNoRows) and is reported as 500/CSHGH002 rather than 404 —
// confirm whether that's intended.
func GetHome(c echo.Context) error {
	user := c.Get("user").(User)
	row := DB.QueryRowx(`
	SELECT permissions.*, users.*, homes.id as h_id, homes.name AS h_name, homes.address AS h_address, homes.created_at AS h_createdat FROM permissions
	JOIN homes ON permissions.type_id = homes.id
	JOIN users ON homes.creator_id = users.id
	WHERE type=$1 AND type_id=$2 AND user_id=$3
	`, "home", c.Param("homeId"), user.ID)
	if row == nil {
		logger.WithFields(logger.Fields{"code": "CSHGH001"}).Errorf("QueryRowx: Select error")
		return c.JSON(http.StatusNotFound, ErrorResponse{
			Code: "CSHGH001",
			Message: "Home can't be found",
		})
	}
	var permission permissionHome
	err := row.StructScan(&permission)
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSHGH002"}).Errorf("QueryRowx: Select error")
		return c.JSON(http.StatusInternalServerError, ErrorResponse{
			Code: "CSHGH002",
			Message: "Home can't be found",
		})
	}
	return c.JSON(http.StatusOK, DataReponse{
		Data: homeRes{
			ID: permission.HomeID,
			Name: permission.HomeName,
			Address: permission.HomeAddress,
			CreatedAt: permission.HomeCreatedAt,
			Creator: permission.User,
			Read: permission.Permission.Read,
			Write: permission.Permission.Write,
			Manage: permission.Permission.Manage,
			Admin: permission.Permission.Admin,
		},
	})
}
|
package main
import (
"fmt"
"encoding/json"
)
// Usuario is a sample user with first and last name; both fields are
// exported so encoding/json can serialize them.
type Usuario struct{
	Nome string
	SobreNome string
}
// main serializes a sample Usuario to JSON and prints it.
func main() {
	usuario := Usuario{"vinicius", "dias das silva"}
	// The Marshal error was previously discarded with _.
	usuAux, err := json.Marshal(usuario)
	if err != nil {
		fmt.Println("marshal error:", err)
		return
	}
	fmt.Println(string(usuAux))
}
|
package model
import (
// "encoding/json"
"github.com/astaxie/beego/orm"
)
// init registers the RainlabBlogPosts model with beego's ORM so it can
// be queried; runs once at package load.
func init() {
	orm.RegisterModel(new(RainlabBlogPosts))
}
|
package main
import(
"fmt"
"net/http"
"crypto/tls"
"crypto/x509"
"io/ioutil"
"log"
)
func reqHandler(w http.ResponseWriter, req *http.Request) {
fmt.Fprint( w, "Hello From Server 1\n" )
}
// main serves "/" over mutual TLS on :9443: the self-signed CA in
// selfca.crt is trusted for verifying client certificates, and clients
// must present a certificate signed by it.
func main() {
	http.HandleFunc("/", reqHandler)
	// Add the selfca certificate to the certificate pool.
	// Adding selfca to the trusted CAs helps to verify the certificate
	// presented by the client (i.e., to trust the client).
	cacert, err := ioutil.ReadFile("selfca.crt")
	if err != nil {
		log.Fatal(err)
	}
	caCertPool := x509.NewCertPool()
	// Previously the return value was ignored: a malformed PEM would
	// silently yield an empty pool and reject every client.
	if !caCertPool.AppendCertsFromPEM(cacert) {
		log.Fatal("no CA certificates could be parsed from selfca.crt")
	}
	// TLS config: CA pool for client certs plus mandatory verification.
	tlsConfig := &tls.Config{
		ClientCAs:  caCertPool,
		ClientAuth: tls.RequireAndVerifyClientCert,
	}
	// HTTP server using the TLS config above.
	httpserver := &http.Server{
		Addr:      ":9443",
		TLSConfig: tlsConfig,
	}
	// Listen with the server's own certificate/key pair.
	log.Fatal(httpserver.ListenAndServeTLS("server1.crt", "server1.key"))
}
|
package products
// User provides a Use function for interacting with products; Use
// returns a textual description of the interaction.
type User interface {
	Use() string
}
package rpc
// RGBA is a color with one 32-bit integer per channel, as exchanged
// over the RPC protocol.
type RGBA struct {
	R int32 `json:"r"`
	G int32 `json:"g"`
	B int32 `json:"b"`
	A int32 `json:"a"`
}

// ToRGBInt packs the R, G and B channels into a single integer of the
// form 0xRRGGBB; the alpha channel is dropped.
func (rgba RGBA) ToRGBInt() int32 {
	return rgba.R<<16 + rgba.G<<8 + rgba.B
}

// ToRGB returns the three color channels, discarding alpha.
func (rgba RGBA) ToRGB() (int32, int32, int32) {
	return rgba.R, rgba.G, rgba.B
}
// Theme maps editor color-scheme slots to their RGBA values; every slot
// is optional (nil when the theme does not define it).
// NOTE(review): "LineHightlight" is a typo for LineHighlight, but the
// field name is exported API — renaming would break consumers.
type Theme struct {
	Accent *RGBA `json:"accent"`
	ActiveGuide *RGBA `json:"active_guide"`
	Bg *RGBA `json:"background"`
	Fg *RGBA `json:"foreground"`
	BracketContentsFg *RGBA `json:"bracket_contents_foreground"`
	BracketsFg *RGBA `json:"brackets_foreground"`
	Caret *RGBA `json:"caret"`
	Gutter *RGBA `json:"gutter"`
	LineHightlight *RGBA `json:"line_highlight"`
	Misspelling *RGBA `json:"misspelling"`
	Selection *RGBA `json:"selection"`
	SelectionBorder *RGBA `json:"selection_border"`
	// TODO: Add all and document
}
// ThemeChanged is the RPC notification emitted when the active theme
// changes, carrying the theme's name and its color slots.
type ThemeChanged struct {
	Name string `json:"name"`
	Theme Theme `json:"theme"`
}
|
package mysqldb
import (
"context"
"time"
)
// QRCode models one temporary QR code row (table wxmp_tmp_qrcode).
type QRCode struct {
	SceneID int32 `gorm:"primary_key"` // scene id (primary key)
	RawURL string `gorm:"column:raw_url"` // original URL
	Ticket string `gorm:"column:ticket"` // ticket
	Account string `gorm:"column:account"` // account
	MachineUUID string `gorm:"column:machine_uuid"` // machine uuid
	OriginID string `gorm:"column:origin_id"` // origin id
	ExpiredAt time.Time // when the QR code expires
	CreatedAt time.Time // creation time
	UpdatedAt time.Time // last update time
	DeletedAt *time.Time // soft-delete time
}
// TableName returns the table QRCode rows are stored in.
func (q QRCode) TableName() string {
	return "wxmp_tmp_qrcode"
}
// CreateQRCode inserts a new QRCode row and returns it; on failure the
// QRCode is nil and the gorm error is returned.
func (db *DbClient) CreateQRCode(ctx context.Context, qrcode *QRCode) (*QRCode, error) {
	if err := db.GetDB(ctx).Create(qrcode).Error; err != nil {
		return nil, err
	}
	return qrcode, nil
}
// UpdateQRCode rewrites the mutable columns of the row sharing
// qrcode.SceneID.
// NOTE(review): the update key "ori_id" does not match the struct tag
// column "origin_id" — if the real column is origin_id this writes the
// wrong column or fails; confirm against the schema before changing.
func (db *DbClient) UpdateQRCode(ctx context.Context, qrcode *QRCode) error {
	return db.GetDB(ctx).Model(&QRCode{}).Where("scene_id = ?", qrcode.SceneID).Updates(map[string]interface{}{
		"raw_url": qrcode.RawURL,
		"ticket": qrcode.Ticket,
		"account": qrcode.Account,
		"machine_uuid": qrcode.MachineUUID,
		"expired_at": qrcode.ExpiredAt,
		"ori_id": qrcode.OriginID,
		"updated_at": qrcode.UpdatedAt,
	}).Error
}
// FindQRCodeBySceneID looks up the QRCode row keyed by scene id; the
// gorm error (e.g. record-not-found) is returned when absent.
func (db *DbClient) FindQRCodeBySceneID(ctx context.Context, sceneID int32) (*QRCode, error) {
	// Parameter renamed from SceneID: Go locals/params use lowerCamelCase.
	var qrcode QRCode
	if err := db.GetDB(ctx).First(&qrcode, "scene_id = ? ", sceneID).Error; err != nil {
		return nil, err
	}
	return &qrcode, nil
}
|
package cmd
import (
"fmt"
"os"
"sort"
"strings"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// targetsPruneCmd implements "targets prune": remove one or more
// targets — by name, or by exact tag set with --by-tag — from the
// factory's targets.json.
var targetsPruneCmd = &cobra.Command{
	Use: "prune <target> [<target>...]",
	Short: "Prune target(s)",
	Run: doTargetsPrune,
	Args: cobra.MinimumNArgs(1),
}
// Flag values for "targets prune".
var (
	pruneNoTail bool // --no-tail: don't stream the CI job output
	pruneByTag bool // --by-tag: treat args as tags, not target names
	pruneDryRun bool // --dryrun: only print what would be pruned
)
// init attaches the prune subcommand to "targets" and registers its
// flags; runs once at package load.
func init() {
	targetsCmd.AddCommand(targetsPruneCmd)
	targetsPruneCmd.Flags().BoolVarP(&pruneNoTail, "no-tail", "", false, "Don't tail output of CI Job")
	targetsPruneCmd.Flags().BoolVarP(&pruneByTag, "by-tag", "", false, "Prune all targets by tags instead of name")
	targetsPruneCmd.Flags().BoolVarP(&pruneDryRun, "dryrun", "", false, "Only show what would be pruned")
}
// intersectionInSlices reports whether list1 and list2 share at least
// one common element.
func intersectionInSlices(list1, list2 []string) bool {
	seen := make(map[string]struct{}, len(list1))
	for _, v := range list1 {
		seen[v] = struct{}{}
	}
	for _, v := range list2 {
		if _, ok := seen[v]; ok {
			return true
		}
	}
	return false
}
// sortedListsMatch reports whether a and b contain exactly the same
// elements in the same order. Callers pre-sort both slices, so this is
// set equality for them.
func sortedListsMatch(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}
// doTargetsPrune resolves the set of target names to delete — either
// the args themselves, or (with --by-tag) every target whose sorted tag
// list exactly equals the sorted args — then submits the deletion as a
// CI job, optionally tailing its output.
func doTargetsPrune(cmd *cobra.Command, args []string) {
	factory := viper.GetString("factory")
	targets, err := api.TargetsList(factory)
	if err != nil {
		fmt.Print("ERROR: ")
		fmt.Println(err)
		os.Exit(1)
	}
	// Renamed from target_names: Go uses MixedCaps, not snake_case.
	var targetNames []string
	if pruneByTag {
		sort.Strings(args)
		targetNames = make([]string, 0, 10)
		for name, target := range targets.Signed.Targets {
			custom, err := api.TargetCustom(target)
			if err != nil {
				fmt.Printf("ERROR: %s\n", err)
			} else {
				sort.Strings(custom.Tags)
				if sortedListsMatch(args, custom.Tags) {
					targetNames = append(targetNames, name)
				}
			}
		}
	} else {
		// Validate every requested name before deleting anything.
		for _, name := range args {
			if _, ok := targets.Signed.Targets[name]; !ok {
				fmt.Printf("Target(%s) not found in targets.json\n", name)
				os.Exit(1)
			}
		}
		targetNames = args
	}
	fmt.Printf("Deleting targets:\n %s\n", strings.Join(targetNames, "\n "))
	if pruneDryRun {
		fmt.Println("Dry run, exiting")
		return
	}
	url, err := api.TargetDeleteTargets(factory, targetNames)
	if err != nil {
		fmt.Printf("ERROR: %s\n", err)
		os.Exit(1)
	}
	fmt.Printf("CI URL: %s\n", url)
	if !pruneNoTail {
		api.JobservTail(url)
	}
}
|
package main
import (
"golang/helper"
"testing"
)
// TestRottingOranges checks orangesRotting against three fixed grids:
// one where every fresh orange is reachable (4 minutes), one with an
// unreachable fresh orange (-1), and one with no fresh oranges (0).
func TestRottingOranges(t *testing.T) {
	input := [][]int{{2, 1, 1}, {1, 1, 0}, {0, 1, 1}}
	helper.AssertInt(orangesRotting(input), 4, t)
	input = [][]int{{2, 1, 1}, {0, 1, 1}, {1, 0, 1}}
	helper.AssertInt(orangesRotting(input), -1, t)
	input = [][]int{{0, 2}}
	helper.AssertInt(orangesRotting(input), 0, t)
}
|
package full_test
import (
"testing"
"github.com/mkamadeus/cipher/cipher/vigenere/full"
)
// TestEncrypt checks full Vigenère encryption of a lowercase message
// with a lowercase key against a known ciphertext.
func TestEncrypt(t *testing.T) {
	plain := "thisisplaintext"
	key := "sony"
	expected := "DPYFNWKHRDCABKO"
	encrypted := full.Encrypt(plain, key)
	if encrypted != expected {
		t.Fatalf("full vignere encryption failed, expected %v, found %v", expected, encrypted)
	}
}
// TestEncrypt2 feeds non-alphabetic characters in both plaintext and
// key; judging by the expected value, only the letters appear to be
// encrypted — confirm against full.Encrypt's contract.
func TestEncrypt2(t *testing.T) {
	plain := "t hi(*&"
	key := "so(*&^ny"
	expected := "DPY"
	encrypted := full.Encrypt(plain, key)
	if encrypted != expected {
		t.Fatalf("full vignere encryption failed, expected %v, found %v", expected, encrypted)
	}
}
// TestEncryptWithUpperCase checks mixed-case input with punctuation;
// the expected value is the upper-cased ciphertext of the letters only.
func TestEncryptWithUpperCase(t *testing.T) {
	plain := "dCode Vigenere automatically!#!#!#!#"
	key := "KEY"
	expected := "YMKYAMPVWNAZVNXIPTHUQCNHET"
	encrypted := full.Encrypt(plain, key)
	if encrypted != expected {
		t.Fatalf("full vignere encryption failed, expected %v, found %v", expected, encrypted)
	}
}
|
package oidc
import (
"time"
"github.com/google/uuid"
"github.com/mohae/deepcopy"
"github.com/ory/fosite"
"github.com/ory/fosite/handler/openid"
"github.com/ory/fosite/token/jwt"
)
// Session holds OpenID Connect 1.0 Session information.
type Session struct {
	*openid.DefaultSession `json:"id_token"`
	// ChallengeID links this session to its consent challenge.
	ChallengeID uuid.NullUUID `json:"challenge_id"`
	// KID, when set, is advertised as the key identifier in the JWT
	// header (see GetJWTHeader).
	KID string `json:"kid"`
	// ClientID, when set, is copied into access-token claims as the
	// client identifier claim (see GetJWTClaims).
	ClientID string `json:"client_id"`
	ExcludeNotBeforeClaim bool `json:"exclude_nbf_claim"`
	// AllowedTopLevelClaims lists claim names that may be promoted from
	// the ID token's extra claims into the access token.
	AllowedTopLevelClaims []string `json:"allowed_top_level_claims"`
	// Extra is free-form session data, nested under its own claim in
	// access tokens.
	Extra map[string]any `json:"extra"`
}
// GetChallengeID returns the challenge id.
func (s *Session) GetChallengeID() uuid.NullUUID {
	return s.ChallengeID
}
// GetJWTHeader returns the *jwt.Headers for the OAuth 2.0 JWT Profile
// Access Token: the "typ" marker always, plus the key identifier when
// one is configured on the session.
func (s *Session) GetJWTHeader() (headers *jwt.Headers) {
	extra := map[string]any{
		JWTHeaderKeyType: JWTHeaderTypeValueAccessTokenJWT,
	}
	if s.KID != "" {
		extra[JWTHeaderKeyIdentifier] = s.KID
	}
	return &jwt.Headers{Extra: extra}
}
// GetJWTClaims returns the jwt.JWTClaimsContainer for the OAuth 2.0 JWT Profile Access Tokens.
func (s *Session) GetJWTClaims() jwt.JWTClaimsContainer {
	//nolint:prealloc
	var (
		allowed []string
		amr bool
	)
	// Partition the configured top-level claims: names that collide with
	// registered/handled claims are skipped, AMR gets special handling
	// below, and everything else may be copied from the ID token.
	for _, cl := range s.AllowedTopLevelClaims {
		switch cl {
		case ClaimJWTID, ClaimIssuer, ClaimSubject, ClaimAudience, ClaimExpirationTime, ClaimNotBefore, ClaimIssuedAt, ClaimClientIdentifier, ClaimScopeNonStandard, ClaimExtra:
			continue
		case ClaimAuthenticationMethodsReference:
			amr = true
			continue
		}
		allowed = append(allowed, cl)
	}
	claims := &jwt.JWTClaims{
		Subject: s.Subject,
		ExpiresAt: s.GetExpiresAt(fosite.AccessToken),
		IssuedAt: time.Now().UTC(),
		Extra: map[string]any{},
	}
	// Free-form session data is nested under its own claim key.
	if len(s.Extra) > 0 {
		claims.Extra[ClaimExtra] = s.Extra
	}
	if s.DefaultSession != nil && s.DefaultSession.Claims != nil {
		// Promote only the explicitly allowed ID-token extra claims.
		for _, allowedClaim := range allowed {
			if cl, ok := s.DefaultSession.Claims.Extra[allowedClaim]; ok {
				claims.Extra[allowedClaim] = cl
			}
		}
		claims.Issuer = s.DefaultSession.Claims.Issuer
		// AMR is taken from the ID-token claims, not from Extra.
		if amr && len(s.DefaultSession.Claims.AuthenticationMethodsReferences) != 0 {
			claims.Extra[ClaimAuthenticationMethodsReference] = s.DefaultSession.Claims.AuthenticationMethodsReferences
		}
	}
	if len(s.ClientID) != 0 {
		claims.Extra[ClaimClientIdentifier] = s.ClientID
	}
	return claims
}
// GetIDTokenClaims returns the *jwt.IDTokenClaims for this session, or
// nil when no underlying openid session exists.
func (s *Session) GetIDTokenClaims() *jwt.IDTokenClaims {
	if s.DefaultSession == nil {
		return nil
	}
	return s.DefaultSession.Claims
}
// GetExtraClaims returns the Extra/Unregistered claims for this session:
// the ID-token extra claims when present, otherwise the session's own
// Extra map.
func (s *Session) GetExtraClaims() map[string]any {
	if s.DefaultSession != nil && s.DefaultSession.Claims != nil {
		return s.DefaultSession.Claims.Extra
	}
	return s.Extra
}
// Clone copies the OpenIDSession to a new fosite.Session via deep copy;
// a nil receiver yields nil.
func (s *Session) Clone() fosite.Session {
	if s == nil {
		return nil
	}
	return deepcopy.Copy(s).(fosite.Session)
}
|
package main
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"fmt"
)
// サーバーサイドのDBと仮定
var DB = map[string]string{
"TEST1Key": "TEST1Secret",
}
// サーバーサイドでクライアントから受け取った情報が正しいものか判定する関数
// 引数はクライアントから送られてきたデータというイメージ
func Server(apiKey, sign string, data []byte) {
// TEST1のSecretはDBから参照する
apiSecret := DB[apiKey]
// hmacのNew関数でapiSecretmを暗号化
h := hmac.New(sha256.New, []byte(apiSecret))
// 上記のhにクライアントから送られてきたdataを追加(dataはServer関数の第3引数)
h.Write(data)
// hをEncodeしてexpectedHMACに格納
expectedHMAC := hex.EncodeToString(h.Sum(nil))
// クライアントから送られてきたsign(Server関数の第二引数)とexpextedHMACが同じかチェックする、この場合trueになる
fmt.Println(sign == expectedHMAC)
}
// main demonstrates the flow: the client signs the payload with its
// secret, prints the signature, and the server re-computes and checks it.
func main() {
	const apiKey = "TEST1Key"
	const apiSecret = "TEST1Secret"
	// Some arbitrary payload the client would send.
	payload := []byte("data")
	mac := hmac.New(sha256.New, []byte(apiSecret))
	mac.Write(payload)
	signature := hex.EncodeToString(mac.Sum(nil))
	fmt.Println(signature)
	Server(apiKey, signature, payload)
}
|
// go test -v -race
package main
import (
"os"
"os/exec"
"sync"
"testing"
)
// TestNotRace launches four goroutines that each run a short shell loop
// writing to the shared os.Stdout, then waits for all of them; run with
// `go test -race` (see file header) to confirm no race is reported.
// NOTE: requires a POSIX sh on PATH and sleeps ~3s per worker.
func TestNotRace(t *testing.T) {
	cl := []string{"sh", "-c", `for x in $(seq 3); do echo "$x loops"; sleep 1; done`}
	wg := new(sync.WaitGroup)
	run := func() {
		defer wg.Done()
		cmd := exec.Command(cl[0], cl[1:]...)
		cmd.Stdout = os.Stdout
		if err := cmd.Run(); err != nil {
			t.Error(err)
		}
	}
	worker := 4
	wg.Add(worker)
	for i := 0; i < worker; i++ {
		go run()
	}
	wg.Wait()
}
|
/*
* Copyright 2020 The Multicluster-Scheduler Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"context"
"flag"
"log"
"time"
"admiralty.io/multicluster-controller/pkg/cluster"
mcmgr "admiralty.io/multicluster-controller/pkg/manager"
"admiralty.io/multicluster-service-account/pkg/config"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
vklog "github.com/virtual-kubelet/virtual-kubelet/log"
logruslogger "github.com/virtual-kubelet/virtual-kubelet/log/logrus"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"k8s.io/client-go/rest"
"k8s.io/klog"
"k8s.io/sample-controller/pkg/signals"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"admiralty.io/multicluster-scheduler/pkg/apis"
agentconfig "admiralty.io/multicluster-scheduler/pkg/config/agent"
"admiralty.io/multicluster-scheduler/pkg/controller"
"admiralty.io/multicluster-scheduler/pkg/controllers/chaperon"
"admiralty.io/multicluster-scheduler/pkg/controllers/feedback"
"admiralty.io/multicluster-scheduler/pkg/controllers/globalsvc"
"admiralty.io/multicluster-scheduler/pkg/controllers/svcreroute"
"admiralty.io/multicluster-scheduler/pkg/generated/clientset/versioned"
clientset "admiralty.io/multicluster-scheduler/pkg/generated/clientset/versioned"
informers "admiralty.io/multicluster-scheduler/pkg/generated/informers/externalversions"
"admiralty.io/multicluster-scheduler/pkg/generated/informers/externalversions/multicluster/v1alpha1"
"admiralty.io/multicluster-scheduler/pkg/vk/node"
"admiralty.io/multicluster-scheduler/pkg/webhooks/proxypod"
)
// TODO standardize logging
//
// main boots the agent: it always starts the chaperon/feedback
// controllers and the mutating webhook; when targets are configured (or
// the cluster targets itself) it also starts the multicluster service
// controllers and one virtual-kubelet node per target, then blocks
// until a shutdown signal arrives.
func main() {
	stopCh := signals.SetupSignalHandler()
	agentCfg := agentconfig.New()
	cfg, _, err := config.ConfigAndNamespaceForKubeconfigAndContext("", "")
	utilruntime.Must(err)
	k, err := kubernetes.NewForConfig(cfg)
	utilruntime.Must(err)
	startOldStyleControllers(stopCh, agentCfg, cfg, k)
	startWebhook(stopCh, agentCfg, cfg)
	if len(agentCfg.Targets) > 0 || agentCfg.Raw.TargetSelf {
		startControllers(stopCh, agentCfg, cfg)
		startVirtualKubelet(stopCh, agentCfg, k)
	}
	<-stopCh
}
// startOldStyleControllers wires up the chaperon controller (local pods
// -> pod chaperons in target clusters) and, when at least one target
// exists, the feedback controller; it then starts all informer
// factories and runs the controllers in background goroutines.
func startOldStyleControllers(stopCh <-chan struct{}, agentCfg agentconfig.Config, cfg *rest.Config, k *kubernetes.Clientset) {
	customClient, err := versioned.NewForConfig(cfg)
	utilruntime.Must(err)
	kubeInformerFactory := kubeinformers.NewSharedInformerFactory(k, time.Second*30)
	customInformerFactory := informers.NewSharedInformerFactory(customClient, time.Second*30)
	// n counts target clusters, including the local one when TargetSelf.
	n := len(agentCfg.Targets)
	if agentCfg.Raw.TargetSelf {
		n++
	}
	targetCustomClients := make(map[string]clientset.Interface, n)
	targetCustomInformerFactories := make(map[string]informers.SharedInformerFactory, n)
	targetPodChaperonInformers := make(map[string]v1alpha1.PodChaperonInformer, n)
	for _, target := range agentCfg.Targets {
		c, err := versioned.NewForConfig(target.ClientConfig)
		// Fail fast before the client is stored or used (previously the
		// Must check came after the map insert).
		utilruntime.Must(err)
		targetCustomClients[target.Name] = c
		f := informers.NewSharedInformerFactoryWithOptions(c, time.Second*30, informers.WithNamespace(target.Namespace))
		targetCustomInformerFactories[target.Name] = f
		targetPodChaperonInformers[target.Name] = f.Multicluster().V1alpha1().PodChaperons()
	}
	if agentCfg.Raw.TargetSelf {
		targetCustomClients[agentCfg.Raw.ClusterName] = customClient
		targetPodChaperonInformers[agentCfg.Raw.ClusterName] = customInformerFactory.Multicluster().V1alpha1().PodChaperons()
	}
	podInformer := kubeInformerFactory.Core().V1().Pods()
	podChaperonInformer := customInformerFactory.Multicluster().V1alpha1().PodChaperons()
	chapCtrl := chaperon.NewController(k, customClient, podInformer, podChaperonInformer)
	var feedbackCtrl *controller.Controller
	if n > 0 {
		feedbackCtrl = feedback.NewController(k, targetCustomClients, podInformer, targetPodChaperonInformers)
	}
	kubeInformerFactory.Start(stopCh)
	customInformerFactory.Start(stopCh)
	for _, f := range targetCustomInformerFactories {
		f.Start(stopCh)
	}
	go func() {
		// Goroutine-local err: previously both goroutines assigned the
		// enclosing function's err variable concurrently (a data race).
		if err := chapCtrl.Run(2, stopCh); err != nil {
			klog.Fatalf("Error running controller: %s", err.Error())
		}
	}()
	if feedbackCtrl != nil {
		go func() {
			if err := feedbackCtrl.Run(2, stopCh); err != nil {
				klog.Fatalf("Error running controller: %s", err.Error())
			}
		}()
	}
}
// startControllers runs the multicluster-controller based controllers:
// svcreroute on the source cluster and globalsvc from the source to all
// target clusters (including the source itself when TargetSelf).
// NOTE(review): the Options built just below (o.Resync = 30s) are never
// passed to cluster.New for the source cluster — a fresh
// cluster.Options{} is passed instead, so src gets the default resync.
// Looks unintended; confirm before changing.
func startControllers(stopCh <-chan struct{}, agentCfg agentconfig.Config, cfg *rest.Config) {
	m := mcmgr.New()
	o := cluster.Options{}
	resync := 30 * time.Second
	o.Resync = &resync
	src := cluster.New(agentCfg.Raw.ClusterName, cfg, cluster.Options{})
	if err := apis.AddToScheme(src.GetScheme()); err != nil {
		log.Fatalf("adding APIs to member cluster's scheme: %v", err)
	}
	sourceClusters := []*cluster.Cluster{src}
	n := len(agentCfg.Targets)
	if agentCfg.Raw.TargetSelf {
		n++
	}
	targetClusters := make([]*cluster.Cluster, n)
	for i, target := range agentCfg.Targets {
		// This inner o shadows the outer one; each target cluster gets
		// its own namespace-scoped options with the 30s resync.
		o := cluster.Options{}
		o.Namespace = target.Namespace
		o.Resync = &resync
		t := cluster.New(target.Name, target.ClientConfig, o)
		if err := apis.AddToScheme(t.GetScheme()); err != nil {
			log.Fatalf("adding APIs to member cluster's scheme: %v", err)
		}
		targetClusters[i] = t
	}
	if agentCfg.Raw.TargetSelf {
		targetClusters[n-1] = src
	}
	co, err := svcreroute.NewController(src)
	if err != nil {
		log.Fatalf("cannot create svcreroute controller: %v", err)
	}
	m.AddController(co)
	co, err = globalsvc.NewController(sourceClusters, targetClusters)
	if err != nil {
		// NOTE(review): message says "feedback" but this is globalsvc.
		log.Fatalf("cannot create feedback controller: %v", err)
	}
	m.AddController(co)
	go func() {
		if err := m.Start(stopCh); err != nil {
			log.Fatalf("while or after starting multi-cluster manager: %v", err)
		}
	}()
}
// startWebhook serves the pod-mutating admission webhook at
// /mutate-v1-pod on the configured port/cert dir and runs it in the
// background (MetricsBindAddress "0" leaves metrics unbound for this
// manager).
func startWebhook(stopCh <-chan struct{}, agentCfg agentconfig.Config, cfg *rest.Config) {
	webhookMgr, err := manager.New(cfg, manager.Options{Port: agentCfg.Raw.Webhook.Port, CertDir: agentCfg.Raw.Webhook.CertDir, MetricsBindAddress: "0"})
	utilruntime.Must(err)
	hookServer := webhookMgr.GetWebhookServer()
	hookServer.Register("/mutate-v1-pod", &webhook.Admission{Handler: &proxypod.Handler{}})
	go func() {
		utilruntime.Must(webhookMgr.Start(stopCh))
	}()
}
// startVirtualKubelet runs one virtual-kubelet node per target cluster
// (plus one for the local cluster when TargetSelf), canceling all of
// them when stopCh closes.
// NOTE(review): this registers and parses command-line flags at call
// time; confirm no other component parses flags after this.
func startVirtualKubelet(stopCh <-chan struct{}, agentCfg agentconfig.Config, k kubernetes.Interface) {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		<-stopCh
		cancel()
	}()
	var logLevel string
	flag.StringVar(&logLevel, "log-level", "info", `set the log level, e.g. "debug", "info", "warn", "error"`)
	klog.InitFlags(nil)
	flag.Parse()
	vklog.L = logruslogger.FromLogrus(logrus.NewEntry(logrus.StandardLogger()))
	if logLevel != "" {
		lvl, err := logrus.ParseLevel(logLevel)
		if err != nil {
			vklog.G(ctx).Fatal(errors.Wrap(err, "could not parse log level"))
		}
		logrus.SetLevel(lvl)
	}
	for _, target := range agentCfg.Targets {
		n := "admiralty-" + target.Name
		go func(nodeName string) {
			// Use the parameter instead of the captured loop-body
			// variable n (the previous code passed n as nodeName but
			// then ignored the parameter).
			if err := node.Run(ctx, node.Opts{NodeName: nodeName}, k); err != nil && errors.Cause(err) != context.Canceled {
				vklog.G(ctx).Fatal(err)
			}
		}(n)
	}
	if agentCfg.Raw.TargetSelf {
		go func() {
			if err := node.Run(ctx, node.Opts{NodeName: "admiralty-" + agentCfg.Raw.ClusterName}, k); err != nil && errors.Cause(err) != context.Canceled {
				vklog.G(ctx).Fatal(err)
			}
		}()
	}
}
|
package config
import "fmt"
// StorageConfig describes various configurations of a storage layer.
type StorageConfig struct {
	Postgres *PostgresConfig `yaml:"postgres"`
}

// validate checks each configured backend; an absent (nil) backend is
// considered valid.
func (c *StorageConfig) validate() error {
	if c.Postgres == nil {
		return nil
	}
	return c.Postgres.validate()
}
// PostgresConfig describes configuration of a PostgreSQL client.
type PostgresConfig struct {
	Endpoint string `yaml:"endpoint"`
	User     string `yaml:"user"`
	Password string `yaml:"password"`
	Database string `yaml:"database"`
}

// validate returns an error naming the first empty connection field.
func (pc *PostgresConfig) validate() error {
	if pc.Endpoint == "" {
		return fmt.Errorf("Wrong PostgresConfig.Endpoint value: %s", pc.Endpoint)
	}
	if pc.User == "" {
		return fmt.Errorf("Wrong PostgresConfig.User value: %s", pc.User)
	}
	if pc.Password == "" {
		return fmt.Errorf("Wrong PostgresConfig.Password value: %s", pc.Password)
	}
	if pc.Database == "" {
		// Fixed: previously formatted pc.Password here (copy/paste bug),
		// leaking the password into error output/logs.
		return fmt.Errorf("Wrong PostgresConfig.Database value: %s", pc.Database)
	}
	return nil
}

// URL renders a postgres:// connection string from the config.
func (pc *PostgresConfig) URL() string {
	// TODO: TLS
	return fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=disable", pc.User, pc.Password, pc.Endpoint, pc.Database)
}
|
package bot
import (
"fmt"
"strings"
"github.com/bwmarrin/discordgo"
)
type Command interface {
Run(bot *Bot, args string, message *discordgo.Message) Sendable
}
// CommandGroup maps a subcommand name to its implementation. It is
// itself a Command (see Run), so groups can be nested to form a tree.
type CommandGroup map[string]Command
// Run dispatches to the subcommand named by the first word of args,
// passing the remainder of args down to it. When the name is missing
// or unknown, it returns an ErrorMessage listing the valid subcommand
// names in sorted (deterministic) order.
func (c CommandGroup) Run(
	bot *Bot,
	args string,
	message *discordgo.Message,
) Sendable {
	// Split off the first word as the subcommand name; everything after
	// the first space (if any) becomes the subcommand's arguments.
	commandName, subArgs, _ := strings.Cut(args, " ")
	if command := c[commandName]; command != nil {
		return command.Run(bot, subArgs, message)
	}
	// Map iteration order is random in Go; sort the names so the help
	// text is stable across invocations.
	names := make([]string, 0, len(c))
	for name := range c {
		names = append(names, name)
	}
	sort.Strings(names)
	var oneOf strings.Builder
	for _, name := range names {
		oneOf.WriteString("\n- ")
		oneOf.WriteString(name)
	}
	title := "Missing subcommand"
	if args != "" {
		title = fmt.Sprintf("Unknown subcommand: `%s`", commandName)
	}
	return ErrorMessage{
		title,
		fmt.Sprintf(
			"Must be one of: %s\n",
			oneOf.String(),
		),
	}
}
|
package users
import (
"crypto/sha1"
"encoding/hex"
"errors"
"fmt"
"strconv"
jwt "github.com/dgrijalva/jwt-go"
"github.com/gin-gonic/gin"
. "go-sugar/config"
. "go-sugar/db"
"go-sugar/db/request"
)
// Columns
//
// Names of the columns of the users table, used when building SQL
// conditions (see Validate, FindByID, FindByEmail, DeleteByID).
const (
	ID string = "id"
	Name string = "name"
	Password string = "password"
	Role string = "role"
	Status string = "status"
	Email string = "email"
	Phone string = "phone"
	CreatedAt string = "created_at"
	UpdatedAt string = "updated_at"
	DeletedAt string = "deleted_at"
)
// Repository User Repository
//
// Repository bundles the table name, hashing/signing secrets and token
// lifetime used by the user data-access methods below.
type Repository struct {
	tableName string // fully qualified "<schema>.users" table name
	salt string // appended to passwords before hashing (see CreateHash)
	secretForToken string // HMAC key used to sign and verify JWTs
	Context *gin.Context
	expiresAt int64 // token lifetime in seconds, added to "now" in GetClaims
}
// Repo users repository
//
// NOTE(review): the salt and the JWT signing secret are hard-coded in
// source here; they should come from configuration or the environment.
// Confirm before any production use.
var Repo = Repository{
	tableName: Config.DB.Schema + ".users",
	salt: "sweet_sugar_67n334g6",
	secretForToken: "sweet_sugar_346436bb43gh463",
	expiresAt: 60 * 60 * 24, // 24 hours, in seconds
}
// GetAll returns every row of the users table. On a query error the
// error is discarded and an empty slice is returned.
func (r *Repository) GetAll() []User {
	req := request.New(DB)
	if rows, err := req.Select([]string{}).From(r.tableName).Query(); err == nil {
		return parseRows(rows)
	}
	return []User{}
}
// Create inserts a new user row and fills in the generated ID on the
// same *User it was given. The plaintext password is replaced with its
// salted hash before the insert, so the returned User carries the hash.
func (r *Repository) Create(user *User) (*User, error) {
	user.Password = r.CreateHash(user.Password)
	str := `INSERT INTO ` + r.tableName + ` (role, email, phone, name, status, password) values(?, ?, ?, ?, ?, ?)`
	result, err := DB.Exec(str, user.Role, user.Email, user.Phone, user.Name, user.Status, user.Password)
	if err != nil {
		// Propagate the error to the caller; the previous log-and-return
		// reported the same failure twice (handle errors at one layer).
		return nil, err
	}
	id, err := result.LastInsertId()
	if err != nil {
		return nil, err
	}
	user.ID = int(id)
	return user, nil
}
// Validate return bool(valid or not) and ValidateError struct
//
// It reports whether user would be unique in the users table, setting
// a per-column message (ID, Email, Phone) for each conflict found and
// appending it to the aggregate error message. On a query error it
// returns false with the raw DB error text in ErrorMessage.
//
// NOTE(review): the three chained Where() calls presumably combine with
// AND, which would only match rows where id AND email AND phone all
// collide; a uniqueness check normally wants OR — confirm the request
// builder's Where semantics.
func (r *Repository) Validate(user *User) (bool, ValidateError) {
	valid := true
	Request := request.New(DB)
	id := strconv.Itoa(user.ID)
	validateError := ValidateError{}
	rows, err := Request.
		Select([]string{}).
		From(r.tableName).
		Where(Request.NewCond(ID, "=", id)).
		Where(Request.NewCond(Email, "=", user.Email)).
		Where(Request.NewCond(Phone, "=", user.Phone)).
		Query()
	if err == nil {
		selectedUsers := parseRows(rows)
		for i := 0; i < len(selectedUsers); i++ {
			current := selectedUsers[i]
			// Record one message per conflicting column of this row.
			if current.ID == user.ID {
				validateError.ID = "User with this ID already exist"
				validateError.AddToErrorMessage(validateError.ID)
				valid = false
			}
			if current.Email == user.Email {
				validateError.Email = "User with this email already exist"
				validateError.AddToErrorMessage(validateError.Email)
				valid = false
			}
			if current.Phone == user.Phone {
				validateError.Phone = "User with this Phone already exist"
				validateError.AddToErrorMessage(validateError.Phone)
				valid = false
			}
		}
	} else {
		// Query failed: treat as invalid and surface the DB error text.
		valid = false
		validateError.ErrorMessage = err.Error()
	}
	return valid, validateError
}
// Update persists the mutable user fields (name, role, status, email,
// phone) for the row matching user.ID. It reports success as a bool
// alongside any DB error.
func (r *Repository) Update(user *User) (bool, error) {
	query := `UPDATE ` + r.tableName + ` SET name = ?, role = ?, status = ?, email = ?, phone = ? WHERE id = ?`
	_, err := DB.Exec(query, user.Name, user.Role, user.Status, user.Email, user.Phone, user.ID)
	return err == nil, err
}
// DeleteByID removes the user row with the given id, reporting success
// as a bool. Both SQL-building and execution errors are discarded and
// surface only as a false return.
func (r *Repository) DeleteByID(id string) bool {
	req := request.New(DB)
	sql, err := req.
		Delete().
		From(r.tableName).
		Where(req.NewCond(ID, "=", id)).
		ToSQL()
	if err != nil {
		return false
	}
	_, execErr := DB.Exec(sql)
	return execErr == nil
}
// FindByID - find user by ID
//
// A query failure is now returned to the caller instead of being
// masked as "User not found"; that error is reserved for a successful
// query that matches no row.
func (r *Repository) FindByID(id string) (*User, error) {
	req := request.New(DB)
	var columns []string
	rows, err := req.
		Select(columns).
		From(r.tableName).
		Where(req.NewCond(ID, "=", id)).
		Query()
	if err != nil {
		return nil, err
	}
	users := parseRows(rows)
	if len(users) == 0 {
		return nil, errors.New("User not found")
	}
	return &users[0], nil
}
// FindByEmail - find user by email
//
// (The comment previously said "by ID".) A query failure is returned
// to the caller instead of being masked as "User not found"; that
// error is reserved for a successful query that matches no row.
func (r *Repository) FindByEmail(email string) (*User, error) {
	req := request.New(DB)
	rows, err := req.Select([]string{}).
		From(r.tableName).
		Where(req.NewCond(Email, "=", email)).
		Query()
	if err != nil {
		return nil, err
	}
	users := parseRows(rows)
	if len(users) == 0 {
		return nil, errors.New("User not found")
	}
	return &users[0], nil
}
// CreateHash returns the hex-encoded SHA-1 digest of str concatenated
// with the repository salt.
//
// NOTE(review): SHA-1 with a single shared salt is weak for password
// storage (fast, no per-user salt); bcrypt/scrypt/argon2 would be
// stronger, but switching would invalidate existing stored hashes —
// confirm before changing.
func (r *Repository) CreateHash(str string) string {
	digest := sha1.Sum([]byte(str + r.salt))
	return hex.EncodeToString(digest[:])
}
// GetClaims returns new claims for user, expiring expiresAt seconds
// from now.
func (r *Repository) GetClaims(user *User) CustomClaims {
	claims := CustomClaims{User: user}
	// Use the receiver's own TTL rather than the package-level Repo
	// singleton, so claims honor the repository they were created from.
	claims.ExpiresAt = jwt.TimeFunc().Unix() + r.expiresAt
	return claims
}
// CreateJWT builds claims for u, signs them with HMAC-SHA256 using the
// repository secret, and returns the encoded token string.
func (r *Repository) CreateJWT(u *User) (string, error) {
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, r.GetClaims(u))
	return token.SignedString([]byte(r.secretForToken))
}
// ParseJWT verifies tokenString against the repository secret and
// returns the User embedded in its claims. Tokens signed with any
// non-HMAC method are rejected before the key is handed out.
func (r *Repository) ParseJWT(tokenString string) (*User, error) {
	var claims CustomClaims
	keyFunc := func(token *jwt.Token) (interface{}, error) {
		if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
		}
		return []byte(r.secretForToken), nil
	}
	if _, err := jwt.ParseWithClaims(tokenString, &claims, keyFunc); err != nil {
		return nil, err
	}
	return claims.User, nil
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//719. Find K-th Smallest Pair Distance
//Given an integer array, return the k-th smallest distance among all the pairs. The distance of a pair (A, B) is defined as the absolute difference between A and B.
//Example 1:
//Input:
//nums = [1,3,1]
//k = 1
//Output: 0
//Explanation:
//Here are all the pairs:
//(1,3) -> 2
//(1,1) -> 0
//(3,1) -> 2
//Then the 1st smallest distance pair is (1,1), and its distance is 0.
//Note:
//2 <= len(nums) <= 10000.
//0 <= nums[i] < 1000000.
//1 <= k <= len(nums) * (len(nums) - 1) / 2.
//func smallestDistancePair(nums []int, k int) int {
//}
// Time Is Money |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.