_id stringlengths 2 7 | title stringlengths 1 118 | partition stringclasses 3 values | text stringlengths 52 85.5k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q16100 | GetKeys | train | func (r *FastHttpHeader) GetKeys() (value []string) {
addValue := func(k, v []byte) {
found := false
key := string(k)
for _, r := range value {
if key == r {
found = true
break
}
}
if !found {
value = append(value, key)
}
}
if !r.isResponse {
r.Source.(*FastHttpRequest).Original.Request.Header.VisitAll(addValue)
} else {
r.Source.(*FastHttpResponse).Original.Response.Header.VisitAll(addValue)
}
return
} | go | {
"resource": ""
} |
q16101 | Get | train | func (r *FastHttpHeader) Get(key string) (value []string) {
if !r.isResponse {
value = strings.Split(string(r.Source.(*FastHttpRequest).Original.Request.Header.Peek(key)), ",")
} else {
value = strings.Split(string(r.Source.(*FastHttpResponse).Original.Response.Header.Peek(key)), ",")
}
return
} | go | {
"resource": ""
} |
// SetStatus sets the HTTP status code on the underlying fasthttp response.
// It is a no-op when this header wraps a request rather than a response.
func (r *FastHttpHeader) SetStatus(statusCode int) {
	if r.isResponse {
		r.Source.(*FastHttpResponse).Original.Response.SetStatusCode(statusCode)
	}
}
"resource": ""
} |
// GetValues exposes the multipart form's value map as url.Values so callers
// can use the standard net/url accessors on it.
func (f *FastHttpMultipartForm) GetValues() url.Values {
	return url.Values(f.Form.Value)
}
"resource": ""
} |
// Render executes the Ace template into wr. If arg is a map containing an
// "ace-inner" key, rendering is redirected to a derived template combining
// this template with the named inner one; the derived template is created
// and cached in engine.templatesByName on first use.
func (acetmpl AceTemplate) Render(wr io.Writer, arg interface{}) error {
	// We can redirect this render to another template if the arguments contain ace_content in them
	if argmap, ok := arg.(map[string]interface{}); ok {
		if acecontentraw, ok := argmap["ace-inner"]; ok {
			// NOTE(review): assumes the "ace-inner" value is always a string;
			// a non-string value panics on this assertion — confirm callers
			// guarantee this.
			acecontent := acecontentraw.(string)
			newtemplatename := acetmpl.TemplateName + "-" + acecontent
			// Now lookup the template again
			if _, ok := acetmpl.engine.templatesByName[newtemplatename]; !ok {
				if inner, ok := acetmpl.engine.templatesByName[acecontent]; !ok {
					return fmt.Errorf("Inner content %s not found in ace templates", acecontent)
				} else {
					// Cache the combined outer+inner template for reuse on
					// later renders.
					acetmpl.engine.templatesByName[newtemplatename] = &AceTemplate{
						File:         acetmpl.File,
						Inner:        inner.File,
						engine:       acetmpl.engine,
						TemplateView: acetmpl.TemplateView}
				}
			}
			return acetmpl.engine.templatesByName[newtemplatename].renderInternal(wr, arg)
		}
	}
	return acetmpl.renderInternal(wr, arg)
}
"resource": ""
} |
// Init reads the db.driver and db.spec settings from the revel config, opens
// the package-level Db connection, and registers a shutdown hook that closes
// it when the application stops. Missing configuration or a failed open is
// fatal.
func Init() {
	// Read configuration.
	var found bool
	if Driver, found = revel.Config.String("db.driver"); !found {
		revel.RevelLog.Fatal("db.driver not configured")
	}
	if Spec, found = revel.Config.String("db.spec"); !found {
		revel.RevelLog.Fatal("db.spec not configured")
	}
	// Open a connection. Note sql.Open only validates arguments; the first
	// real connection is established lazily on use.
	var err error
	Db, err = sql.Open(Driver, Spec)
	if err != nil {
		revel.RevelLog.Fatal("Open database connection error", "error", err, "driver", Driver, "spec", Spec)
	}
	revel.OnAppStop(func() {
		revel.RevelLog.Info("Closing the database (from module)")
		if err := Db.Close(); err != nil {
			revel.AppLog.Error("Failed to close the database", "error", err)
		}
	})
}
"resource": ""
} |
q16106 | InitDb | train | func InitDb(dbResult *DbGorp) error {
params := DbInfo{}
params.DbDriver = revel.Config.StringDefault("db.driver", "sqlite3")
params.DbHost = revel.Config.StringDefault("db.host", "localhost")
if params.DbDriver == "sqlite3" && params.DbHost == "localhost" {
params.DbHost = "/tmp/app.db"
}
params.DbUser = revel.Config.StringDefault("db.user", "default")
params.DbPassword = revel.Config.StringDefault("db.password", "")
params.DbName = revel.Config.StringDefault("db.name", "default")
params.DbConnection = revel.Config.StringDefault("db.connection", "")
params.DbSchema = revel.Config.StringDefault("db.schema", "")
dbResult.Info = ¶ms
return dbResult.InitDb(true)
} | go | {
"resource": ""
} |
q16107 | validToken | train | func validToken(token string, isSameOrigin, foundToken bool, c *revel.Controller) (result bool) {
// Token wasn't present at all
if !foundToken {
c.Result = c.Forbidden("REVEL CSRF: Session token missing.")
return
}
// Same origin
if !isSameOrigin {
c.Result = c.Forbidden("REVEL CSRF: Same origin mismatch.")
return
}
var requestToken string
// First check for token in post data
if c.Request.Method == "POST" {
requestToken = c.Params.Get("csrftoken")
}
// Then check for token in custom headers, as with AJAX
if requestToken == "" {
requestToken = c.Request.GetHttpHeader("X-CSRFToken")
}
if requestToken == "" || !compareToken(requestToken, token) {
c.Result = c.Forbidden("REVEL CSRF: Invalid token.")
return
}
return true
} | go | {
"resource": ""
} |
// getFullRequestURL reconstructs the externally visible request URL,
// honouring reverse-proxy headers (X-Forwarded-Host/Proto/Scheme), the
// configured revel.CookieDomain, and the server's SSL setting, in that
// order of precedence.
func getFullRequestURL(c *revel.Controller) (requestUrl *url.URL) {
	// NOTE(review): this aliases (and below mutates) c.Request.URL rather
	// than copying it — confirm callers do not rely on the request URL
	// staying untouched.
	requestUrl = c.Request.URL
	c.Log.Debug("Using ", "request url host", requestUrl.Host, "request host", c.Request.Host, "cookie domain", revel.CookieDomain)
	// Update any of the information based on the headers
	if host := c.Request.GetHttpHeader("X-Forwarded-Host"); host != "" {
		requestUrl.Host = strings.ToLower(host)
	}
	if scheme := c.Request.GetHttpHeader("X-Forwarded-Proto"); scheme != "" {
		requestUrl.Scheme = strings.ToLower(scheme)
	}
	if scheme := c.Request.GetHttpHeader("X-Forwarded-Scheme"); scheme != "" {
		requestUrl.Scheme = strings.ToLower(scheme)
	}
	// Use the revel.CookieDomain for the hostname, or the c.Request.Host
	if requestUrl.Host == "" {
		host := revel.CookieDomain
		if host == "" && c.Request.Host != "" {
			host = c.Request.Host
			// Slice off any port information.
			if i := strings.Index(host, ":"); i != -1 {
				host = host[:i]
			}
		}
		requestUrl.Host = host
	}
	// If no scheme found in headers use the revel server settings
	if requestUrl.Scheme == "" {
		// Fix the Request.URL, it is missing information, go http server does this
		if revel.HTTPSsl {
			requestUrl.Scheme = "https"
		} else {
			requestUrl.Scheme = "http"
		}
		// Re-parse a fully qualified URL; on parse failure keep the
		// partially patched URL from above.
		fixedUrl := requestUrl.Scheme + "://" + c.Request.Host + c.Request.URL.Path
		if purl, err := url.Parse(fixedUrl); err == nil {
			requestUrl = purl
		}
	}
	c.Log.Debug("getFullRequestURL ", "requesturl", requestUrl.String())
	return
}
"resource": ""
} |
// compareToken reports whether requestToken equals token, using a
// constant-time comparison so the check does not leak timing information.
func compareToken(requestToken, token string) bool {
	a, b := []byte(requestToken), []byte(token)
	// ConstantTimeCompare will panic if the []byte aren't the same length
	if len(a) != len(b) {
		return false
	}
	return subtle.ConstantTimeCompare(a, b) == 1
}
"resource": ""
} |
q16110 | sameOrigin | train | func sameOrigin(u1, u2 *url.URL) bool {
return u1.Scheme == u2.Scheme && u1.Hostname() == u2.Hostname()
} | go | {
"resource": ""
} |
// init registers the "csrftoken" template function, which pulls the token
// previously stashed in ViewArgs under "_csrftoken" and emits it unescaped.
// It panics if the CSRF filter has not populated the token.
func init() {
	revel.TemplateFuncs["csrftoken"] = func(viewArgs map[string]interface{}) template.HTML {
		if tokenFunc, ok := viewArgs["_csrftoken"]; !ok {
			panic("REVEL CSRF: _csrftoken missing from ViewArgs.")
		} else {
			return template.HTML(tokenFunc.(string))
		}
	}
}
"resource": ""
} |
// NewAdapter builds a casbin persistence adapter backed by the shared
// gormdb connection, initializing that connection with the given parameters.
func NewAdapter(params gormdb.DbInfo) *Adapter {
	a := &Adapter{}
	gormdb.InitDBWithParameters(params)
	a.db = gormdb.DB
	return a
}
"resource": ""
} |
q16113 | LoadPolicy | train | func (a *Adapter) LoadPolicy(model model.Model) error {
var lines []Line
err := a.db.Find(&lines).Error
if err != nil {
return err
}
for _, line := range lines {
loadPolicyLine(line, model)
}
return nil
} | go | {
"resource": ""
} |
q16114 | SavePolicy | train | func (a *Adapter) SavePolicy(model model.Model) error {
a.dropTable()
a.createTable()
for ptype, ast := range model["p"] {
for _, rule := range ast.Policy {
line := savePolicyLine(ptype, rule)
err := a.db.Create(&line).Error
if err != nil {
return err
}
}
}
for ptype, ast := range model["g"] {
for _, rule := range ast.Policy {
line := savePolicyLine(ptype, rule)
err := a.db.Create(&line).Error
if err != nil {
return err
}
}
}
return nil
} | go | {
"resource": ""
} |
// NewGracefulListener wraps ln in a listener whose Close waits up to
// maxWaitTime for in-flight connections to finish before returning.
func NewGracefulListener(ln net.Listener, maxWaitTime time.Duration) net.Listener {
	return &GracefulListener{
		ln:          ln,
		maxWaitTime: maxWaitTime,
		done:        make(chan struct{}),
	}
}
"resource": ""
} |
q16116 | Close | train | func (ln *GracefulListener) Close() error {
err := ln.ln.Close()
if err != nil {
return nil
}
return ln.waitForZeroConns()
} | go | {
"resource": ""
} |
// Render executes the pongo2 template into wr, exposing arg to goroutine-
// local storage under the "data" key for template helpers. pongo2 errors
// are converted into revel.Error values (with source lines attached in dev
// mode) so revel can render its error page.
func (tmpl PongoTemplate) Render(wr io.Writer, arg interface{}) (err error) {
	gls.With(gls.Values(map[interface{}]interface{}{"data": arg}), func() {
		// NOTE(review): assumes arg is always a map[string]interface{};
		// any other type panics on this assertion — confirm callers.
		err = tmpl.template.ExecuteWriter(p2.Context(arg.(map[string]interface{})), wr)
		if nil != err {
			if e, ok := err.(*p2.Error); ok {
				rerr := &revel.Error{
					Title:       "Template Execution Error",
					Path:        tmpl.TemplateName,
					Description: e.Error(),
					Line:        e.Line,
				}
				if revel.DevMode {
					// Attach template source for the dev-mode error page.
					rerr.SourceLines = tmpl.Content()
				}
				err = rerr
			}
		}
	})
	return err
}
"resource": ""
} |
// NewDbWorker creates a worker container for numWorkers database workers.
// All workers share the input/output/control channels, each buffered to
// numWorkers. Timeouts default to 0 (disabled).
func NewDbWorker(db *DbGorp, workInfo DbWorkInfo, numWorkers int) (container *DbWorkerContainer) {
	container = &DbWorkerContainer{
		SharedWorker: SharedWorker{
			InputChannel:   make(chan interface{}, numWorkers),
			OutputChannel:  make(chan interface{}, numWorkers),
			ControlChannel: make(chan func() (WorkerPhase, *DbWorker), numWorkers),
			workInfo:       workInfo,
		},
		NumWorkers:       numWorkers,
		Db:               db,
		StartWorkTimeout: 0,
		LongWorkTimeout:  0,
	}
	return
}
"resource": ""
} |
q16119 | startWorker | train | func startWorker(container *DbWorkerContainer, db *DbGorp, id int) {
newDb, _ := db.CloneDb(true)
worker := &DbWorker{
Db: newDb,
Id: id,
SharedData: map[string]interface{}{},
SharedWorker: SharedWorker{
workInfo: container.workInfo,
InputChannel: container.InputChannel,
OutputChannel: container.OutputChannel,
ControlChannel: container.ControlChannel,
},
}
// Close the database after worker has ended (Start returned
defer worker.Db.Close()
container.mutex.Lock()
container.Workers = append(container.Workers, worker)
container.mutex.Unlock()
// Only monitor jobs if Status function defined and a timeout is also defined
if container.LongWorkTimeout > 0 {
worker.TimeoutChannel = make(chan *timeoutInfo)
go worker.TimeInfo.start(worker.TimeoutChannel, container.LongWorkTimeout)
}
worker.start()
} | go | {
"resource": ""
} |
// start runs the worker loop: announce Start (both via the Status callback
// and the control channel), process jobs until the input channel closes,
// then announce Stop and shut down the timeout monitor, if any.
func (worker *DbWorker) start() {
	worker.workInfo.Status(Start, worker)
	worker.ControlChannel <- func() (WorkerPhase, *DbWorker) { return Start, worker }
	for job := range worker.InputChannel {
		worker.invoke(job)
	}
	worker.workInfo.Status(Stop, worker)
	worker.ControlChannel <- func() (WorkerPhase, *DbWorker) { return Stop, worker }
	if worker.TimeoutChannel != nil {
		// Closing the channel tells the monitor goroutine to exit.
		close(worker.TimeoutChannel)
	}
}
"resource": ""
} |
// invoke runs a single job through the Work callback, recovering from any
// panic so one bad job cannot kill the worker, and bracketing the job with
// StartJob/EndJob notifications to the timeout monitor when enabled.
func (worker *DbWorker) invoke(job interface{}) {
	defer func() {
		if err := recover(); err != nil {
			// Log the panic plus a (truncated, 1 KiB) stack trace and
			// keep the worker alive.
			trace := make([]byte, 1024)
			count := runtime.Stack(trace, true)
			moduleLogger.Error("Recover from panic: ", "error", err)
			moduleLogger.Error("Stack", "size", count, "trace", string(trace))
		}
	}()
	// Setup the timeout information
	if worker.TimeoutChannel != nil {
		worker.TimeInfo = &timeoutInfo{worker: worker, started: time.Now(), state: StartJob}
		worker.TimeoutChannel <- worker.TimeInfo
	}
	worker.workInfo.Work(job, worker)
	if worker.TimeoutChannel != nil {
		worker.TimeInfo.state = EndJob
		worker.TimeoutChannel <- worker.TimeInfo
	}
}
"resource": ""
} |
// MakeCallback adapts two plain functions into a DbWorkInfo implementation.
// status may be nil; work is required.
func MakeCallback(status func(phase WorkerPhase, worker *DbWorker), work func(value interface{}, worker *DbWorker)) DbWorkInfo {
	return &DbCallbackImplied{StatusFn: status, WorkFn: work}
}
"resource": ""
} |
// Status forwards a phase notification to the optional StatusFn callback;
// it is a no-op when no callback was supplied.
func (dbCallback *DbCallbackImplied) Status(phase WorkerPhase, worker *DbWorker) {
	if dbCallback.StatusFn != nil {
		dbCallback.StatusFn(phase, worker)
	}
}
"resource": ""
} |
// Work forwards a job to the WorkFn callback. Unlike StatusFn, WorkFn is
// assumed non-nil; a nil WorkFn would panic here.
func (dbCallback *DbCallbackImplied) Work(value interface{}, worker *DbWorker) {
	dbCallback.WorkFn(value, worker)
}
"resource": ""
} |
// start is the timeout-monitor loop for one worker. Each message on
// TimeoutChannel marks a job-state change; while a job is in flight, a
// JobLongrunning status is emitted every `timeout` seconds until the next
// state arrives or the channel is closed (worker shutdown).
func (_ *timeoutInfo) start(TimeoutChannel chan *timeoutInfo, timeout int64) {
	for j := range TimeoutChannel {
		j.started = time.Now()
		j.state = StartJob
		j.worker.workInfo.Status(j.state, j.worker)
		for {
			select {
			case complete, ok := <-TimeoutChannel:
				if !ok {
					// Channel closed returning...
					return
				}
				// Received new State, record and loop
				complete.worker.workInfo.Status(complete.state, complete.worker)
				// NOTE(review): this break exits only the select, not the
				// inner for — after an EndJob message the loop keeps
				// selecting (and can emit JobLongrunning) until the channel
				// closes or a new message arrives. Confirm this is intended.
				break
			case <-time.After(time.Second * time.Duration(timeout)):
				j.worker.workInfo.Status(JobLongrunning, j.worker)
			}
		}
	}
}
"resource": ""
} |
// CloneDb creates an independent DbGorp with a copy of this connection's
// settings and the same init hook, optionally opening the new connection
// immediately (open=true).
func (dbGorp *DbGorp) CloneDb(open bool) (newDb *DbGorp, err error) {
	// Copy the info struct by value so the clone cannot mutate the original.
	dbInfo := *dbGorp.Info
	newDb = &DbGorp{Info: &dbInfo}
	newDb.dbInitFn = dbGorp.dbInitFn
	err = newDb.InitDb(open)
	return
}
"resource": ""
} |
// dbInit invokes the registered init hook, if any, returning its error.
// With no hook registered it succeeds trivially.
func (dbGorp *DbGorp) dbInit() (err error) {
	if dbGorp.dbInitFn != nil {
		err = dbGorp.dbInitFn(dbGorp)
	}
	return
}
"resource": ""
} |
// SetDbInit registers the init hook and immediately runs it against this
// connection, returning the hook's error.
func (dbGorp *DbGorp) SetDbInit(dbInitFn func(dbMap *DbGorp) error) (err error) {
	dbGorp.dbInitFn = dbInitFn
	return dbGorp.dbInit()
}
"resource": ""
} |
// RenderAceTemplate renders the base Ace template with the named inner
// template injected via the "ace-inner" view argument (see AceTemplate.Render).
func (c *AceController) RenderAceTemplate(base, inner string) revel.Result {
	c.ViewArgs["ace-inner"] = inner
	return c.RenderTemplate(base)
}
"resource": ""
} |
q16130 | Begin | train | func (c *TxnController) Begin() revel.Result {
txn := gormdb.DB.Begin()
if txn.Error != nil {
c.Log.Panic("Transaction begine error", "error", txn.Error)
}
c.Txn = txn
return nil
} | go | {
"resource": ""
} |
// Commit commits the controller's open transaction, if any, then clears it.
// A commit error other than sql.ErrTxDone (already finished) is treated as
// fatal and panics. Intended as a revel after-filter.
func (c *TxnController) Commit() revel.Result {
	if c.Txn == nil {
		return nil
	}
	c.Txn.Commit()
	// ErrTxDone means the transaction was already committed/rolled back
	// (e.g. by Rollback) and is not an error here.
	if c.Txn.Error != nil && c.Txn.Error != sql.ErrTxDone {
		fmt.Println(c.Txn.Error)
		panic(c.Txn.Error)
	}
	c.Txn = nil
	return nil
}
"resource": ""
} |
// NewRelicFilter is a revel filter that wraps the remainder of the filter
// chain in a New Relic transaction named after the action, when the current
// server engine is the New Relic engine and its app is initialized.
func NewRelicFilter(c *revel.Controller, fc []revel.Filter) {
	if nr, ok := revel.CurrentEngine.Engine().(*ServerNewRelic); ok {
		if nr.NewRelicApp != nil {
			txn := nr.NewRelicApp.StartTransaction(c.Action,
				c.Response.Out.Server.(*revel.GoResponse).Original,
				c.Request.In.(*revel.GoRequest).Original)
			// defer fires at function return, i.e. after fc[0] below has
			// run, so the transaction spans the whole downstream chain.
			defer txn.End()
		} else {
			serverLog.Error("Newrelic application not initialized before filter called")
		}
	}
	fc[0](c, fc[1:])
}
"resource": ""
} |
// ServeDir serves a directory listing/static content rooted at the fixed
// route parameter "prefix". An empty prefix yields 404.
func (c Static) ServeDir(prefix, filepath string) revel.Result {
	// Fix for #503: take the prefix from the fixed route params rather
	// than trusting the bound argument.
	prefix = c.Params.Fixed.Get("prefix")
	if prefix == "" {
		return c.NotFound("")
	}
	return serve(c, prefix, filepath, true)
}
"resource": ""
} |
// processDir builds the template arguments for a directory listing of
// fullPath: a FileInformation entry per child (with icon, relative link and
// human-readable size), plus a parent-directory link when fullPath is not
// the listing root. Symlinked subdirectories that escape the root are shown
// but not linked.
func (c *Static) processDir(fullPath, basePath string) (args map[string]interface{}, err error) {
	dirName := fpath.Base(fullPath)
	args = map[string]interface{}{"dirName": dirName}
	// Walk the folder showing up and down links
	dirFiles := []model.FileInformation{}
	// Resolve symlinks so we can verify children stay under the real root.
	symLinkPath, e := fpath.EvalSymlinks(fullPath)
	if e != nil {
		return args, e
	}
	// Get directory contents
	files, e := ioutil.ReadDir(fullPath)
	if e != nil {
		return nil, e
	}
	if fullPath != basePath {
		// Not at the root: offer an "up" link.
		fileInfo := model.FileInformation{Icon: UP_DIR_ICON, Name: c.Message("static\\parent directory"), Relative: "../"}
		dirFiles = append(dirFiles, fileInfo)
	}
	for _, f := range files {
		fileInfo := model.FileInformation{Name: f.Name()}
		if f.IsDir() {
			fileInfo.Icon = DIR_ICON
			// Check that it is not a symnlink
			realFullPath, _ := fpath.EvalSymlinks(fpath.Join(fullPath, f.Name()))
			if strings.HasPrefix(realFullPath, symLinkPath) {
				// Valid to drill into
				fileInfo.Relative = f.Name() + "/"
			}
		} else {
			fileInfo.Icon = FILE_ICON
			// Scale the byte count to the largest unit it exceeds.
			// NOTE(review): the loop indexes byteSizeList without a bounds
			// check — presumably the list ends with a sentinel larger than
			// any file size; confirm. Also "%0.1d" below looks like it was
			// meant to be plain "%d" — verify the intended formatting.
			size := "bytes"
			divider := int64(1)
			fileInfo.Size = f.Size()
			for x := 0; fileInfo.Size > byteSizeList[x][1].(int64); x++ {
				size = byteSizeList[x][0].(string)
				divider = byteSizeList[x][1].(int64)
			}
			fileInfo.Size = fileInfo.Size / divider
			fileInfo.SizeType = size
			fileInfo.NiceSize = fmt.Sprintf("%0.1d %s", fileInfo.Size, size)
			fileInfo.Relative = fileInfo.Name
		}
		modified := f.ModTime()
		fileInfo.Modified = &modified
		dirFiles = append(dirFiles, fileInfo)
	}
	args["content"] = dirFiles
	args["count"] = len(dirFiles)
	args["dateformat"] = revel.Config.StringDefault("static.dateformat", "2006-01-02 15:04:05 MST")
	return
}
"resource": ""
} |
q16135 | S3Endpoint | train | func (c *S3Cli) S3Endpoint() string {
if c.Host == "" {
return ""
}
if c.Port != 0 {
return fmt.Sprintf("%s:%d", c.Host, c.Port)
}
return c.Host
} | go | {
"resource": ""
} |
q16136 | AssertPutOptionsApplied | train | func AssertPutOptionsApplied(s3CLIPath string, cfg *config.S3Cli) {
expectedString := GenerateRandomString()
s3Filename := GenerateRandomString()
configPath := MakeConfigFile(cfg)
defer func() { _ = os.Remove(configPath) }()
contentFile := MakeContentFile(expectedString)
defer func() { _ = os.Remove(contentFile) }()
configFile, err := os.Open(configPath)
Expect(err).ToNot(HaveOccurred())
s3CLISession, err := RunS3CLI(s3CLIPath, configPath, "put", contentFile, s3Filename)
s3Config, err := config.NewFromReader(configFile)
Expect(err).ToNot(HaveOccurred())
client, _ := client.NewSDK(s3Config)
resp, err := client.HeadObject(&s3.HeadObjectInput{
Bucket: aws.String(cfg.BucketName),
Key: aws.String(s3Filename),
})
Expect(err).ToNot(HaveOccurred())
Expect(s3CLISession.ExitCode()).To(BeZero())
if cfg.ServerSideEncryption == "" {
Expect(resp.ServerSideEncryption).To(BeNil())
} else {
Expect(*resp.ServerSideEncryption).To(Equal(cfg.ServerSideEncryption))
}
} | go | {
"resource": ""
} |
// AssertGetNonexistentFails runs the s3 CLI "get" against a key that does
// not exist and asserts the CLI fails with a NoSuchKey error on stderr.
func AssertGetNonexistentFails(s3CLIPath string, cfg *config.S3Cli) {
	configPath := MakeConfigFile(cfg)
	defer func() { _ = os.Remove(configPath) }()
	s3CLISession, err := RunS3CLI(s3CLIPath, configPath, "get", "non-existent-file", "/dev/null")
	Expect(err).ToNot(HaveOccurred())
	Expect(s3CLISession.ExitCode()).ToNot(BeZero())
	Expect(s3CLISession.Err.Contents()).To(ContainSubstring("NoSuchKey"))
}
"resource": ""
} |
// New bundles an S3 client and its configuration into an S3Blobstore.
// The error return is always nil; it exists for interface compatibility.
func New(s3Client *s3.S3, s3cliConfig *config.S3Cli) (S3Blobstore, error) {
	return S3Blobstore{s3Client: s3Client, s3cliConfig: s3cliConfig}, nil
}
"resource": ""
} |
q16139 | Get | train | func (client *S3Blobstore) Get(src string, dest io.WriterAt) error {
downloader := s3manager.NewDownloaderWithClient(client.s3Client)
_, err := downloader.Download(dest, &s3.GetObjectInput{
Bucket: aws.String(client.s3cliConfig.BucketName),
Key: client.key(src),
})
if err != nil {
return err
}
return nil
} | go | {
"resource": ""
} |
// Put uploads src to dest in the configured bucket, applying server-side
// encryption settings when configured. Multipart upload failures are
// retried up to three times with linear backoff; other failures are fatal
// immediately. Returns an error when the credentials source is "none"
// (read-only configuration).
func (client *S3Blobstore) Put(src io.ReadSeeker, dest string) error {
	cfg := client.s3cliConfig
	if cfg.CredentialsSource == config.NoneCredentialsSource {
		return errorInvalidCredentialsSourceValue
	}
	uploader := s3manager.NewUploaderWithClient(client.s3Client, func(u *s3manager.Uploader) {
		// Clean up partial uploads on failure so retries start fresh.
		u.LeavePartsOnError = false
		if !cfg.MultipartUpload {
			// disable multipart uploads by way of large PartSize configuration
			u.PartSize = oneTB
		}
	})
	uploadInput := &s3manager.UploadInput{
		Body:   src,
		Bucket: aws.String(cfg.BucketName),
		Key:    client.key(dest),
	}
	if cfg.ServerSideEncryption != "" {
		uploadInput.ServerSideEncryption = aws.String(cfg.ServerSideEncryption)
	}
	if cfg.SSEKMSKeyID != "" {
		uploadInput.SSEKMSKeyId = aws.String(cfg.SSEKMSKeyID)
	}
	retry := 0
	maxRetries := 3
	for {
		putResult, err := uploader.Upload(uploadInput)
		if err != nil {
			// Only multipart failures are retryable; they may be transient
			// (a single part failed).
			if _, ok := err.(s3manager.MultiUploadFailure); ok {
				if retry == maxRetries {
					log.Println("Upload retry limit exceeded:", err.Error())
					return fmt.Errorf("upload retry limit exceeded: %s", err.Error())
				}
				retry++
				// Linear backoff: 1s, 2s, 3s.
				time.Sleep(time.Second * time.Duration(retry))
				continue
			}
			log.Println("Upload failed:", err.Error())
			return fmt.Errorf("upload failure: %s", err.Error())
		}
		log.Println("Successfully uploaded file to", putResult.Location)
		return nil
	}
}
"resource": ""
} |
// Delete removes dest from the configured bucket. A 404 from S3 is treated
// as success so the operation is idempotent. Returns an error when the
// credentials source is "none" (read-only configuration).
func (client *S3Blobstore) Delete(dest string) error {
	if client.s3cliConfig.CredentialsSource == config.NoneCredentialsSource {
		return errorInvalidCredentialsSourceValue
	}
	deleteParams := &s3.DeleteObjectInput{
		Bucket: aws.String(client.s3cliConfig.BucketName),
		Key:    client.key(dest),
	}
	_, err := client.s3Client.DeleteObject(deleteParams)
	if err == nil {
		return nil
	}
	// Deleting a missing object is not an error.
	if reqErr, ok := err.(awserr.RequestFailure); ok {
		if reqErr.StatusCode() == 404 {
			return nil
		}
	}
	return err
}
"resource": ""
} |
// Exists reports whether dest exists in the configured bucket using a
// HeadObject request. A 404 maps to (false, nil); any other failure is
// returned as an error.
func (client *S3Blobstore) Exists(dest string) (bool, error) {
	existsParams := &s3.HeadObjectInput{
		Bucket: aws.String(client.s3cliConfig.BucketName),
		Key:    client.key(dest),
	}
	_, err := client.s3Client.HeadObject(existsParams)
	if err == nil {
		log.Printf("File '%s' exists in bucket '%s'\n", dest, client.s3cliConfig.BucketName)
		return true, nil
	}
	if reqErr, ok := err.(awserr.RequestFailure); ok {
		if reqErr.StatusCode() == 404 {
			log.Printf("File '%s' does not exist in bucket '%s'\n", dest, client.s3cliConfig.BucketName)
			return false, nil
		}
	}
	return false, err
}
"resource": ""
} |
q16143 | PackageXml | train | func (pb PackageBuilder) PackageXml() []byte {
p := createPackage()
for _, metaType := range pb.Metadata {
p.Types = append(p.Types, metaType)
}
byteXml, _ := xml.MarshalIndent(p, "", " ")
byteXml = append([]byte(xml.Header), byteXml...)
//if err := ioutil.WriteFile("mypackage.xml", byteXml, 0644); err != nil {
//ErrorAndExit(err.Error())
//}
return byteXml
} | go | {
"resource": ""
} |
// ForceMetadataFiles returns the builder's file map with a freshly
// generated package.xml manifest added under the "package.xml" key.
func (pb *PackageBuilder) ForceMetadataFiles() ForceMetadataFiles {
	pb.Files["package.xml"] = pb.PackageXml()
	return pb.Files
}
"resource": ""
} |
q16145 | MetaPathToSourcePath | train | func MetaPathToSourcePath(mpath string) (spath string) {
spath = strings.TrimSuffix(mpath, "-meta.xml")
if spath == mpath {
return
}
_, err := os.Stat(spath)
if err != nil {
spath = mpath
}
return
} | go | {
"resource": ""
} |
// AddFile registers the metadata file at fpath with the package: it
// resolves the absolute source path, records the member in the package
// manifest (except for destructiveChanges manifests and bare meta files),
// and, when building a push, copies the file (or destructive-changes
// manifest) into the working file set. Returns the member name used.
func (pb *PackageBuilder) AddFile(fpath string) (fname string, err error) {
	fpath, err = filepath.Abs(fpath)
	if err != nil {
		return
	}
	_, err = os.Stat(fpath)
	if err != nil {
		return
	}
	// destructiveChanges.xml / destructiveChangesPre.xml / ...Post.xml are
	// manifests, not members.
	isDestructiveChanges, err := regexp.MatchString("destructiveChanges(Pre|Post)?"+regexp.QuoteMeta(".")+"xml", fpath)
	if err != nil {
		return
	}
	// If handed a -meta.xml companion, operate on the real source file.
	fpath = MetaPathToSourcePath(fpath)
	metaName, fname := getMetaTypeFromPath(fpath)
	if !isDestructiveChanges && !strings.HasSuffix(fpath, "-meta.xml") {
		pb.AddMetaToPackage(metaName, fname)
	}
	// If it's a push, we want to actually add the files
	if pb.IsPush {
		if isDestructiveChanges {
			err = pb.addDestructiveChanges(fpath)
		} else {
			err = pb.addFileToWorkingDir(metaName, fpath)
		}
	}
	return
}
"resource": ""
} |
// AddDirectory recursively adds every file under fpath to the package,
// returning a map of member name -> path for successful additions and the
// list of paths that could not be added.
func (pb *PackageBuilder) AddDirectory(fpath string) (namePaths map[string]string, badPaths []string, err error) {
	namePaths = make(map[string]string)
	files, err := ioutil.ReadDir(fpath)
	if err != nil {
		badPaths = append(badPaths, fpath)
		return
	}
	for _, f := range files {
		dirOrFilePath := fpath + "/" + f.Name()
		if f.IsDir() {
			dirNamePaths, dirBadPath, err := pb.AddDirectory(dirOrFilePath)
			if err != nil {
				badPaths = append(badPaths, dirBadPath...)
			} else {
				// Merge the subtree's results into ours.
				for dirContentName, dirContentPath := range dirNamePaths {
					namePaths[dirContentName] = dirContentPath
				}
			}
		}
		// NOTE(review): directories fall through to AddFile as well after
		// the recursion above, so a directory path is also registered as a
		// "file" (or reported in badPaths) — confirm this double handling
		// is intended.
		name, err := pb.AddFile(dirOrFilePath)
		if (err != nil) || (name == "") {
			badPaths = append(badPaths, dirOrFilePath)
		} else {
			namePaths[name] = dirOrFilePath
		}
	}
	return
}
"resource": ""
} |
// addFileToWorkingDir stores the contents of fpath (and its -meta.xml
// companion, when present) in pb.Files keyed by the path relative to the
// package source root. For folder-scoped metadata types the root is one
// level further up.
func (pb *PackageBuilder) addFileToWorkingDir(metaName string, fpath string) (err error) {
	// Get relative dir from source
	srcDir := filepath.Dir(filepath.Dir(fpath))
	for _, mp := range metapaths {
		if metaName == mp.name && mp.hasFolder {
			srcDir = filepath.Dir(srcDir)
		}
	}
	frel, _ := filepath.Rel(srcDir, fpath)
	// Try to find meta file
	hasMeta := true
	fmeta := fpath + "-meta.xml"
	fmetarel := ""
	if _, err = os.Stat(fmeta); err != nil {
		if os.IsNotExist(err) {
			// No companion meta file; that is fine for many types.
			hasMeta = false
		} else {
			// Has error
			return
		}
	} else {
		// Should be present since we worked back to srcDir
		fmetarel, _ = filepath.Rel(srcDir, fmeta)
	}
	fdata, err := ioutil.ReadFile(fpath)
	if err != nil {
		return
	}
	pb.Files[frel] = fdata
	if hasMeta {
		// NOTE(review): a read error here still stores the (nil) data in
		// pb.Files[fmetarel] before returning the error — confirm whether
		// the entry should be skipped on failure.
		fdata, err = ioutil.ReadFile(fmeta)
		pb.Files[fmetarel] = fdata
		return
	}
	return
}
"resource": ""
} |
q16149 | AddMetaToPackage | train | func (pb *PackageBuilder) AddMetaToPackage(metaName string, name string) {
mt := pb.Metadata[metaName]
if mt.Name == "" {
mt.Name = metaName
}
if !pb.contains(mt.Members, name) {
mt.Members = append(mt.Members, name)
pb.Metadata[metaName] = mt
}
} | go | {
"resource": ""
} |
// getMetaTypeFromPath resolves fpath to its metadata type and member name
// (file name without extension). It exits the process when the path cannot
// be resolved or does not exist.
func getMetaTypeFromPath(fpath string) (metaName string, name string) {
	fpath, err := filepath.Abs(fpath)
	if err != nil {
		ErrorAndExit("Cound not find " + fpath)
	}
	if _, err := os.Stat(fpath); err != nil {
		ErrorAndExit("Cound not open " + fpath)
	}
	// Get the metadata type and name for the file
	metaName, fileName := getMetaForPath(fpath)
	name = strings.TrimSuffix(fileName, filepath.Ext(fileName))
	//name = strings.TrimSuffix(name, filepath.Ext(name))
	return
}
"resource": ""
} |
// getMetaForPath maps a file path to its metadata type and object name by
// matching the parent (or grandparent, for folder-scoped types) directory
// against the known metapaths table. Unrecognized paths fall back to using
// the parent directory name as the type.
func getMetaForPath(path string) (metaName string, objectName string) {
	parentDir := filepath.Dir(path)
	parentName := filepath.Base(parentDir)
	grandparentName := filepath.Base(filepath.Dir(parentDir))
	fileName := filepath.Base(path)
	for _, mp := range metapaths {
		// Folder-scoped types (e.g. documents, aura) match on the
		// grandparent directory.
		if mp.hasFolder && grandparentName == mp.path {
			metaName = mp.name
			if mp.onlyFolder {
				// The folder itself is the member.
				objectName = parentName
			} else {
				objectName = parentName + "/" + fileName
			}
			return
		}
		if mp.path == parentName {
			metaName = mp.name
			objectName = fileName
			return
		}
	}
	// Unknown, so use path
	metaName = parentName
	objectName = fileName
	return
}
"resource": ""
} |
q16152 | getMDTypeFromXml | train | func getMDTypeFromXml(path string) (mdtype string, err error) {
xmlFile, err := ioutil.ReadFile(path)
mdtype = getFirstXmlElement(xmlFile)
return
} | go | {
"resource": ""
} |
// getFirstXmlElement returns the local name of the first start element in
// xmlFile, or "" when the input contains no XML elements.
func getFirstXmlElement(xmlFile []byte) (firstElement string) {
	decoder := xml.NewDecoder(strings.NewReader(string(xmlFile)))
	for {
		token, _ := decoder.Token()
		if token == nil {
			// End of input (or parse error) without a start element.
			return
		}
		if start, ok := token.(xml.StartElement); ok {
			return start.Name.Local
		}
	}
}
"resource": ""
} |
// findMetadataTypeFolder walks root looking for the first file whose root
// XML element matches mdtype and returns its containing folder ("" if none
// found). For AuraDefinitionBundle the grandparent is returned so the
// result spans all bundles.
func findMetadataTypeFolder(mdtype string, root string) (folder string) {
	// The "walk canceled" error is a sentinel used purely to stop the walk
	// early; the Walk return value is deliberately ignored.
	filepath.Walk(root, func(path string, f os.FileInfo, err error) error {
		firstEl, _ := getMDTypeFromXml(path)
		if firstEl == mdtype {
			// This is sufficient for MD that does not have sub folders (classes, pages, etc)
			// It is NOT sufficient for aura bundles
			if mdtype == "AuraDefinitionBundle" {
				// Need the parent of this folder to get all aura bundles in the directory
				folder = filepath.Dir(filepath.Dir(path))
			} else {
				folder = filepath.Dir(path)
			}
			return errors.New("walk canceled")
		}
		return nil
	})
	return
}
"resource": ""
} |
q16155 | zipResource | train | func zipResource(path string, topLevelFolder string) {
zipfile := new(bytes.Buffer)
zipper := zip.NewWriter(zipfile)
startPath := path + "/"
filepath.Walk(path, func(path string, f os.FileInfo, err error) error {
if strings.ToLower(filepath.Base(path)) != ".ds_store" {
// Can skip dirs since the dirs will be created when the files are added
if !f.IsDir() {
file, err := ioutil.ReadFile(path)
if err != nil {
return err
}
fl, err := zipper.Create(filepath.Join(topLevelFolder, strings.Replace(path, startPath, "", -1)))
if err != nil {
ErrorAndExit(err.Error())
}
_, err = fl.Write([]byte(file))
if err != nil {
ErrorAndExit(err.Error())
}
}
}
return nil
})
zipper.Close()
zipdata := zipfile.Bytes()
ioutil.WriteFile(path+".resource", zipdata, 0644)
return
} | go | {
"resource": ""
} |
// ForceSaveLogin enriches creds with the user info and current API version,
// persists the session, marks it active, and reports the login on output.
// Returns the saved session's name.
func ForceSaveLogin(creds ForceSession, output *os.File) (sessionName string, err error) {
	userinfo, err := getUserInfo(creds)
	if err != nil {
		return
	}
	creds.UserInfo = &userinfo
	creds.SessionOptions.ApiVersion = ApiVersionNumber()
	fmt.Fprintf(output, "Logged in as '%s' (API %s)\n", creds.UserInfo.UserName, ApiVersionNumber())
	if err = SaveLogin(creds); err != nil {
		return
	}
	sessionName = creds.SessionName()
	err = SetActiveLogin(sessionName)
	return
}
"resource": ""
} |
// upgradeCredentials migrates an older stored session to the current format:
// it fills in missing SessionOptions (inferring OAuth refresh when a refresh
// token is present) and, when user info is absent, refreshes the session,
// fetches the user info, and re-saves the login. Fully populated sessions
// are left untouched.
func upgradeCredentials(creds *ForceSession) (err error) {
	if creds.SessionOptions != nil && creds.UserInfo != nil {
		// Already in the current format; nothing to migrate.
		return
	}
	if creds.SessionOptions == nil {
		creds.SessionOptions = &SessionOptions{
			ApiVersion: ApiVersionNumber(),
		}
		if creds.RefreshToken != "" {
			creds.SessionOptions.RefreshMethod = RefreshOauth
		}
	}
	if creds.UserInfo == nil || creds.UserInfo.UserName == "" {
		force := NewForce(creds)
		err = force.RefreshSession()
		if err != nil {
			return
		}
		var userinfo UserInfo
		userinfo, err = getUserInfo(*creds)
		if err != nil {
			return
		}
		creds.UserInfo = &userinfo
		// Persist the upgraded session; progress goes to stderr.
		_, err = ForceSaveLogin(*creds, os.Stderr)
	}
	return
}
"resource": ""
} |
// getAttributes returns the non-embedded fields of the struct m (or of the
// struct m points to), keyed by lowercased field name. Non-struct inputs
// produce an empty map after printing a diagnostic.
func getAttributes(m interface{}) map[string]reflect.StructField {
	t := reflect.TypeOf(m)
	// Dereference a pointer so callers may pass either T or *T.
	if t.Kind() == reflect.Ptr {
		t = t.Elem()
	}
	attrs := make(map[string]reflect.StructField)
	if t.Kind() != reflect.Struct {
		fmt.Printf("%v type can't have attributes inspected\n", t.Kind())
		return attrs
	}
	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		if field.Anonymous {
			// Embedded fields are not treated as attributes.
			continue
		}
		attrs[strings.ToLower(field.Name)] = field
	}
	return attrs
}
"resource": ""
} |
// PushByPaths collects the files/directories in fpaths into a push package
// and deploys them, recording member-name -> path mappings in namePaths for
// error reporting. If any path cannot be added, nothing is deployed and the
// process exits with the list of bad paths.
func PushByPaths(fpaths []string, byName bool, namePaths map[string]string, opts *ForceDeployOptions) {
	pb := NewPushBuilder()
	var badPaths []string
	for _, fpath := range fpaths {
		fi, err := os.Stat(fpath)
		if err != nil {
			fmt.Println(err)
			badPaths = append(badPaths, fpath)
			continue
		}
		mode := fi.Mode()
		//If path provided is dir we are adding all containing files to deployment
		if mode.IsDir() {
			dirNamePaths, dirBadPath, err := pb.AddDirectory(fpath)
			if err != nil {
				fmt.Println(err.Error())
				badPaths = append(badPaths, dirBadPath...)
			} else {
				// Merge the directory's member map into the shared one.
				for dirContentName, dirContentPath := range dirNamePaths {
					namePaths[dirContentName] = dirContentPath
				}
			}
		} else if mode.IsRegular() { // single file processing
			name, err := pb.AddFile(fpath)
			if err != nil {
				fmt.Println(err.Error())
				badPaths = append(badPaths, fpath)
			} else {
				// Store paths by name for error messages
				namePaths[name] = fpath
			}
		}
	}
	if len(badPaths) == 0 {
		fmt.Println("Deploying now...")
		t0 := time.Now()
		deployFiles(pb.ForceMetadataFiles(), byName, namePaths, opts)
		t1 := time.Now()
		fmt.Printf("The deployment took %v to run.\n", t1.Sub(t0))
	} else {
		ErrorAndExit("Could not add the following files:\n {%v}", strings.Join(badPaths, "\n"))
	}
}
"resource": ""
} |
q16160 | processDeployResults | train | func processDeployResults(result ForceCheckDeploymentStatusResult, byName bool, namePaths map[string]string, deployErr error) (err error) {
// processDeployResults prints a human-readable summary of a metadata
// deployment (component successes/failures, test results, code-coverage
// warnings), fires a desktop notification, and returns a non-nil error
// when any component or test failed or the overall status is not Success.
// byName selects failure reporting by component name; otherwise failures
// are reported by the file path looked up in namePaths.
// NOTE(review): ErrorAndExit terminates the process, so a non-nil
// deployErr never reaches the caller.
if deployErr != nil {
ErrorAndExit(deployErr.Error())
}
problems := result.Details.ComponentFailures
successes := result.Details.ComponentSuccesses
testFailures := result.Details.RunTestResult.TestFailures
testSuccesses := result.Details.RunTestResult.TestSuccesses
codeCoverageWarnings := result.Details.RunTestResult.CodeCoverageWarnings
if len(successes) > 0 {
// len(successes)-1 excludes the package.xml entry, which the loop below
// also skips. NOTE(review): this under-reports by one if package.xml is
// ever absent from the success list — confirm.
fmt.Printf("\nSuccesses - %d\n", len(successes)-1)
for _, success := range successes {
if success.FullName != "package.xml" {
verb := "unchanged"
if success.Changed {
verb = "changed"
} else if success.Deleted {
verb = "deleted"
} else if success.Created {
verb = "created"
}
fmt.Printf("\t%s: %s\n", success.FullName, verb)
}
}
}
// NOTE(review): the loop variable is named "failure" but iterates test
// successes — consider renaming for clarity.
fmt.Printf("\nTest Successes - %d\n", len(testSuccesses))
for _, failure := range testSuccesses {
fmt.Printf(" [PASS] %s::%s\n", failure.Name, failure.MethodName)
}
if len(problems) > 0 {
fmt.Printf("\nFailures - %d\n", len(problems))
for _, problem := range problems {
if problem.FullName == "" {
fmt.Println(problem.Problem)
} else {
if byName {
fmt.Printf("ERROR with %s, line %d\n %s\n", problem.FullName, problem.LineNumber, problem.Problem)
} else {
// Fall back to the component name when no file path is known.
fname, found := namePaths[problem.FullName]
if !found {
fname = problem.FullName
}
fmt.Printf("\"%s\", line %d: %s %s\n", fname, problem.LineNumber, problem.ProblemType, problem.Problem)
}
}
}
}
fmt.Printf("\nTest Failures - %d\n", len(testFailures))
for _, failure := range testFailures {
fmt.Printf("\n [FAIL] %s::%s: %s\n", failure.Name, failure.MethodName, failure.Message)
fmt.Println(failure.StackTrace)
}
if len(codeCoverageWarnings) > 0 {
fmt.Printf("\nCode Coverage Warnings - %d\n", len(codeCoverageWarnings))
for _, warning := range codeCoverageWarnings {
fmt.Printf("\n %s: %s\n", warning.Name, warning.Message)
}
}
// Handle notifications
desktop.NotifySuccess("push", len(problems) == 0)
// Component failures take precedence over test failures over overall status.
if len(problems) > 0 {
err = errors.New("Some components failed deployment")
} else if len(testFailures) > 0 {
err = errors.New("Some tests failed")
} else if !result.Success {
err = errors.New(fmt.Sprintf("Status: %s", result.Status))
}
return
}
"resource": ""
} |
q16161 | qualifyUrl | train | func (f *Force) qualifyUrl(url string) string {
	// qualifyUrl prefixes a relative API path with the instance URL,
	// stripping any leading slashes from the path so the join contains
	// exactly one separator.
	relative := strings.TrimLeft(url, "/")
	return f.Credentials.InstanceUrl + "/" + relative
}
"resource": ""
} |
q16162 | IsSourceDir | train | func IsSourceDir(dir string) bool {
	// IsSourceDir reports whether dir exists on disk and is a directory.
	// The previous implementation returned true for any existing path,
	// including regular files, so a stray file named "src" could
	// masquerade as a source directory.
	if fi, err := os.Stat(dir); err == nil {
		return fi.IsDir()
	}
	return false
}
"resource": ""
} |
q16163 | GetSourceDir | train | func GetSourceDir() (dir string, err error) {
// GetSourceDir locates the metadata source directory by searching the
// working directory's immediate subdirectories, then walking up through
// parent directories, and finally creating base/src (with a "metadata"
// symlink for backward compatibility) when nothing is found.
base, err := os.Getwd()
if err != nil {
return
}
// Look down to our nearest subdirectories
for _, src := range sourceDirs {
if len(src) > 0 {
// dir is assigned before the check, so after this loop it holds
// base/<last candidate> even when no source dir was found.
dir = filepath.Join(base, src)
if IsSourceDir(dir) {
return
}
}
}
// Check the current directory and then start looking up at our parents.
// When dir's parent is identical, it means we're at the root. If we blow
// past the actual root, we should drop to the next section of code
for dir != filepath.Dir(dir) {
dir = filepath.Dir(dir)
for _, src := range sourceDirs {
adir := filepath.Join(dir, src)
if IsSourceDir(adir) {
dir = adir
return
}
}
}
// No source directory found, create a src directory and a symlinked "metadata"
// directory for backward compatibility and return that.
dir = filepath.Join(base, "src")
// NOTE(review): err from Mkdir is returned even though the function
// proceeds to symlink and returns the symlink path; the os.Symlink
// error is silently ignored — confirm this best-effort behavior.
err = os.Mkdir(dir, 0777)
symlink := filepath.Join(base, "metadata")
os.Symlink(dir, symlink)
dir = symlink
return
}
"resource": ""
} |
q16164 | StringSlicePos | train | func StringSlicePos(slice []string, value string) int {
	// StringSlicePos returns the index of the first occurrence of value
	// in slice, or -1 when value is absent.
	for idx := range slice {
		if slice[idx] == value {
			return idx
		}
	}
	return -1
}
"resource": ""
} |
q16165 | Group | train | func (p *DefaultTokenGrouper) Group(tokens []*Token) [][2]*Token {
	// Group pairs each token with its successor, ending with a final
	// (last, nil) pair. Returns nil for an empty input.
	if len(tokens) == 0 {
		return nil
	}
	pairs := make([][2]*Token, 0, len(tokens))
	previous := tokens[0]
	for _, current := range tokens {
		// Skip when current is the same pointer as previous: always true
		// on the first iteration, and for adjacent duplicate pointers.
		if current == previous {
			continue
		}
		pairs = append(pairs, [2]*Token{previous, current})
		previous = current
	}
	pairs = append(pairs, [2]*Token{previous, nil})
	return pairs
}
"resource": ""
} |
q16166 | NewToken | train | func NewToken(token string) *Token {
	// NewToken builds a Token around the given text, wiring in the
	// package-level regular expressions used for classification.
	return &Token{
		Tok:        token,
		reEllipsis: reEllipsis,
		reNumeric:  reNumeric,
		reInitial:  reInitial,
		reAlpha:    reAlpha,
	}
}
"resource": ""
} |
q16167 | String | train | func (p *Token) String() string {
	// String renders the token for debugging: its text plus the
	// sentence-break, abbreviation and position fields.
	repr := fmt.Sprintf("<Token Tok: %q, SentBreak: %t, Abbr: %t, Position: %d>", p.Tok, p.SentBreak, p.Abbr, p.Position)
	return repr
}
"resource": ""
} |
q16168 | N | train | func (f *FreqDist) N() float64 {
sum := 0.0
for _, val := range f.Samples {
sum += float64(val)
}
return sum
} | go | {
"resource": ""
} |
q16169 | rToNr | train | func (f *FreqDist) rToNr(bins int) map[int]int {
	// rToNr maps each frequency r to Nr, the number of samples that
	// occur exactly r times. N0 is the count of unseen bins
	// (bins - B()), or 0 when bins is 0.
	nr := make(map[int]int)
	for _, r := range f.Samples {
		nr[r]++
	}
	if bins == 0 {
		nr[0] = 0
	} else {
		nr[0] = bins - f.B()
	}
	return nr
}
"resource": ""
} |
q16170 | cumulativeFrequencies | train | func (f *FreqDist) cumulativeFrequencies(Samples []string) []int {
	// cumulativeFrequencies returns, for each requested sample in order,
	// its recorded count in the distribution (0 for unseen samples).
	freqs := make([]int, len(Samples))
	for i, sample := range Samples {
		freqs[i] = f.Samples[sample]
	}
	return freqs
}
"resource": ""
} |
q16171 | HasSentencePunct | train | func (p *DefaultPunctStrings) HasSentencePunct(text string) bool {
	// HasSentencePunct reports whether text contains at least one
	// sentence-ending punctuation character ('.', '!' or '?').
	// Single pass over text instead of the previous nested
	// O(len(enders) * len(text)) scan; result is identical.
	for _, r := range text {
		switch r {
		case '.', '!', '?':
			return true
		}
	}
	return false
}
"resource": ""
} |
q16172 | NewTypeBasedAnnotation | train | func NewTypeBasedAnnotation(s *Storage, p PunctStrings, e TokenExistential) *TypeBasedAnnotation {
	// NewTypeBasedAnnotation wires a storage backend, punctuation rules
	// and token predicates into a TypeBasedAnnotation.
	annotation := &TypeBasedAnnotation{
		Storage:          s,
		PunctStrings:     p,
		TokenExistential: e,
	}
	return annotation
}
"resource": ""
} |
q16173 | NewAnnotations | train | func NewAnnotations(s *Storage, p PunctStrings, word WordTokenizer) []AnnotateTokens {
	// NewAnnotations builds the default annotation pipeline: a
	// type-based pass followed by a token-based pass with orthographic
	// context heuristics.
	typeBased := &TypeBasedAnnotation{s, p, word}
	ortho := &OrthoContext{s, p, word, word}
	tokenBased := &TokenBasedAnnotation{s, p, word, &DefaultTokenGrouper{}, ortho}
	return []AnnotateTokens{typeBased, tokenBased}
}
"resource": ""
} |
q16174 | Annotate | train | func (a *TypeBasedAnnotation) Annotate(tokens []*Token) []*Token {
	// Annotate applies the type-based annotation to every token in
	// place and returns the same slice.
	for i := range tokens {
		a.typeAnnotation(tokens[i])
	}
	return tokens
}
"resource": ""
} |
q16175 | Annotate | train | func (a *TokenBasedAnnotation) Annotate(tokens []*Token) []*Token {
	// Annotate groups tokens into consecutive pairs and applies the
	// token-based annotation to each pair, returning the same slice.
	pairs := a.TokenGrouper.Group(tokens)
	for _, pair := range pairs {
		a.tokenAnnotation(pair[0], pair[1])
	}
	return tokens
}
"resource": ""
} |
q16176 | NewSentenceTokenizer | train | func NewSentenceTokenizer(s *Storage) *DefaultSentenceTokenizer {
	// NewSentenceTokenizer builds a sentence tokenizer with the default
	// punctuation rules, word tokenizer and annotation pipeline.
	lang := NewPunctStrings()
	word := NewWordTokenizer(lang)
	return &DefaultSentenceTokenizer{
		Storage:       s,
		PunctStrings:  lang,
		WordTokenizer: word,
		Annotations:   NewAnnotations(s, lang, word),
	}
}
"resource": ""
} |
q16177 | NewTokenizer | train | func NewTokenizer(s *Storage, word WordTokenizer, lang PunctStrings) *DefaultSentenceTokenizer {
	// NewTokenizer builds a sentence tokenizer from caller-supplied
	// word tokenizer and punctuation rules, with the default
	// annotation pipeline.
	return &DefaultSentenceTokenizer{
		Storage:       s,
		PunctStrings:  lang,
		WordTokenizer: word,
		Annotations:   NewAnnotations(s, lang, word),
	}
}
"resource": ""
} |
q16178 | Tokenize | train | func (s *DefaultSentenceTokenizer) Tokenize(text string) []*Sentence {
	// Tokenize splits text into sentences at the break positions found
	// by the annotation pipeline. Each Sentence carries its start/end
	// byte offsets and the corresponding slice of text; any trailing
	// text after the last break becomes a final sentence.
	tokens := s.AnnotatedTokens(text)
	sentences := make([]*Sentence, 0, len(tokens))
	start := 0
	for _, tok := range tokens {
		if !tok.SentBreak {
			continue
		}
		sentences = append(sentences, &Sentence{start, tok.Position, text[start:tok.Position]})
		start = tok.Position
	}
	if start != len(text) {
		end := len(text)
		sentences = append(sentences, &Sentence{start, end, text[start:end]})
	}
	return sentences
}
"resource": ""
} |
q16179 | HasSentEndChars | train | func (e *WordTokenizer) HasSentEndChars(t *sentences.Token) bool {
	// HasSentEndChars reports whether the token ends with sentence-final
	// punctuation (optionally followed by a closing quote/bracket), or
	// contains sentence punctuation immediately followed by an opening
	// quote or bracket.
	suffixes := []string{
		`."`, `.'`, `.)`, `.’`, `.”`,
		`?`, `?"`, `?'`, `?)`, `?’`, `?”`,
		`!`, `!"`, `!'`, `!)`, `!’`, `!”`,
	}
	for _, suffix := range suffixes {
		if strings.HasSuffix(t.Tok, suffix) {
			return true
		}
	}
	openers := []string{
		`.[`, `.(`, `."`, `.'`,
		`?[`, `?(`,
		`![`, `!(`,
	}
	for _, opener := range openers {
		if strings.Contains(t.Tok, opener) {
			return true
		}
	}
	return false
}
"resource": ""
} |
q16180 | Tokenize | train | func (p *DefaultWordTokenizer) Tokenize(text string, onlyPeriodContext bool) []*Token {
// Tokenize splits text into word tokens on whitespace, recording each
// token's byte position and whether it starts a line or paragraph.
// When onlyPeriodContext is true, only words containing sentence
// punctuation — plus the word immediately following such a word — are
// emitted. Returns a single token for the whole text when no
// whitespace boundary produced any tokens; nil for empty input.
textLength := len(text)
if textLength == 0 {
return nil
}
tokens := make([]*Token, 0, 50)
lastSpace := 0
lineStart := false
paragraphStart := false
getNextWord := false
for i, char := range text {
// Only act at whitespace boundaries or at the final position.
// NOTE(review): i is a byte offset of a rune start while
// textLength-1 is the last byte index, so a multi-byte final rune
// never satisfies i == textLength-1 — confirm behavior on
// non-ASCII-terminated text.
if !unicode.IsSpace(char) && i != textLength - 1 {
continue
}
// Two consecutive newlines (with only blank content between) mark
// a paragraph boundary.
if char == '\n' {
if lineStart {
paragraphStart = true
}
lineStart = true
}
var cursor int
if i == textLength - 1 {
cursor = textLength
} else {
cursor = i
}
word := strings.TrimSpace(text[lastSpace:cursor])
if word == "" {
continue
}
hasSentencePunct := p.PunctStrings.HasSentencePunct(word)
// In period-context mode, skip words without sentence punctuation
// unless the previous emitted word had some (getNextWord).
if onlyPeriodContext && !hasSentencePunct && !getNextWord {
lastSpace = cursor
continue
}
token := NewToken(word)
token.Position = cursor
token.ParaStart = paragraphStart
token.LineStart = lineStart
tokens = append(tokens, token)
lastSpace = cursor
lineStart = false
paragraphStart = false
if hasSentencePunct {
getNextWord = true
} else {
getNextWord = false
}
}
// Fallback: no boundary produced a token, so treat the whole text as one.
if len(tokens) == 0 {
token := NewToken(text)
token.Position = textLength
tokens = append(tokens, token)
}
return tokens
}
"resource": ""
} |
q16181 | Type | train | func (p *DefaultWordTokenizer) Type(t *Token) string {
	// Type normalizes a token for frequency counting: lowercase, with
	// numeric runs collapsed to "##number##" and commas stripped
	// (except for single-character results, returned as-is).
	lowered := strings.ToLower(t.Tok)
	typ := t.reNumeric.ReplaceAllString(lowered, "##number##")
	if len(typ) == 1 {
		return typ
	}
	return strings.Replace(typ, ",", "", -1)
}
"resource": ""
} |
q16182 | TypeNoPeriod | train | func (p *DefaultWordTokenizer) TypeNoPeriod(t *Token) string {
	// TypeNoPeriod returns the token's normalized type with a single
	// trailing period removed (only for types longer than one character).
	typ := p.Type(t)
	if len(typ) > 1 && strings.HasSuffix(typ, ".") {
		return typ[:len(typ)-1]
	}
	return typ
}
"resource": ""
} |
q16183 | TypeNoSentPeriod | train | func (p *DefaultWordTokenizer) TypeNoSentPeriod(t *Token) string {
	// TypeNoSentPeriod returns the token's normalized type, dropping the
	// final period only when the token ends a sentence. Nil-receiver
	// safe: returns "" in that case.
	if p == nil {
		return ""
	}
	if !t.SentBreak {
		return p.Type(t)
	}
	return p.TypeNoPeriod(t)
}
"resource": ""
} |
q16184 | FirstUpper | train | func (p *DefaultWordTokenizer) FirstUpper(t *Token) bool {
	// FirstUpper reports whether the token's first rune is uppercase.
	// Ranging stops after the first rune, avoiding the []rune conversion.
	for _, r := range t.Tok {
		return unicode.IsUpper(r)
	}
	// Empty token: no first rune.
	return false
}
"resource": ""
} |
q16185 | FirstLower | train | func (p *DefaultWordTokenizer) FirstLower(t *Token) bool {
	// FirstLower reports whether the token's first rune is lowercase.
	// Ranging stops after the first rune, avoiding the []rune conversion.
	for _, r := range t.Tok {
		return unicode.IsLower(r)
	}
	// Empty token: no first rune.
	return false
}
"resource": ""
} |
q16186 | IsEllipsis | train | func (p *DefaultWordTokenizer) IsEllipsis(t *Token) bool {
	// IsEllipsis reports whether the token matches the ellipsis pattern.
	matched := t.reEllipsis.MatchString(t.Tok)
	return matched
}
"resource": ""
} |
q16187 | IsNumber | train | func (p *DefaultWordTokenizer) IsNumber(t *Token) bool {
return strings.HasPrefix(t.Tok, "##number##")
} | go | {
"resource": ""
} |
q16188 | IsInitial | train | func (p *DefaultWordTokenizer) IsInitial(t *Token) bool {
	// IsInitial reports whether the token matches the initial pattern
	// (e.g. a single letter followed by a period).
	matched := t.reInitial.MatchString(t.Tok)
	return matched
}
"resource": ""
} |
q16189 | IsAlpha | train | func (p *DefaultWordTokenizer) IsAlpha(t *Token) bool {
	// IsAlpha reports whether the token matches the alphabetic pattern.
	matched := t.reAlpha.MatchString(t.Tok)
	return matched
}
"resource": ""
} |
q16190 | IsNonPunct | train | func (p *DefaultWordTokenizer) IsNonPunct(t *Token) bool {
// IsNonPunct reports whether the token's normalized type contains at
// least one non-punctuation character, per the language's NonPunct
// pattern.
// NOTE(review): the regexp is recompiled on every call; caching the
// compiled pattern (e.g. per PunctStrings) would remove this hot-path
// cost — confirm NonPunct() is constant per tokenizer before caching.
nonPunct := regexp.MustCompile(p.PunctStrings.NonPunct())
return nonPunct.MatchString(p.Type(t))
}
"resource": ""
} |
q16191 | HasPeriodFinal | train | func (p *DefaultWordTokenizer) HasPeriodFinal(t *Token) bool {
return strings.HasSuffix(t.Tok, ".")
} | go | {
"resource": ""
} |
q16192 | HasSentEndChars | train | func (p *DefaultWordTokenizer) HasSentEndChars(t *Token) bool {
	// HasSentEndChars reports whether the token ends with sentence-final
	// punctuation (optionally followed by a closing quote/bracket), or
	// contains sentence punctuation immediately followed by an opening
	// quote or bracket.
	suffixes := []string{
		`."`, `.'`, `.)`,
		`?`, `?"`, `?'`, `?)`,
		`!`, `!"`, `!'`, `!)`, `!’`, `!”`,
	}
	for _, suffix := range suffixes {
		if strings.HasSuffix(t.Tok, suffix) {
			return true
		}
	}
	openers := []string{
		`.[`, `.(`, `."`, `.'`,
		`?[`, `?(`,
		`![`, `!(`,
	}
	for _, opener := range openers {
		if strings.Contains(t.Tok, opener) {
			return true
		}
	}
	return false
}
"resource": ""
} |
q16193 | Has | train | func (ss SetString) Has(str string) bool {
	// Has reports whether str is recorded in the set with a non-zero
	// count. A missing key yields the zero value, so absent keys and
	// zero-count entries both read as "not present" — identical to the
	// previous if/else form, collapsed to a single expression.
	return ss[str] != 0
}
"resource": ""
} |
q16194 | Array | train | func (ss SetString) Array() []string {
	// Array returns the set's keys as a slice. Map iteration order is
	// random, so the result order is unspecified.
	keys := make([]string, 0, len(ss))
	for k := range ss {
		keys = append(keys, k)
	}
	return keys
}
"resource": ""
} |
q16195 | addOrthoContext | train | func (p *Storage) addOrthoContext(typ string, flag int) {
	// addOrthoContext ORs flag into the orthographic-context bitmask
	// recorded for the given token type.
	p.OrthoContext[typ] = p.OrthoContext[typ] | flag
}
"resource": ""
} |
q16196 | IsAbbr | train | func (p *Storage) IsAbbr(tokens ...string) bool {
	// IsAbbr reports whether any of the given tokens is a known
	// abbreviation type.
	for _, candidate := range tokens {
		if p.AbbrevTypes.Has(candidate) {
			return true
		}
	}
	return false
}
"resource": ""
} |
q16197 | Add | train | func (ix *Indexes) Add(columns ...string) {
	// Add appends a non-unique index over the given columns, named by
	// joining the column names with underscores.
	idx := &index{
		name:    strings.Join(columns, "_"),
		columns: columns,
		unique:  false,
	}
	*ix = append(*ix, idx)
}
"resource": ""
} |
q16198 | AddUnique | train | func (ix *Indexes) AddUnique(columns ...string) {
	// AddUnique appends a unique index over the given columns, named by
	// joining the column names with underscores.
	idx := &index{
		name:    strings.Join(columns, "_"),
		columns: columns,
		unique:  true,
	}
	*ix = append(*ix, idx)
}
"resource": ""
} |
q16199 | CreateTableIfNotExists | train | func (mg *Migration) CreateTableIfNotExists(structPtr interface{}) error {
// CreateTableIfNotExists creates the table for the given model struct
// if absent, adds columns for any new fields, and (re)creates its
// indexes. Panics on unexpected SQL errors and on detected column
// renames; only index-creation errors are returned, and only the last
// one survives the loop below.
model := structPtrToModel(structPtr, true, nil)
sql := mg.dialect.createTableSql(model, true)
if mg.Log {
fmt.Println(sql)
}
// NOTE(review): splitting on ";" executes a trailing empty statement
// when sql ends with a semicolon — confirm the driver tolerates it.
sqls := strings.Split(sql, ";")
for _, v := range sqls {
_, err := mg.db.Exec(v)
if err != nil && !mg.dialect.catchMigrationError(err) {
panic(err)
}
}
columns := mg.dialect.columnsInTable(mg, model.table)
// More model fields than table columns means new fields were added.
if len(model.fields) > len(columns) {
oldFields := []*modelField{}
newFields := []*modelField{}
for _, v := range model.fields {
if _, ok := columns[v.name]; ok {
oldFields = append(oldFields, v)
} else {
newFields = append(newFields, v)
}
}
// A mismatch here means an existing column no longer matches any
// field name, i.e. a rename — unsupported, so fail loudly.
if len(oldFields) != len(columns) {
panic("Column name has changed, rename column migration is not supported.")
}
for _, v := range newFields {
mg.addColumn(model.table, v)
}
}
var indexErr error
for _, i := range model.indexes {
indexErr = mg.CreateIndexIfNotExists(model.table, i.name, i.unique, i.columns...)
}
return indexErr
}
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.