repo
stringlengths
6
47
file_url
stringlengths
77
269
file_path
stringlengths
5
186
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-07 08:35:43
2026-01-07 08:55:24
truncated
bool
2 classes
FiloSottile/mkcert
https://github.com/FiloSottile/mkcert/blob/1c1dc4ed27ed5936046b6398d39cab4d657a2d8e/truststore_java.go
truststore_java.go
// Copyright 2018 The mkcert Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"bytes"
	"crypto/sha1"
	"crypto/sha256"
	"crypto/x509"
	"encoding/hex"
	"hash"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strings"
)

// Java truststore state, populated once by init below.
var (
	hasJava     bool   // JAVA_HOME is set
	hasKeytool  bool   // the keytool binary exists under JAVA_HOME
	javaHome    string // value of JAVA_HOME
	cacertsPath string // path to the cacerts keystore, if found
	keytoolPath string // relative path at first, absolute once detected
	storePass   string = "changeit" // default Java keystore password
)

// init detects a Java installation via JAVA_HOME and locates the
// keytool binary and the cacerts keystore. Both the modern JDK layout
// (lib/security) and the legacy layout (jre/lib/security) are checked;
// the jre/ path wins when both exist.
func init() {
	if runtime.GOOS == "windows" {
		keytoolPath = filepath.Join("bin", "keytool.exe")
	} else {
		keytoolPath = filepath.Join("bin", "keytool")
	}

	if v := os.Getenv("JAVA_HOME"); v != "" {
		hasJava = true
		javaHome = v

		if pathExists(filepath.Join(v, keytoolPath)) {
			hasKeytool = true
			keytoolPath = filepath.Join(v, keytoolPath)
		}

		if pathExists(filepath.Join(v, "lib", "security", "cacerts")) {
			cacertsPath = filepath.Join(v, "lib", "security", "cacerts")
		}
		if pathExists(filepath.Join(v, "jre", "lib", "security", "cacerts")) {
			cacertsPath = filepath.Join(v, "jre", "lib", "security", "cacerts")
		}
	}
}

// checkJava reports whether the root CA is present in the Java cacerts
// keystore, by matching its SHA-1 or SHA-256 fingerprint against the
// output of "keytool -list".
func (m *mkcert) checkJava() bool {
	if !hasKeytool {
		return false
	}

	// exists returns true if the given x509.Certificate's fingerprint
	// is in the keytool -list output
	exists := func(c *x509.Certificate, h hash.Hash, keytoolOutput []byte) bool {
		h.Write(c.Raw)
		fp := strings.ToUpper(hex.EncodeToString(h.Sum(nil)))
		return bytes.Contains(keytoolOutput, []byte(fp))
	}

	keytoolOutput, err := exec.Command(keytoolPath, "-list", "-keystore", cacertsPath, "-storepass", storePass).CombinedOutput()
	fatalIfCmdErr(err, "keytool -list", keytoolOutput)
	// keytool outputs SHA1 and SHA256 (Java 9+) certificates in uppercase hex
	// with each octet pair delimited by ":". Drop them from the keytool output
	keytoolOutput = bytes.Replace(keytoolOutput, []byte(":"), nil, -1)

	// pre-Java 9 uses SHA1 fingerprints
	s1, s256 := sha1.New(), sha256.New()
	return exists(m.caCert, s1, keytoolOutput) || exists(m.caCert, s256, keytoolOutput)
}

// installJava imports the root CA into the Java cacerts keystore with
// "keytool -importcert", escalating with sudo if needed (see execKeytool).
func (m *mkcert) installJava() {
	args := []string{
		"-importcert", "-noprompt",
		"-keystore", cacertsPath,
		"-storepass", storePass,
		"-file", filepath.Join(m.CAROOT, rootName),
		"-alias", m.caUniqueName(),
	}

	out, err := execKeytool(exec.Command(keytoolPath, args...))
	fatalIfCmdErr(err, "keytool -importcert", out)
}

// uninstallJava deletes the root CA's alias from the cacerts keystore.
// An alias that is already absent is not treated as an error.
func (m *mkcert) uninstallJava() {
	args := []string{
		"-delete",
		"-alias", m.caUniqueName(),
		"-keystore", cacertsPath,
		"-storepass", storePass,
	}
	out, err := execKeytool(exec.Command(keytoolPath, args...))
	if bytes.Contains(out, []byte("does not exist")) {
		return // cert didn't exist
	}
	fatalIfCmdErr(err, "keytool -delete", out)
}

// execKeytool will execute a "keytool" command and if needed re-execute
// the command with commandWithSudo to work around file permissions.
func execKeytool(cmd *exec.Cmd) ([]byte, error) {
	out, err := cmd.CombinedOutput()
	if err != nil && bytes.Contains(out, []byte("java.io.FileNotFoundException")) && runtime.GOOS != "windows" {
		origArgs := cmd.Args[1:]
		cmd = commandWithSudo(cmd.Path)
		cmd.Args = append(cmd.Args, origArgs...)
		// Preserve JAVA_HOME across the sudo re-exec so keytool still
		// resolves the same installation.
		cmd.Env = []string{
			"JAVA_HOME=" + javaHome,
		}
		out, err = cmd.CombinedOutput()
	}
	return out, err
}
go
BSD-3-Clause
1c1dc4ed27ed5936046b6398d39cab4d657a2d8e
2026-01-07T08:35:43.510017Z
false
FiloSottile/mkcert
https://github.com/FiloSottile/mkcert/blob/1c1dc4ed27ed5936046b6398d39cab4d657a2d8e/truststore_linux.go
truststore_linux.go
// Copyright 2018 The mkcert Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"strings"
)

var (
	// Glob patterns for Firefox profile directories (regular and snap installs).
	FirefoxProfiles = []string{os.Getenv("HOME") + "/.mozilla/firefox/*", os.Getenv("HOME") + "/snap/firefox/common/.mozilla/firefox/*"}
	NSSBrowsers     = "Firefox and/or Chrome/Chromium"

	// SystemTrustFilename is a fmt template for the installed CA path and
	// SystemTrustCommand refreshes the system store after changes. Both
	// stay zero when no known trust store layout is detected.
	SystemTrustFilename string
	SystemTrustCommand  []string
	CertutilInstallHelp string
)

// init chooses the certutil install hint from the available package
// manager, and detects the distribution's CA trust store layout
// (RHEL/Fedora, Debian/Ubuntu, Arch, then openSUSE — first match wins).
func init() {
	switch {
	case binaryExists("apt"):
		CertutilInstallHelp = "apt install libnss3-tools"
	case binaryExists("yum"):
		CertutilInstallHelp = "yum install nss-tools"
	case binaryExists("zypper"):
		CertutilInstallHelp = "zypper install mozilla-nss-tools"
	}
	if pathExists("/etc/pki/ca-trust/source/anchors/") {
		SystemTrustFilename = "/etc/pki/ca-trust/source/anchors/%s.pem"
		SystemTrustCommand = []string{"update-ca-trust", "extract"}
	} else if pathExists("/usr/local/share/ca-certificates/") {
		SystemTrustFilename = "/usr/local/share/ca-certificates/%s.crt"
		SystemTrustCommand = []string{"update-ca-certificates"}
	} else if pathExists("/etc/ca-certificates/trust-source/anchors/") {
		SystemTrustFilename = "/etc/ca-certificates/trust-source/anchors/%s.crt"
		SystemTrustCommand = []string{"trust", "extract-compat"}
	} else if pathExists("/usr/share/pki/trust/anchors") {
		SystemTrustFilename = "/usr/share/pki/trust/anchors/%s.pem"
		SystemTrustCommand = []string{"update-ca-certificates"}
	}
}

// systemTrustFilename returns the full path at which the root CA is (or
// would be) installed in the system trust store, spaces replaced by "_".
func (m *mkcert) systemTrustFilename() string {
	return fmt.Sprintf(SystemTrustFilename, strings.Replace(m.caUniqueName(), " ", "_", -1))
}

// installPlatform copies the root CA into the detected trust anchors
// directory (via sudo tee, so it works on root-owned paths) and then
// refreshes the system store. Returns false when the layout is unknown.
func (m *mkcert) installPlatform() bool {
	if SystemTrustCommand == nil {
		log.Printf("Installing to the system store is not yet supported on this Linux 😣 but %s will still work.", NSSBrowsers)
		log.Printf("You can also manually install the root certificate at %q.", filepath.Join(m.CAROOT, rootName))
		return false
	}

	cert, err := ioutil.ReadFile(filepath.Join(m.CAROOT, rootName))
	fatalIfErr(err, "failed to read root certificate")

	cmd := commandWithSudo("tee", m.systemTrustFilename())
	cmd.Stdin = bytes.NewReader(cert)
	out, err := cmd.CombinedOutput()
	fatalIfCmdErr(err, "tee", out)

	cmd = commandWithSudo(SystemTrustCommand...)
	out, err = cmd.CombinedOutput()
	fatalIfCmdErr(err, strings.Join(SystemTrustCommand, " "), out)

	return true
}

// uninstallPlatform removes the installed root CA — including any copy
// left under the old non-unique filename — and refreshes the store.
func (m *mkcert) uninstallPlatform() bool {
	if SystemTrustCommand == nil {
		return false
	}

	cmd := commandWithSudo("rm", "-f", m.systemTrustFilename())
	out, err := cmd.CombinedOutput()
	fatalIfCmdErr(err, "rm", out)

	// We used to install under non-unique filenames.
	legacyFilename := fmt.Sprintf(SystemTrustFilename, "mkcert-rootCA")
	if pathExists(legacyFilename) {
		cmd := commandWithSudo("rm", "-f", legacyFilename)
		out, err := cmd.CombinedOutput()
		fatalIfCmdErr(err, "rm (legacy filename)", out)
	}

	cmd = commandWithSudo(SystemTrustCommand...)
	out, err = cmd.CombinedOutput()
	fatalIfCmdErr(err, strings.Join(SystemTrustCommand, " "), out)

	return true
}
go
BSD-3-Clause
1c1dc4ed27ed5936046b6398d39cab4d657a2d8e
2026-01-07T08:35:43.510017Z
false
FiloSottile/mkcert
https://github.com/FiloSottile/mkcert/blob/1c1dc4ed27ed5936046b6398d39cab4d657a2d8e/truststore_nss.go
truststore_nss.go
// Copyright 2018 The mkcert Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"bytes"
	"log"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strings"
)

var (
	hasNSS       bool   // an NSS database or Firefox install was found
	hasCertutil  bool   // the certutil binary is available
	certutilPath string // absolute path to certutil when hasCertutil

	// Well-known NSS database locations.
	nssDBs = []string{
		filepath.Join(os.Getenv("HOME"), ".pki/nssdb"),
		filepath.Join(os.Getenv("HOME"), "snap/chromium/current/.pki/nssdb"), // Snapcraft
		"/etc/pki/nssdb", // CentOS 7
	}
	// Well-known Firefox install locations across platforms.
	firefoxPaths = []string{
		"/usr/bin/firefox",
		"/usr/bin/firefox-nightly",
		"/usr/bin/firefox-developer-edition",
		"/snap/firefox",
		"/Applications/Firefox.app",
		"/Applications/FirefoxDeveloperEdition.app",
		"/Applications/Firefox Developer Edition.app",
		"/Applications/Firefox Nightly.app",
		"C:\\Program Files\\Mozilla Firefox",
	}
)

// init detects whether NSS is in use (any known database or Firefox
// path exists) and locates certutil: on macOS it checks PATH, then the
// default Homebrew location, then asks "brew --prefix nss"; on Linux it
// checks PATH only.
func init() {
	allPaths := append(append([]string{}, nssDBs...), firefoxPaths...)
	for _, path := range allPaths {
		if pathExists(path) {
			hasNSS = true
			break
		}
	}

	switch runtime.GOOS {
	case "darwin":
		switch {
		case binaryExists("certutil"):
			certutilPath, _ = exec.LookPath("certutil")
			hasCertutil = true
		case binaryExists("/usr/local/opt/nss/bin/certutil"):
			// Check the default Homebrew path, to save executing Ruby. #135
			certutilPath = "/usr/local/opt/nss/bin/certutil"
			hasCertutil = true
		default:
			out, err := exec.Command("brew", "--prefix", "nss").Output()
			if err == nil {
				certutilPath = filepath.Join(strings.TrimSpace(string(out)), "bin", "certutil")
				hasCertutil = pathExists(certutilPath)
			}
		}

	case "linux":
		if hasCertutil = binaryExists("certutil"); hasCertutil {
			certutilPath, _ = exec.LookPath("certutil")
		}
	}
}

// checkNSS reports whether the root CA validates (certutil -V, SSL
// usage "L") in every detected NSS profile; false when no profiles
// were found at all.
func (m *mkcert) checkNSS() bool {
	if !hasCertutil {
		return false
	}
	success := true
	if m.forEachNSSProfile(func(profile string) {
		err := exec.Command(certutilPath, "-V", "-d", profile, "-u", "L", "-n", m.caUniqueName()).Run()
		if err != nil {
			success = false
		}
	}) == 0 {
		success = false
	}
	return success
}

// installNSS adds the root CA (trust flags "C,,": trusted CA for SSL)
// to every detected NSS profile, then re-verifies with checkNSS.
func (m *mkcert) installNSS() bool {
	if m.forEachNSSProfile(func(profile string) {
		cmd := exec.Command(certutilPath, "-A", "-d", profile, "-t", "C,,", "-n", m.caUniqueName(), "-i", filepath.Join(m.CAROOT, rootName))
		out, err := execCertutil(cmd)
		fatalIfCmdErr(err, "certutil -A -d "+profile, out)
	}) == 0 {
		log.Printf("ERROR: no %s security databases found", NSSBrowsers)
		return false
	}
	if !m.checkNSS() {
		log.Printf("Installing in %s failed. Please report the issue with details about your environment at https://github.com/FiloSottile/mkcert/issues/new πŸ‘Ž", NSSBrowsers)
		log.Printf("Note that if you never started %s, you need to do that at least once.", NSSBrowsers)
		return false
	}
	return true
}

// uninstallNSS deletes the root CA from each profile where it is
// currently present; profiles without it are silently skipped.
func (m *mkcert) uninstallNSS() {
	m.forEachNSSProfile(func(profile string) {
		err := exec.Command(certutilPath, "-V", "-d", profile, "-u", "L", "-n", m.caUniqueName()).Run()
		if err != nil {
			return
		}
		cmd := exec.Command(certutilPath, "-D", "-d", profile, "-n", m.caUniqueName())
		out, err := execCertutil(cmd)
		fatalIfCmdErr(err, "certutil -D -d "+profile, out)
	})
}

// execCertutil will execute a "certutil" command and if needed re-execute
// the command with commandWithSudo to work around file permissions.
func execCertutil(cmd *exec.Cmd) ([]byte, error) {
	out, err := cmd.CombinedOutput()
	if err != nil && bytes.Contains(out, []byte("SEC_ERROR_READ_ONLY")) && runtime.GOOS != "windows" {
		origArgs := cmd.Args[1:]
		cmd = commandWithSudo(cmd.Path)
		cmd.Args = append(cmd.Args, origArgs...)
		out, err = cmd.CombinedOutput()
	}
	return out, err
}

// forEachNSSProfile invokes f once per NSS database found in the known
// locations and the Firefox profile globs, using the "sql:" prefix for
// cert9.db databases and "dbm:" for legacy cert8.db ones. It returns
// the number of profiles visited.
func (m *mkcert) forEachNSSProfile(f func(profile string)) (found int) {
	var profiles []string
	profiles = append(profiles, nssDBs...)
	for _, ff := range FirefoxProfiles {
		pp, _ := filepath.Glob(ff)
		profiles = append(profiles, pp...)
	}
	for _, profile := range profiles {
		if stat, err := os.Stat(profile); err != nil || !stat.IsDir() {
			continue
		}
		if pathExists(filepath.Join(profile, "cert9.db")) {
			f("sql:" + profile)
			found++
		} else if pathExists(filepath.Join(profile, "cert8.db")) {
			f("dbm:" + profile)
			found++
		}
	}
	return
}
go
BSD-3-Clause
1c1dc4ed27ed5936046b6398d39cab4d657a2d8e
2026-01-07T08:35:43.510017Z
false
FiloSottile/mkcert
https://github.com/FiloSottile/mkcert/blob/1c1dc4ed27ed5936046b6398d39cab4d657a2d8e/truststore_windows.go
truststore_windows.go
// Copyright 2018 The mkcert Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"io/ioutil"
	"math/big"
	"os"
	"path/filepath"
	"syscall"
	"unsafe"
)

var (
	FirefoxProfiles     = []string{os.Getenv("USERPROFILE") + "\\AppData\\Roaming\\Mozilla\\Firefox\\Profiles"}
	CertutilInstallHelp = "" // certutil unsupported on Windows
	NSSBrowsers         = "Firefox"
)

// Lazily-loaded crypt32.dll procedures used to manipulate the Windows
// certificate store.
var (
	modcrypt32                           = syscall.NewLazyDLL("crypt32.dll")
	procCertAddEncodedCertificateToStore = modcrypt32.NewProc("CertAddEncodedCertificateToStore")
	procCertCloseStore                   = modcrypt32.NewProc("CertCloseStore")
	procCertDeleteCertificateFromStore   = modcrypt32.NewProc("CertDeleteCertificateFromStore")
	procCertDuplicateCertificateContext  = modcrypt32.NewProc("CertDuplicateCertificateContext")
	procCertEnumCertificatesInStore      = modcrypt32.NewProc("CertEnumCertificatesInStore")
	procCertOpenSystemStoreW             = modcrypt32.NewProc("CertOpenSystemStoreW")
)

// installPlatform reads the root CA PEM from disk, extracts the DER
// bytes, and adds it to the Windows ROOT system store.
func (m *mkcert) installPlatform() bool {
	// Load cert
	cert, err := ioutil.ReadFile(filepath.Join(m.CAROOT, rootName))
	fatalIfErr(err, "failed to read root certificate")
	// Decode PEM
	if certBlock, _ := pem.Decode(cert); certBlock == nil || certBlock.Type != "CERTIFICATE" {
		fatalIfErr(fmt.Errorf("invalid PEM data"), "decode pem")
	} else {
		cert = certBlock.Bytes
	}
	// Open root store
	store, err := openWindowsRootStore()
	fatalIfErr(err, "open root store")
	defer store.close()
	// Add cert
	fatalIfErr(store.addCert(cert), "add cert")
	return true
}

// uninstallPlatform deletes every certificate in the ROOT store whose
// serial number matches the root CA's; finding none is a fatal error.
func (m *mkcert) uninstallPlatform() bool {
	// We'll just remove all certs with the same serial number
	// Open root store
	store, err := openWindowsRootStore()
	fatalIfErr(err, "open root store")
	defer store.close()
	// Do the deletion
	deletedAny, err := store.deleteCertsWithSerial(m.caCert.SerialNumber)
	if err == nil && !deletedAny {
		err = fmt.Errorf("no certs found")
	}
	fatalIfErr(err, "delete cert")
	return true
}

// windowsRootStore wraps an HCERTSTORE handle for the system ROOT store.
type windowsRootStore uintptr

// openWindowsRootStore opens the "ROOT" system certificate store.
func openWindowsRootStore() (windowsRootStore, error) {
	rootStr, err := syscall.UTF16PtrFromString("ROOT")
	if err != nil {
		return 0, err
	}
	store, _, err := procCertOpenSystemStoreW.Call(0, uintptr(unsafe.Pointer(rootStr)))
	if store != 0 {
		return windowsRootStore(store), nil
	}
	return 0, fmt.Errorf("failed to open windows root store: %v", err)
}

// close releases the store handle via CertCloseStore.
func (w windowsRootStore) close() error {
	ret, _, err := procCertCloseStore.Call(uintptr(w), 0)
	if ret != 0 {
		return nil
	}
	return fmt.Errorf("failed to close windows root store: %v", err)
}

// addCert adds a DER-encoded certificate to the store, replacing any
// existing entry for the same certificate.
func (w windowsRootStore) addCert(cert []byte) error {
	// TODO: ok to always overwrite?
	ret, _, err := procCertAddEncodedCertificateToStore.Call(
		uintptr(w), // HCERTSTORE hCertStore
		uintptr(syscall.X509_ASN_ENCODING|syscall.PKCS_7_ASN_ENCODING), // DWORD dwCertEncodingType
		uintptr(unsafe.Pointer(&cert[0])),                              // const BYTE *pbCertEncoded
		uintptr(len(cert)),                                             // DWORD cbCertEncoded
		3, // DWORD dwAddDisposition (CERT_STORE_ADD_REPLACE_EXISTING is 3)
		0, // PCCERT_CONTEXT *ppCertContext
	)
	if ret != 0 {
		return nil
	}
	return fmt.Errorf("failed adding cert: %v", err)
}

// deleteCertsWithSerial enumerates every certificate in the store and
// deletes those whose serial number equals serial, reporting whether
// anything was deleted.
func (w windowsRootStore) deleteCertsWithSerial(serial *big.Int) (bool, error) {
	// Go over each, deleting the ones we find
	var cert *syscall.CertContext
	deletedAny := false
	for {
		// Next enum
		certPtr, _, err := procCertEnumCertificatesInStore.Call(uintptr(w), uintptr(unsafe.Pointer(cert)))
		if cert = (*syscall.CertContext)(unsafe.Pointer(certPtr)); cert == nil {
			// 0x80092004 is CRYPT_E_NOT_FOUND: the enumeration is done.
			if errno, ok := err.(syscall.Errno); ok && errno == 0x80092004 {
				break
			}
			return deletedAny, fmt.Errorf("failed enumerating certs: %v", err)
		}
		// Parse cert
		certBytes := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:cert.Length]
		parsedCert, err := x509.ParseCertificate(certBytes)
		// We'll just ignore parse failures for now
		if err == nil && parsedCert.SerialNumber != nil && parsedCert.SerialNumber.Cmp(serial) == 0 {
			// Duplicate the context so it doesn't stop the enum when we delete it
			dupCertPtr, _, err := procCertDuplicateCertificateContext.Call(uintptr(unsafe.Pointer(cert)))
			if dupCertPtr == 0 {
				return deletedAny, fmt.Errorf("failed duplicating context: %v", err)
			}
			if ret, _, err := procCertDeleteCertificateFromStore.Call(dupCertPtr); ret == 0 {
				return deletedAny, fmt.Errorf("failed deleting certificate: %v", err)
			}
			deletedAny = true
		}
	}
	return deletedAny, nil
}
go
BSD-3-Clause
1c1dc4ed27ed5936046b6398d39cab4d657a2d8e
2026-01-07T08:35:43.510017Z
false
FiloSottile/mkcert
https://github.com/FiloSottile/mkcert/blob/1c1dc4ed27ed5936046b6398d39cab4d657a2d8e/truststore_darwin.go
truststore_darwin.go
// Copyright 2018 The mkcert Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"bytes"
	"encoding/asn1"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"

	"howett.net/plist"
)

var (
	FirefoxProfiles     = []string{os.Getenv("HOME") + "/Library/Application Support/Firefox/Profiles/*"}
	CertutilInstallHelp = "brew install nss"
	NSSBrowsers         = "Firefox"
)

// Explicit trust settings (sslServer + basicX509, trust result 1) that
// installPlatform applies to our CA's keychain entry, pre-parsed once
// from the XML below.
// https://github.com/golang/go/issues/24652#issuecomment-399826583
var trustSettings []interface{}
var _, _ = plist.Unmarshal(trustSettingsData, &trustSettings)
var trustSettingsData = []byte(`
<array>
	<dict>
		<key>kSecTrustSettingsPolicy</key>
		<data>
		KoZIhvdjZAED
		</data>
		<key>kSecTrustSettingsPolicyName</key>
		<string>sslServer</string>
		<key>kSecTrustSettingsResult</key>
		<integer>1</integer>
	</dict>
	<dict>
		<key>kSecTrustSettingsPolicy</key>
		<data>
		KoZIhvdjZAEC
		</data>
		<key>kSecTrustSettingsPolicyName</key>
		<string>basicX509</string>
		<key>kSecTrustSettingsResult</key>
		<integer>1</integer>
	</dict>
</array>
`)

// installPlatform adds the root CA to the System keychain as a trusted
// certificate, then exports the keychain trust settings, rewrites our
// CA's entry with explicit trustSettings, and re-imports them — older
// Go versions do not understand the empty default settings.
func (m *mkcert) installPlatform() bool {
	cmd := commandWithSudo("security", "add-trusted-cert", "-d", "-k", "/Library/Keychains/System.keychain", filepath.Join(m.CAROOT, rootName))
	out, err := cmd.CombinedOutput()
	fatalIfCmdErr(err, "security add-trusted-cert", out)

	// Make trustSettings explicit, as older Go does not know the defaults.
	// https://github.com/golang/go/issues/24652
	plistFile, err := ioutil.TempFile("", "trust-settings")
	fatalIfErr(err, "failed to create temp file")
	defer os.Remove(plistFile.Name())

	cmd = commandWithSudo("security", "trust-settings-export", "-d", plistFile.Name())
	out, err = cmd.CombinedOutput()
	fatalIfCmdErr(err, "security trust-settings-export", out)

	plistData, err := ioutil.ReadFile(plistFile.Name())
	fatalIfErr(err, "failed to read trust settings")

	var plistRoot map[string]interface{}
	_, err = plist.Unmarshal(plistData, &plistRoot)
	fatalIfErr(err, "failed to parse trust settings")

	rootSubjectASN1, _ := asn1.Marshal(m.caCert.Subject.ToRDNSequence())

	if plistRoot["trustVersion"].(uint64) != 1 {
		log.Fatalln("ERROR: unsupported trust settings version:", plistRoot["trustVersion"])
	}
	trustList := plistRoot["trustList"].(map[string]interface{})
	for key := range trustList {
		entry := trustList[key].(map[string]interface{})
		if _, ok := entry["issuerName"]; !ok {
			continue
		}
		issuerName := entry["issuerName"].([]byte)
		// Match our CA by issuer name (the CA is self-signed, so the
		// issuer equals the subject we just marshaled).
		if !bytes.Equal(rootSubjectASN1, issuerName) {
			continue
		}
		entry["trustSettings"] = trustSettings
		break
	}

	plistData, err = plist.MarshalIndent(plistRoot, plist.XMLFormat, "\t")
	fatalIfErr(err, "failed to serialize trust settings")

	err = ioutil.WriteFile(plistFile.Name(), plistData, 0600)
	fatalIfErr(err, "failed to write trust settings")

	cmd = commandWithSudo("security", "trust-settings-import", "-d", plistFile.Name())
	out, err = cmd.CombinedOutput()
	fatalIfCmdErr(err, "security trust-settings-import", out)

	return true
}

// uninstallPlatform removes the root CA from the System keychain.
func (m *mkcert) uninstallPlatform() bool {
	cmd := commandWithSudo("security", "remove-trusted-cert", "-d", filepath.Join(m.CAROOT, rootName))
	out, err := cmd.CombinedOutput()
	fatalIfCmdErr(err, "security remove-trusted-cert", out)
	return true
}
go
BSD-3-Clause
1c1dc4ed27ed5936046b6398d39cab4d657a2d8e
2026-01-07T08:35:43.510017Z
false
FiloSottile/mkcert
https://github.com/FiloSottile/mkcert/blob/1c1dc4ed27ed5936046b6398d39cab4d657a2d8e/cert.go
cert.go
// Copyright 2018 The mkcert Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha1"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/asn1"
	"encoding/pem"
	"io/ioutil"
	"log"
	"math/big"
	"net"
	"net/mail"
	"net/url"
	"os"
	"os/user"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"
	"time"

	pkcs12 "software.sslmate.com/src/go-pkcs12"
)

// userAndHostname identifies the generating user and machine in
// certificate subjects, e.g. "user@host (Full Name)".
var userAndHostname string

func init() {
	u, err := user.Current()
	if err == nil {
		userAndHostname = u.Username + "@"
	}
	if h, err := os.Hostname(); err == nil {
		userAndHostname += h
	}
	if err == nil && u.Name != "" && u.Name != u.Username {
		userAndHostname += " (" + u.Name + ")"
	}
}

// makeCert issues a leaf certificate for hosts — each entry classified
// as an IP, email address, URL, or DNS name — signed by the loaded CA,
// and writes the certificate and key (or a PKCS#12 bundle) to disk.
func (m *mkcert) makeCert(hosts []string) {
	if m.caKey == nil {
		log.Fatalln("ERROR: can't create new certificates because the CA key (rootCA-key.pem) is missing")
	}

	priv, err := m.generateKey(false)
	fatalIfErr(err, "failed to generate certificate key")
	pub := priv.(crypto.Signer).Public()

	// Certificates last for 2 years and 3 months, which is always less than
	// 825 days, the limit that macOS/iOS apply to all certificates,
	// including custom roots. See https://support.apple.com/en-us/HT210176.
	expiration := time.Now().AddDate(2, 3, 0)

	tpl := &x509.Certificate{
		SerialNumber: randomSerialNumber(),
		Subject: pkix.Name{
			Organization:       []string{"mkcert development certificate"},
			OrganizationalUnit: []string{userAndHostname},
		},

		NotBefore: time.Now(), NotAfter: expiration,

		KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
	}

	// Sort each host into the matching SAN field.
	for _, h := range hosts {
		if ip := net.ParseIP(h); ip != nil {
			tpl.IPAddresses = append(tpl.IPAddresses, ip)
		} else if email, err := mail.ParseAddress(h); err == nil && email.Address == h {
			tpl.EmailAddresses = append(tpl.EmailAddresses, h)
		} else if uriName, err := url.Parse(h); err == nil && uriName.Scheme != "" && uriName.Host != "" {
			tpl.URIs = append(tpl.URIs, uriName)
		} else {
			tpl.DNSNames = append(tpl.DNSNames, h)
		}
	}

	// Extended key usages follow from the SAN types (and the -client flag).
	if m.client {
		tpl.ExtKeyUsage = append(tpl.ExtKeyUsage, x509.ExtKeyUsageClientAuth)
	}
	if len(tpl.IPAddresses) > 0 || len(tpl.DNSNames) > 0 || len(tpl.URIs) > 0 {
		tpl.ExtKeyUsage = append(tpl.ExtKeyUsage, x509.ExtKeyUsageServerAuth)
	}
	if len(tpl.EmailAddresses) > 0 {
		tpl.ExtKeyUsage = append(tpl.ExtKeyUsage, x509.ExtKeyUsageEmailProtection)
	}

	// IIS (the main target of PKCS #12 files), only shows the deprecated
	// Common Name in the UI. See issue #115.
	if m.pkcs12 {
		tpl.Subject.CommonName = hosts[0]
	}

	cert, err := x509.CreateCertificate(rand.Reader, tpl, m.caCert, pub, m.caKey)
	fatalIfErr(err, "failed to generate certificate")

	certFile, keyFile, p12File := m.fileNames(hosts)

	if !m.pkcs12 {
		certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert})
		privDER, err := x509.MarshalPKCS8PrivateKey(priv)
		fatalIfErr(err, "failed to encode certificate key")
		privPEM := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: privDER})

		if certFile == keyFile {
			// Same path for both: write a combined PEM, key-tight perms.
			err = ioutil.WriteFile(keyFile, append(certPEM, privPEM...), 0600)
			fatalIfErr(err, "failed to save certificate and key")
		} else {
			err = ioutil.WriteFile(certFile, certPEM, 0644)
			fatalIfErr(err, "failed to save certificate")
			err = ioutil.WriteFile(keyFile, privPEM, 0600)
			fatalIfErr(err, "failed to save certificate key")
		}
	} else {
		domainCert, _ := x509.ParseCertificate(cert)
		pfxData, err := pkcs12.Encode(rand.Reader, priv, domainCert, []*x509.Certificate{m.caCert}, "changeit")
		fatalIfErr(err, "failed to generate PKCS#12")
		err = ioutil.WriteFile(p12File, pfxData, 0644)
		fatalIfErr(err, "failed to save PKCS#12")
	}

	m.printHosts(hosts)

	if !m.pkcs12 {
		if certFile == keyFile {
			log.Printf("\nThe certificate and key are at \"%s\" βœ…\n\n", certFile)
		} else {
			log.Printf("\nThe certificate is at \"%s\" and the key at \"%s\" βœ…\n\n", certFile, keyFile)
		}
	} else {
		log.Printf("\nThe PKCS#12 bundle is at \"%s\" βœ…\n", p12File)
		log.Printf("\nThe legacy PKCS#12 encryption password is the often hardcoded default \"changeit\" ℹ️\n\n")
	}

	log.Printf("It will expire on %s πŸ—“\n\n", expiration.Format("2 January 2006"))
}

// printHosts lists the names the certificate covers, warning about
// second-level wildcards and the one-level-deep X.509 wildcard rule.
func (m *mkcert) printHosts(hosts []string) {
	secondLvlWildcardRegexp := regexp.MustCompile(`(?i)^\*\.[0-9a-z_-]+$`)
	log.Printf("\nCreated a new certificate valid for the following names πŸ“œ")
	for _, h := range hosts {
		log.Printf(" - %q", h)
		if secondLvlWildcardRegexp.MatchString(h) {
			log.Printf("   Warning: many browsers don't support second-level wildcards like %q ⚠️", h)
		}
	}

	for _, h := range hosts {
		if strings.HasPrefix(h, "*.") {
			log.Printf("\nReminder: X.509 wildcards only go one level deep, so this won't match a.b.%s ℹ️", h[2:])
			break
		}
	}
}

// generateKey creates a new private key: ECDSA P-256 when -ecdsa is
// set, otherwise RSA — 3072 bits for the root CA, 2048 for leaves.
func (m *mkcert) generateKey(rootCA bool) (crypto.PrivateKey, error) {
	if m.ecdsa {
		return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	}
	if rootCA {
		return rsa.GenerateKey(rand.Reader, 3072)
	}
	return rsa.GenerateKey(rand.Reader, 2048)
}

// fileNames derives default output paths from the first host (":" and
// "*" sanitized, "+N" suffix for extra hosts, "-client" for -client),
// honoring any -cert-file/-key-file/-p12-file overrides.
func (m *mkcert) fileNames(hosts []string) (certFile, keyFile, p12File string) {
	defaultName := strings.Replace(hosts[0], ":", "_", -1)
	defaultName = strings.Replace(defaultName, "*", "_wildcard", -1)
	if len(hosts) > 1 {
		defaultName += "+" + strconv.Itoa(len(hosts)-1)
	}
	if m.client {
		defaultName += "-client"
	}

	certFile = "./" + defaultName + ".pem"
	if m.certFile != "" {
		certFile = m.certFile
	}
	keyFile = "./" + defaultName + "-key.pem"
	if m.keyFile != "" {
		keyFile = m.keyFile
	}
	p12File = "./" + defaultName + ".p12"
	if m.p12File != "" {
		p12File = m.p12File
	}

	return
}

// randomSerialNumber returns a cryptographically random serial in
// [0, 2^128).
func randomSerialNumber() *big.Int {
	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
	serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
	fatalIfErr(err, "failed to generate serial number")
	return serialNumber
}

// makeCertFromCSR issues a certificate from the PEM-encoded CSR at
// m.csrPath, copying its subject, public key, and requested extensions,
// and writes the certificate (no key — the requester holds it) to disk.
func (m *mkcert) makeCertFromCSR() {
	if m.caKey == nil {
		log.Fatalln("ERROR: can't create new certificates because the CA key (rootCA-key.pem) is missing")
	}

	csrPEMBytes, err := ioutil.ReadFile(m.csrPath)
	fatalIfErr(err, "failed to read the CSR")
	csrPEM, _ := pem.Decode(csrPEMBytes)
	if csrPEM == nil {
		log.Fatalln("ERROR: failed to read the CSR: unexpected content")
	}
	if csrPEM.Type != "CERTIFICATE REQUEST" && csrPEM.Type != "NEW CERTIFICATE REQUEST" {
		log.Fatalln("ERROR: failed to read the CSR: expected CERTIFICATE REQUEST, got " + csrPEM.Type)
	}
	csr, err := x509.ParseCertificateRequest(csrPEM.Bytes)
	fatalIfErr(err, "failed to parse the CSR")
	fatalIfErr(csr.CheckSignature(), "invalid CSR signature")

	expiration := time.Now().AddDate(2, 3, 0)
	tpl := &x509.Certificate{
		SerialNumber:    randomSerialNumber(),
		Subject:         csr.Subject,
		ExtraExtensions: csr.Extensions, // includes requested SANs, KUs and EKUs

		NotBefore: time.Now(), NotAfter: expiration,

		// If the CSR does not request a SAN extension, fix it up for them as
		// the Common Name field does not work in modern browsers. Otherwise,
		// this will get overridden.
		DNSNames: []string{csr.Subject.CommonName},

		// Likewise, if the CSR does not set KUs and EKUs, fix it up as Apple
		// platforms require serverAuth for TLS.
		KeyUsage:    x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
	}

	if m.client {
		tpl.ExtKeyUsage = append(tpl.ExtKeyUsage, x509.ExtKeyUsageClientAuth)
	}
	if len(csr.EmailAddresses) > 0 {
		tpl.ExtKeyUsage = append(tpl.ExtKeyUsage, x509.ExtKeyUsageEmailProtection)
	}

	cert, err := x509.CreateCertificate(rand.Reader, tpl, m.caCert, csr.PublicKey, m.caKey)
	fatalIfErr(err, "failed to generate certificate")
	// Re-parse the issued cert so the effective SANs (possibly from the
	// CSR's extensions) drive the output filename and the summary below.
	c, err := x509.ParseCertificate(cert)
	fatalIfErr(err, "failed to parse generated certificate")

	var hosts []string
	hosts = append(hosts, c.DNSNames...)
	hosts = append(hosts, c.EmailAddresses...)
	for _, ip := range c.IPAddresses {
		hosts = append(hosts, ip.String())
	}
	for _, uri := range c.URIs {
		hosts = append(hosts, uri.String())
	}
	certFile, _, _ := m.fileNames(hosts)

	err = ioutil.WriteFile(certFile, pem.EncodeToMemory(
		&pem.Block{Type: "CERTIFICATE", Bytes: cert}), 0644)
	fatalIfErr(err, "failed to save certificate")

	m.printHosts(hosts)

	log.Printf("\nThe certificate is at \"%s\" βœ…\n\n", certFile)

	log.Printf("It will expire on %s πŸ—“\n\n", expiration.Format("2 January 2006"))
}

// loadCA will load or create the CA at CAROOT.
func (m *mkcert) loadCA() {
	if !pathExists(filepath.Join(m.CAROOT, rootName)) {
		m.newCA()
	}

	certPEMBlock, err := ioutil.ReadFile(filepath.Join(m.CAROOT, rootName))
	fatalIfErr(err, "failed to read the CA certificate")
	certDERBlock, _ := pem.Decode(certPEMBlock)
	if certDERBlock == nil || certDERBlock.Type != "CERTIFICATE" {
		log.Fatalln("ERROR: failed to read the CA certificate: unexpected content")
	}
	m.caCert, err = x509.ParseCertificate(certDERBlock.Bytes)
	fatalIfErr(err, "failed to parse the CA certificate")

	if !pathExists(filepath.Join(m.CAROOT, rootKeyName)) {
		return // keyless mode, where only -install works
	}

	keyPEMBlock, err := ioutil.ReadFile(filepath.Join(m.CAROOT, rootKeyName))
	fatalIfErr(err, "failed to read the CA key")
	keyDERBlock, _ := pem.Decode(keyPEMBlock)
	if keyDERBlock == nil || keyDERBlock.Type != "PRIVATE KEY" {
		log.Fatalln("ERROR: failed to read the CA key: unexpected content")
	}
	m.caKey, err = x509.ParsePKCS8PrivateKey(keyDERBlock.Bytes)
	fatalIfErr(err, "failed to parse the CA key")
}

// newCA generates a fresh root CA key and self-signed certificate
// (10-year validity, CA:TRUE, pathlen:0) and saves both under CAROOT.
func (m *mkcert) newCA() {
	priv, err := m.generateKey(true)
	fatalIfErr(err, "failed to generate the CA key")
	pub := priv.(crypto.Signer).Public()

	spkiASN1, err := x509.MarshalPKIXPublicKey(pub)
	fatalIfErr(err, "failed to encode public key")

	var spki struct {
		Algorithm        pkix.AlgorithmIdentifier
		SubjectPublicKey asn1.BitString
	}
	_, err = asn1.Unmarshal(spkiASN1, &spki)
	fatalIfErr(err, "failed to decode public key")
	// SHA-1 here derives the Subject Key Identifier, not a security use.
	skid := sha1.Sum(spki.SubjectPublicKey.Bytes)

	tpl := &x509.Certificate{
		SerialNumber: randomSerialNumber(),
		Subject: pkix.Name{
			Organization:       []string{"mkcert development CA"},
			OrganizationalUnit: []string{userAndHostname},

			// The CommonName is required by iOS to show the certificate in the
			// "Certificate Trust Settings" menu.
			// https://github.com/FiloSottile/mkcert/issues/47
			CommonName: "mkcert " + userAndHostname,
		},
		SubjectKeyId: skid[:],

		NotAfter:  time.Now().AddDate(10, 0, 0),
		NotBefore: time.Now(),

		KeyUsage: x509.KeyUsageCertSign,

		BasicConstraintsValid: true,
		IsCA:                  true,
		MaxPathLenZero:        true,
	}

	cert, err := x509.CreateCertificate(rand.Reader, tpl, tpl, pub, priv)
	fatalIfErr(err, "failed to generate CA certificate")

	privDER, err := x509.MarshalPKCS8PrivateKey(priv)
	fatalIfErr(err, "failed to encode CA key")
	err = ioutil.WriteFile(filepath.Join(m.CAROOT, rootKeyName), pem.EncodeToMemory(
		&pem.Block{Type: "PRIVATE KEY", Bytes: privDER}), 0400)
	fatalIfErr(err, "failed to save CA key")

	err = ioutil.WriteFile(filepath.Join(m.CAROOT, rootName), pem.EncodeToMemory(
		&pem.Block{Type: "CERTIFICATE", Bytes: cert}), 0644)
	fatalIfErr(err, "failed to save CA certificate")

	log.Printf("Created a new local CA πŸ’₯\n")
}

// caUniqueName returns a serial-number-qualified name for this CA, used
// as the keystore alias and trust-store filename.
func (m *mkcert) caUniqueName() string {
	return "mkcert development CA " + m.caCert.SerialNumber.String()
}
go
BSD-3-Clause
1c1dc4ed27ed5936046b6398d39cab4d657a2d8e
2026-01-07T08:35:43.510017Z
false
FiloSottile/mkcert
https://github.com/FiloSottile/mkcert/blob/1c1dc4ed27ed5936046b6398d39cab4d657a2d8e/main.go
main.go
// Copyright 2018 The mkcert Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Command mkcert is a simple zero-config tool to make development certificates. package main import ( "crypto" "crypto/x509" "flag" "fmt" "log" "net" "net/mail" "net/url" "os" "os/exec" "os/user" "path/filepath" "regexp" "runtime" "runtime/debug" "strings" "sync" "golang.org/x/net/idna" ) const shortUsage = `Usage of mkcert: $ mkcert -install Install the local CA in the system trust store. $ mkcert example.org Generate "example.org.pem" and "example.org-key.pem". $ mkcert example.com myapp.dev localhost 127.0.0.1 ::1 Generate "example.com+4.pem" and "example.com+4-key.pem". $ mkcert "*.example.it" Generate "_wildcard.example.it.pem" and "_wildcard.example.it-key.pem". $ mkcert -uninstall Uninstall the local CA (but do not delete it). ` const advancedUsage = `Advanced options: -cert-file FILE, -key-file FILE, -p12-file FILE Customize the output paths. -client Generate a certificate for client authentication. -ecdsa Generate a certificate with an ECDSA key. -pkcs12 Generate a ".p12" PKCS #12 file, also know as a ".pfx" file, containing certificate and key for legacy applications. -csr CSR Generate a certificate based on the supplied CSR. Conflicts with all other flags and arguments except -install and -cert-file. -CAROOT Print the CA certificate and key storage location. $CAROOT (environment variable) Set the CA certificate and key storage location. (This allows maintaining multiple local CAs in parallel.) $TRUST_STORES (environment variable) A comma-separated list of trust stores to install the local root CA into. Options are: "system", "java" and "nss" (includes Firefox). Autodetected by default. ` // Version can be set at link time to override debug.BuildInfo.Main.Version, // which is "(devel)" when building from within the module. See // golang.org/issue/29814 and golang.org/issue/29228. 
var Version string func main() { if len(os.Args) == 1 { fmt.Print(shortUsage) return } log.SetFlags(0) var ( installFlag = flag.Bool("install", false, "") uninstallFlag = flag.Bool("uninstall", false, "") pkcs12Flag = flag.Bool("pkcs12", false, "") ecdsaFlag = flag.Bool("ecdsa", false, "") clientFlag = flag.Bool("client", false, "") helpFlag = flag.Bool("help", false, "") carootFlag = flag.Bool("CAROOT", false, "") csrFlag = flag.String("csr", "", "") certFileFlag = flag.String("cert-file", "", "") keyFileFlag = flag.String("key-file", "", "") p12FileFlag = flag.String("p12-file", "", "") versionFlag = flag.Bool("version", false, "") ) flag.Usage = func() { fmt.Fprint(flag.CommandLine.Output(), shortUsage) fmt.Fprintln(flag.CommandLine.Output(), `For more options, run "mkcert -help".`) } flag.Parse() if *helpFlag { fmt.Print(shortUsage) fmt.Print(advancedUsage) return } if *versionFlag { if Version != "" { fmt.Println(Version) return } if buildInfo, ok := debug.ReadBuildInfo(); ok { fmt.Println(buildInfo.Main.Version) return } fmt.Println("(unknown)") return } if *carootFlag { if *installFlag || *uninstallFlag { log.Fatalln("ERROR: you can't set -[un]install and -CAROOT at the same time") } fmt.Println(getCAROOT()) return } if *installFlag && *uninstallFlag { log.Fatalln("ERROR: you can't set -install and -uninstall at the same time") } if *csrFlag != "" && (*pkcs12Flag || *ecdsaFlag || *clientFlag) { log.Fatalln("ERROR: can only combine -csr with -install and -cert-file") } if *csrFlag != "" && flag.NArg() != 0 { log.Fatalln("ERROR: can't specify extra arguments when using -csr") } (&mkcert{ installMode: *installFlag, uninstallMode: *uninstallFlag, csrPath: *csrFlag, pkcs12: *pkcs12Flag, ecdsa: *ecdsaFlag, client: *clientFlag, certFile: *certFileFlag, keyFile: *keyFileFlag, p12File: *p12FileFlag, }).Run(flag.Args()) } const rootName = "rootCA.pem" const rootKeyName = "rootCA-key.pem" type mkcert struct { installMode, uninstallMode bool pkcs12, ecdsa, client bool 
keyFile, certFile, p12File string csrPath string CAROOT string caCert *x509.Certificate caKey crypto.PrivateKey // The system cert pool is only loaded once. After installing the root, checks // will keep failing until the next execution. TODO: maybe execve? // https://github.com/golang/go/issues/24540 (thanks, myself) ignoreCheckFailure bool } func (m *mkcert) Run(args []string) { m.CAROOT = getCAROOT() if m.CAROOT == "" { log.Fatalln("ERROR: failed to find the default CA location, set one as the CAROOT env var") } fatalIfErr(os.MkdirAll(m.CAROOT, 0755), "failed to create the CAROOT") m.loadCA() if m.installMode { m.install() if len(args) == 0 { return } } else if m.uninstallMode { m.uninstall() return } else { var warning bool if storeEnabled("system") && !m.checkPlatform() { warning = true log.Println("Note: the local CA is not installed in the system trust store.") } if storeEnabled("nss") && hasNSS && CertutilInstallHelp != "" && !m.checkNSS() { warning = true log.Printf("Note: the local CA is not installed in the %s trust store.", NSSBrowsers) } if storeEnabled("java") && hasJava && !m.checkJava() { warning = true log.Println("Note: the local CA is not installed in the Java trust store.") } if warning { log.Println("Run \"mkcert -install\" for certificates to be trusted automatically ⚠️") } } if m.csrPath != "" { m.makeCertFromCSR() return } if len(args) == 0 { flag.Usage() return } hostnameRegexp := regexp.MustCompile(`(?i)^(\*\.)?[0-9a-z_-]([0-9a-z._-]*[0-9a-z_-])?$`) for i, name := range args { if ip := net.ParseIP(name); ip != nil { continue } if email, err := mail.ParseAddress(name); err == nil && email.Address == name { continue } if uriName, err := url.Parse(name); err == nil && uriName.Scheme != "" && uriName.Host != "" { continue } punycode, err := idna.ToASCII(name) if err != nil { log.Fatalf("ERROR: %q is not a valid hostname, IP, URL or email: %s", name, err) } args[i] = punycode if !hostnameRegexp.MatchString(punycode) { log.Fatalf("ERROR: %q is 
not a valid hostname, IP, URL or email", name) } } m.makeCert(args) } func getCAROOT() string { if env := os.Getenv("CAROOT"); env != "" { return env } var dir string switch { case runtime.GOOS == "windows": dir = os.Getenv("LocalAppData") case os.Getenv("XDG_DATA_HOME") != "": dir = os.Getenv("XDG_DATA_HOME") case runtime.GOOS == "darwin": dir = os.Getenv("HOME") if dir == "" { return "" } dir = filepath.Join(dir, "Library", "Application Support") default: // Unix dir = os.Getenv("HOME") if dir == "" { return "" } dir = filepath.Join(dir, ".local", "share") } return filepath.Join(dir, "mkcert") } func (m *mkcert) install() { if storeEnabled("system") { if m.checkPlatform() { log.Print("The local CA is already installed in the system trust store! πŸ‘") } else { if m.installPlatform() { log.Print("The local CA is now installed in the system trust store! ⚑️") } m.ignoreCheckFailure = true // TODO: replace with a check for a successful install } } if storeEnabled("nss") && hasNSS { if m.checkNSS() { log.Printf("The local CA is already installed in the %s trust store! πŸ‘", NSSBrowsers) } else { if hasCertutil && m.installNSS() { log.Printf("The local CA is now installed in the %s trust store (requires browser restart)! 🦊", NSSBrowsers) } else if CertutilInstallHelp == "" { log.Printf(`Note: %s support is not available on your platform. ℹ️`, NSSBrowsers) } else if !hasCertutil { log.Printf(`Warning: "certutil" is not available, so the CA can't be automatically installed in %s! ⚠️`, NSSBrowsers) log.Printf(`Install "certutil" with "%s" and re-run "mkcert -install" πŸ‘ˆ`, CertutilInstallHelp) } } } if storeEnabled("java") && hasJava { if m.checkJava() { log.Println("The local CA is already installed in Java's trust store! πŸ‘") } else { if hasKeytool { m.installJava() log.Println("The local CA is now installed in Java's trust store! β˜•οΈ") } else { log.Println(`Warning: "keytool" is not available, so the CA can't be automatically installed in Java's trust store! 
⚠️`) } } } log.Print("") } func (m *mkcert) uninstall() { if storeEnabled("nss") && hasNSS { if hasCertutil { m.uninstallNSS() } else if CertutilInstallHelp != "" { log.Print("") log.Printf(`Warning: "certutil" is not available, so the CA can't be automatically uninstalled from %s (if it was ever installed)! ⚠️`, NSSBrowsers) log.Printf(`You can install "certutil" with "%s" and re-run "mkcert -uninstall" πŸ‘ˆ`, CertutilInstallHelp) log.Print("") } } if storeEnabled("java") && hasJava { if hasKeytool { m.uninstallJava() } else { log.Print("") log.Println(`Warning: "keytool" is not available, so the CA can't be automatically uninstalled from Java's trust store (if it was ever installed)! ⚠️`) log.Print("") } } if storeEnabled("system") && m.uninstallPlatform() { log.Print("The local CA is now uninstalled from the system trust store(s)! πŸ‘‹") log.Print("") } else if storeEnabled("nss") && hasCertutil { log.Printf("The local CA is now uninstalled from the %s trust store(s)! πŸ‘‹", NSSBrowsers) log.Print("") } } func (m *mkcert) checkPlatform() bool { if m.ignoreCheckFailure { return true } _, err := m.caCert.Verify(x509.VerifyOptions{}) return err == nil } func storeEnabled(name string) bool { stores := os.Getenv("TRUST_STORES") if stores == "" { return true } for _, store := range strings.Split(stores, ",") { if store == name { return true } } return false } func fatalIfErr(err error, msg string) { if err != nil { log.Fatalf("ERROR: %s: %s", msg, err) } } func fatalIfCmdErr(err error, cmd string, out []byte) { if err != nil { log.Fatalf("ERROR: failed to execute \"%s\": %s\n\n%s\n", cmd, err, out) } } func pathExists(path string) bool { _, err := os.Stat(path) return err == nil } func binaryExists(name string) bool { _, err := exec.LookPath(name) return err == nil } var sudoWarningOnce sync.Once func commandWithSudo(cmd ...string) *exec.Cmd { if u, err := user.Current(); err == nil && u.Uid == "0" { return exec.Command(cmd[0], cmd[1:]...) 
} if !binaryExists("sudo") { sudoWarningOnce.Do(func() { log.Println(`Warning: "sudo" is not available, and mkcert is not running as root. The (un)install operation might fail. ⚠️`) }) return exec.Command(cmd[0], cmd[1:]...) } return exec.Command("sudo", append([]string{"--prompt=Sudo password:", "--"}, cmd...)...) }
go
BSD-3-Clause
1c1dc4ed27ed5936046b6398d39cab4d657a2d8e
2026-01-07T08:35:43.510017Z
false
base/node
https://github.com/base/node/blob/29fcaba8537427f848786b62bdcffebba6a33254/dependency_updater/dependency_updater.go
dependency_updater/dependency_updater.go
package main import ( "context" "encoding/json" "fmt" "slices" "time" "github.com/ethereum-optimism/optimism/op-service/retry" "github.com/google/go-github/v72/github" "github.com/urfave/cli/v3" "log" "os" "os/exec" "strings" ) type Info struct { Tag string `json:"tag,omitempty"` Commit string `json:"commit"` TagPrefix string `json:"tagPrefix,omitempty"` Owner string `json:"owner"` Repo string `json:"repo"` Branch string `json:"branch,omitempty"` Tracking string `json:"tracking"` } type VersionUpdateInfo struct { Repo string From string To string DiffUrl string } type Dependencies = map[string]*Info func main() { cmd := &cli.Command{ Name: "updater", Usage: "Updates the dependencies in the geth, nethermind and reth Dockerfiles", Flags: []cli.Flag{ &cli.StringFlag{ Name: "token", Usage: "Auth token used to make requests to the Github API must be set using export", Sources: cli.EnvVars("GITHUB_TOKEN"), Required: true, }, &cli.StringFlag{ Name: "repo", Usage: "Specifies repo location to run the version updater on", Required: true, }, &cli.BoolFlag{ Name: "commit", Usage: "Stages updater changes and creates commit message", Required: false, }, &cli.BoolFlag{ Name: "github-action", Usage: "Specifies whether tool is being used through github action workflow", Required: false, }, }, Action: func(ctx context.Context, cmd *cli.Command) error { err := updater(cmd.String("token"), cmd.String("repo"), cmd.Bool("commit"), cmd.Bool("github-action")) if err != nil { return fmt.Errorf("failed to run updater: %s", err) } return nil }, } if err := cmd.Run(context.Background(), os.Args); err != nil { log.Fatal(err) } } func updater(token string, repoPath string, commit bool, githubAction bool) error { var err error var dependencies Dependencies var updatedDependencies []VersionUpdateInfo f, err := os.ReadFile(repoPath + "/versions.json") if err != nil { return fmt.Errorf("error reading versions JSON: %s", err) } client := github.NewClient(nil).WithAuthToken(token) ctx := 
context.Background() err = json.Unmarshal(f, &dependencies) if err != nil { return fmt.Errorf("error unmarshalling versions JSON to dependencies: %s", err) } for dependency := range dependencies { var updatedDependency VersionUpdateInfo err := retry.Do0(context.Background(), 3, retry.Fixed(1*time.Second), func() error { updatedDependency, err = getAndUpdateDependency( ctx, client, dependency, repoPath, dependencies, ) return err }) if err != nil { return fmt.Errorf("error getting and updating version/commit for "+dependency+": %s", err) } if updatedDependency != (VersionUpdateInfo{}) { updatedDependencies = append(updatedDependencies, updatedDependency) } } e := createVersionsEnv(repoPath, dependencies) if e != nil { return fmt.Errorf("error creating versions.env: %s", e) } if (commit && updatedDependencies != nil) || (githubAction && updatedDependencies != nil) { err := createCommitMessage(updatedDependencies, repoPath, githubAction) if err != nil { return fmt.Errorf("error creating commit message: %s", err) } } return nil } func createCommitMessage(updatedDependencies []VersionUpdateInfo, repoPath string, githubAction bool) error { var repos []string descriptionLines := []string{ "### Dependency Updates", } commitTitle := "chore: updated " for _, dependency := range updatedDependencies { repo, tag := dependency.Repo, dependency.To descriptionLines = append(descriptionLines, fmt.Sprintf("**%s** - %s: [diff](%s)", repo, tag, dependency.DiffUrl)) repos = append(repos, repo) } commitDescription := strings.Join(descriptionLines, "\n") commitTitle += strings.Join(repos, ", ") if githubAction { err := writeToGithubOutput(commitTitle, commitDescription, repoPath) if err != nil { return fmt.Errorf("error creating git commit message: %s", err) } } else { cmd := exec.Command("git", "commit", "-am", commitTitle, "-m", commitDescription) if err := cmd.Run(); err != nil { return fmt.Errorf("failed to run git commit -m: %s", err) } } return nil } func getAndUpdateDependency(ctx 
context.Context, client *github.Client, dependencyType string, repoPath string, dependencies Dependencies) (VersionUpdateInfo, error) { version, commit, updatedDependency, err := getVersionAndCommit(ctx, client, dependencies, dependencyType) if err != nil { return VersionUpdateInfo{}, err } if updatedDependency != (VersionUpdateInfo{}) { e := updateVersionTagAndCommit(commit, version, dependencyType, repoPath, dependencies) if e != nil { return VersionUpdateInfo{}, fmt.Errorf("error updating version tag and commit: %s", e) } } return updatedDependency, nil } func getVersionAndCommit(ctx context.Context, client *github.Client, dependencies Dependencies, dependencyType string) (string, string, VersionUpdateInfo, error) { var version *github.RepositoryRelease var commit string var diffUrl string var updatedDependency VersionUpdateInfo foundPrefixVersion := false options := &github.ListOptions{Page: 1} if dependencies[dependencyType].Tracking == "tag" { for { releases, resp, err := client.Repositories.ListReleases( ctx, dependencies[dependencyType].Owner, dependencies[dependencyType].Repo, options) if err != nil { return "", "", VersionUpdateInfo{}, fmt.Errorf("error getting releases: %s", err) } if dependencies[dependencyType].TagPrefix == "" { version = releases[0] if *version.TagName != dependencies[dependencyType].Tag { diffUrl = generateGithubRepoUrl(dependencies, dependencyType) + "/compare/" + dependencies[dependencyType].Tag + "..." + *version.TagName } break } else if dependencies[dependencyType].TagPrefix != "" { for release := range releases { if strings.HasPrefix(*releases[release].TagName, dependencies[dependencyType].TagPrefix) { version = releases[release] foundPrefixVersion = true if *version.TagName != dependencies[dependencyType].Tag { diffUrl = generateGithubRepoUrl(dependencies, dependencyType) + "/compare/" + dependencies[dependencyType].Tag + "..." 
+ *version.TagName } break } } if foundPrefixVersion { break } options.Page = resp.NextPage } else if resp.NextPage == 0 { break } } } if diffUrl != "" { updatedDependency = VersionUpdateInfo{ dependencies[dependencyType].Repo, dependencies[dependencyType].Tag, *version.TagName, diffUrl, } } if dependencies[dependencyType].Tracking == "tag" { versionCommit, _, err := client.Repositories.GetCommit( ctx, dependencies[dependencyType].Owner, dependencies[dependencyType].Repo, "refs/tags/"+*version.TagName, &github.ListOptions{}) if err != nil { return "", "", VersionUpdateInfo{}, fmt.Errorf("error getting commit for "+dependencyType+": %s", err) } commit = *versionCommit.SHA } else if dependencies[dependencyType].Tracking == "branch" { branchCommit, _, err := client.Repositories.ListCommits( ctx, dependencies[dependencyType].Owner, dependencies[dependencyType].Repo, &github.CommitsListOptions{ SHA: dependencies[dependencyType].Branch, }, ) if err != nil { return "", "", VersionUpdateInfo{}, fmt.Errorf("error listing commits for "+dependencyType+": %s", err) } commit = *branchCommit[0].SHA if dependencies[dependencyType].Commit != commit { from, to := dependencies[dependencyType].Commit, commit diffUrl = fmt.Sprintf("%s/compare/%s...%s", generateGithubRepoUrl(dependencies, dependencyType), from, to) updatedDependency = VersionUpdateInfo{ dependencies[dependencyType].Repo, dependencies[dependencyType].Tag, commit, diffUrl, } } } if version != nil { return *version.TagName, commit, updatedDependency, nil } return "", commit, updatedDependency, nil } func updateVersionTagAndCommit( commit string, tag string, dependencyType string, repoPath string, dependencies Dependencies) error { dependencies[dependencyType].Tag = tag dependencies[dependencyType].Commit = commit err := writeToVersionsJson(repoPath, dependencies) if err != nil { return fmt.Errorf("error writing to versions "+dependencyType+": %s", err) } return nil } func writeToVersionsJson(repoPath string, dependencies 
Dependencies) error { // formatting json updatedJson, err := json.MarshalIndent(dependencies, "", " ") if err != nil { return fmt.Errorf("error marshaling dependencies json: %s", err) } e := os.WriteFile(repoPath+"/versions.json", updatedJson, 0644) if e != nil { return fmt.Errorf("error writing to versions.json: %s", e) } return nil } func createVersionsEnv(repoPath string, dependencies Dependencies) error { envLines := []string{} for dependency := range dependencies { repoUrl := generateGithubRepoUrl(dependencies, dependency) + ".git" dependencyPrefix := strings.ToUpper(dependency) if dependencies[dependency].Tracking == "branch" { dependencies[dependency].Tag = dependencies[dependency].Branch } envLines = append(envLines, fmt.Sprintf("export %s_%s=%s", dependencyPrefix, "TAG", dependencies[dependency].Tag)) envLines = append(envLines, fmt.Sprintf("export %s_%s=%s", dependencyPrefix, "COMMIT", dependencies[dependency].Commit)) envLines = append(envLines, fmt.Sprintf("export %s_%s=%s", dependencyPrefix, "REPO", repoUrl)) } slices.Sort(envLines) file, err := os.Create(repoPath + "/versions.env") if err != nil { return fmt.Errorf("error creating versions.env file: %s", err) } defer file.Close() _, err = file.WriteString(strings.Join(envLines, "\n")) if err != nil { return fmt.Errorf("error writing to versions.env file: %s", err) } return nil } func writeToGithubOutput(title string, description string, repoPath string) error { file := os.Getenv("GITHUB_OUTPUT") f, err := os.OpenFile(file, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) if err != nil { return fmt.Errorf("failed to open GITHUB_OUTPUT file: %s", err) } defer f.Close() titleToWrite := fmt.Sprintf("%s=%s\n", "TITLE", title) _, err = f.WriteString(titleToWrite) if err != nil { return fmt.Errorf("failed to write to GITHUB_OUTPUT file: %s", err) } delimiter := "EOF" descToWrite := fmt.Sprintf("%s<<%s\n%s\n%s\n", "DESC", delimiter, description, delimiter) _, err = f.WriteString(descToWrite) if err != nil { return 
fmt.Errorf("failed to write to GITHUB_OUTPUT file: %s", err) } return nil } func generateGithubRepoUrl(dependencies Dependencies, dependencyType string) string { return "https://github.com/" + dependencies[dependencyType].Owner + "/" + dependencies[dependencyType].Repo }
go
MIT
29fcaba8537427f848786b62bdcffebba6a33254
2026-01-07T08:35:43.469798Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/main_test.go
main_test.go
package main import ( "os" "testing" ) func TestMain(_ *testing.T) { os.Args = []string{"act", "--help"} main() }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/main.go
main.go
package main import ( _ "embed" "github.com/nektos/act/cmd" "github.com/nektos/act/pkg/common" ) //go:embed VERSION var version string func main() { ctx, cancel := common.CreateGracefulJobCancellationContext() defer cancel() // run the command cmd.Execute(ctx, version) }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/gh/gh.go
pkg/gh/gh.go
package gh import ( "bufio" "bytes" "context" "os/exec" ) func GetToken(ctx context.Context, workingDirectory string) (string, error) { var token string // Locate the 'gh' executable path, err := exec.LookPath("gh") if err != nil { return "", err } // Command setup cmd := exec.CommandContext(ctx, path, "auth", "token") cmd.Dir = workingDirectory // Capture the output var out bytes.Buffer cmd.Stdout = &out // Run the command err = cmd.Run() if err != nil { return "", err } // Read the first line of the output scanner := bufio.NewScanner(&out) if scanner.Scan() { token = scanner.Text() } return token, nil }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/gh/gh_test.go
pkg/gh/gh_test.go
package gh import ( "context" "testing" ) func TestGetToken(t *testing.T) { token, _ := GetToken(context.TODO(), "") t.Log(token) }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/lookpath/error.go
pkg/lookpath/error.go
package lookpath type Error struct { Name string Err error } func (e *Error) Error() string { return e.Err.Error() }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/lookpath/lp_js.go
pkg/lookpath/lp_js.go
// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build js && wasm package lookpath import ( "errors" ) // ErrNotFound is the error resulting if a path search failed to find an executable file. var ErrNotFound = errors.New("executable file not found in $PATH") // LookPath searches for an executable named file in the // directories named by the PATH environment variable. // If file contains a slash, it is tried directly and the PATH is not consulted. // The result may be an absolute path or a path relative to the current directory. func LookPath2(file string, lenv Env) (string, error) { // Wasm can not execute processes, so act as if there are no executables at all. return "", &Error{file, ErrNotFound} }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/lookpath/lp_windows.go
pkg/lookpath/lp_windows.go
// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package lookpath import ( "errors" "io/fs" "os" "path/filepath" "strings" ) // ErrNotFound is the error resulting if a path search failed to find an executable file. var ErrNotFound = errors.New("executable file not found in %PATH%") func chkStat(file string) error { d, err := os.Stat(file) if err != nil { return err } if d.IsDir() { return fs.ErrPermission } return nil } func hasExt(file string) bool { i := strings.LastIndex(file, ".") if i < 0 { return false } return strings.LastIndexAny(file, `:\/`) < i } func findExecutable(file string, exts []string) (string, error) { if len(exts) == 0 { return file, chkStat(file) } if hasExt(file) { if chkStat(file) == nil { return file, nil } } for _, e := range exts { if f := file + e; chkStat(f) == nil { return f, nil } } return "", fs.ErrNotExist } // LookPath searches for an executable named file in the // directories named by the PATH environment variable. // If file contains a slash, it is tried directly and the PATH is not consulted. // LookPath also uses PATHEXT environment variable to match // a suitable candidate. // The result may be an absolute path or a path relative to the current directory. func LookPath2(file string, lenv Env) (string, error) { var exts []string x := lenv.Getenv(`PATHEXT`) if x != "" { for _, e := range strings.Split(strings.ToLower(x), `;`) { if e == "" { continue } if e[0] != '.' { e = "." 
+ e } exts = append(exts, e) } } else { exts = []string{".com", ".exe", ".bat", ".cmd"} } if strings.ContainsAny(file, `:\/`) { if f, err := findExecutable(file, exts); err == nil { return f, nil } else { return "", &Error{file, err} } } if f, err := findExecutable(filepath.Join(".", file), exts); err == nil { return f, nil } path := lenv.Getenv("path") for _, dir := range filepath.SplitList(path) { if f, err := findExecutable(filepath.Join(dir, file), exts); err == nil { return f, nil } } return "", &Error{file, ErrNotFound} }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/lookpath/env.go
pkg/lookpath/env.go
package lookpath import "os" type Env interface { Getenv(name string) string } type defaultEnv struct { } func (*defaultEnv) Getenv(name string) string { return os.Getenv(name) } func LookPath(file string) (string, error) { return LookPath2(file, &defaultEnv{}) }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/lookpath/lp_plan9.go
pkg/lookpath/lp_plan9.go
// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package lookpath import ( "errors" "io/fs" "os" "path/filepath" "strings" ) // ErrNotFound is the error resulting if a path search failed to find an executable file. var ErrNotFound = errors.New("executable file not found in $path") func findExecutable(file string) error { d, err := os.Stat(file) if err != nil { return err } if m := d.Mode(); !m.IsDir() && m&0111 != 0 { return nil } return fs.ErrPermission } // LookPath searches for an executable named file in the // directories named by the path environment variable. // If file begins with "/", "#", "./", or "../", it is tried // directly and the path is not consulted. // The result may be an absolute path or a path relative to the current directory. func LookPath2(file string, lenv Env) (string, error) { // skip the path lookup for these prefixes skip := []string{"/", "#", "./", "../"} for _, p := range skip { if strings.HasPrefix(file, p) { err := findExecutable(file) if err == nil { return file, nil } return "", &Error{file, err} } } path := lenv.Getenv("path") for _, dir := range filepath.SplitList(path) { path := filepath.Join(dir, file) if err := findExecutable(path); err == nil { return path, nil } } return "", &Error{file, ErrNotFound} }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/lookpath/lp_unix.go
pkg/lookpath/lp_unix.go
// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris package lookpath import ( "errors" "io/fs" "os" "path/filepath" "strings" ) // ErrNotFound is the error resulting if a path search failed to find an executable file. var ErrNotFound = errors.New("executable file not found in $PATH") func findExecutable(file string) error { d, err := os.Stat(file) if err != nil { return err } if m := d.Mode(); !m.IsDir() && m&0111 != 0 { return nil } return fs.ErrPermission } // LookPath searches for an executable named file in the // directories named by the PATH environment variable. // If file contains a slash, it is tried directly and the PATH is not consulted. // The result may be an absolute path or a path relative to the current directory. func LookPath2(file string, lenv Env) (string, error) { // NOTE(rsc): I wish we could use the Plan 9 behavior here // (only bypass the path if file begins with / or ./ or ../) // but that would not match all the Unix shells. if strings.Contains(file, "/") { err := findExecutable(file) if err == nil { return file, nil } return "", &Error{file, err} } path := lenv.Getenv("PATH") for _, dir := range filepath.SplitList(path) { if dir == "" { // Unix shell semantics: path element "" means "." dir = "." } path := filepath.Join(dir, file) if err := findExecutable(path); err == nil { return path, nil } } return "", &Error{file, ErrNotFound} }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/model/job_context.go
pkg/model/job_context.go
package model type JobContext struct { Status string `json:"status"` Container struct { ID string `json:"id"` Network string `json:"network"` } `json:"container"` Services map[string]struct { ID string `json:"id"` } `json:"services"` }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/model/anchors_test.go
pkg/model/anchors_test.go
package model import ( "testing" "github.com/stretchr/testify/assert" "gopkg.in/yaml.v3" ) func TestVerifyNilAliasError(t *testing.T) { var node yaml.Node err := yaml.Unmarshal([]byte(` test: - a - b - c`), &node) *node.Content[0].Content[1].Content[1] = yaml.Node{ Kind: yaml.AliasNode, } assert.NoError(t, err) err = resolveAliases(&node) assert.Error(t, err) } func TestVerifyNoRecursion(t *testing.T) { table := []struct { name string yaml string yamlErr bool anchorErr bool }{ { name: "no anchors", yaml: ` a: x b: y c: z `, yamlErr: false, anchorErr: false, }, { name: "simple anchors", yaml: ` a: &a x b: &b y c: *a `, yamlErr: false, anchorErr: false, }, { name: "nested anchors", yaml: ` a: &a val: x b: &b val: y c: *a `, yamlErr: false, anchorErr: false, }, { name: "circular anchors", yaml: ` a: &b ref: *c b: &c ref: *b `, yamlErr: true, anchorErr: false, }, { name: "self-referencing anchor", yaml: ` a: &a ref: *a `, yamlErr: false, anchorErr: true, }, { name: "reuse snippet with anchors", yaml: ` a: &b x b: &a ref: *b c: *a `, yamlErr: false, anchorErr: false, }, } for _, tt := range table { t.Run(tt.name, func(t *testing.T) { var node yaml.Node err := yaml.Unmarshal([]byte(tt.yaml), &node) if tt.yamlErr { assert.Error(t, err) return } assert.NoError(t, err) err = resolveAliases(&node) if tt.anchorErr { assert.Error(t, err) } else { assert.NoError(t, err) } }) } }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/model/workflow_test.go
pkg/model/workflow_test.go
package model import ( "strings" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/yaml.v3" ) func TestReadWorkflow_StringEvent(t *testing.T) { yaml := ` name: local-action-docker-url on: push jobs: test: runs-on: ubuntu-latest steps: - uses: ./actions/docker-url ` workflow, err := ReadWorkflow(strings.NewReader(yaml), false) assert.NoError(t, err, "read workflow should succeed") assert.Len(t, workflow.On(), 1) assert.Contains(t, workflow.On(), "push") } func TestReadWorkflow_ListEvent(t *testing.T) { yaml := ` name: local-action-docker-url on: [push, pull_request] jobs: test: runs-on: ubuntu-latest steps: - uses: ./actions/docker-url ` workflow, err := ReadWorkflow(strings.NewReader(yaml), false) assert.NoError(t, err, "read workflow should succeed") assert.Len(t, workflow.On(), 2) assert.Contains(t, workflow.On(), "push") assert.Contains(t, workflow.On(), "pull_request") } func TestReadWorkflow_MapEvent(t *testing.T) { yaml := ` name: local-action-docker-url on: push: branches: - master pull_request: branches: - master jobs: test: runs-on: ubuntu-latest steps: - uses: ./actions/docker-url ` workflow, err := ReadWorkflow(strings.NewReader(yaml), false) assert.NoError(t, err, "read workflow should succeed") assert.Len(t, workflow.On(), 2) assert.Contains(t, workflow.On(), "push") assert.Contains(t, workflow.On(), "pull_request") } func TestReadWorkflow_RunsOnLabels(t *testing.T) { yaml := ` name: local-action-docker-url jobs: test: container: nginx:latest runs-on: labels: ubuntu-latest steps: - uses: ./actions/docker-url` workflow, err := ReadWorkflow(strings.NewReader(yaml), false) assert.NoError(t, err, "read workflow should succeed") assert.Equal(t, workflow.Jobs["test"].RunsOn(), []string{"ubuntu-latest"}) } func TestReadWorkflow_RunsOnLabelsWithGroup(t *testing.T) { yaml := ` name: local-action-docker-url jobs: test: container: nginx:latest runs-on: labels: [ubuntu-latest] group: linux steps: - uses: 
./actions/docker-url` workflow, err := ReadWorkflow(strings.NewReader(yaml), false) assert.NoError(t, err, "read workflow should succeed") assert.Equal(t, workflow.Jobs["test"].RunsOn(), []string{"ubuntu-latest", "linux"}) } func TestReadWorkflow_StringContainer(t *testing.T) { yaml := ` name: local-action-docker-url jobs: test: container: nginx:latest runs-on: ubuntu-latest steps: - uses: ./actions/docker-url test2: container: image: nginx:latest env: foo: bar runs-on: ubuntu-latest steps: - uses: ./actions/docker-url ` workflow, err := ReadWorkflow(strings.NewReader(yaml), false) assert.NoError(t, err, "read workflow should succeed") assert.Len(t, workflow.Jobs, 2) assert.Contains(t, workflow.Jobs["test"].Container().Image, "nginx:latest") assert.Contains(t, workflow.Jobs["test2"].Container().Image, "nginx:latest") assert.Contains(t, workflow.Jobs["test2"].Container().Env["foo"], "bar") } func TestReadWorkflow_ObjectContainer(t *testing.T) { yaml := ` name: local-action-docker-url jobs: test: container: image: r.example.org/something:latest credentials: username: registry-username password: registry-password env: HOME: /home/user volumes: - my_docker_volume:/volume_mount - /data/my_data - /source/directory:/destination/directory runs-on: ubuntu-latest steps: - uses: ./actions/docker-url ` workflow, err := ReadWorkflow(strings.NewReader(yaml), false) assert.NoError(t, err, "read workflow should succeed") assert.Len(t, workflow.Jobs, 1) container := workflow.GetJob("test").Container() assert.Contains(t, container.Image, "r.example.org/something:latest") assert.Contains(t, container.Env["HOME"], "/home/user") assert.Contains(t, container.Credentials["username"], "registry-username") assert.Contains(t, container.Credentials["password"], "registry-password") assert.ElementsMatch(t, container.Volumes, []string{ "my_docker_volume:/volume_mount", "/data/my_data", "/source/directory:/destination/directory", }) } func TestReadWorkflow_JobTypes(t *testing.T) { yaml := ` 
name: invalid job definition jobs: default-job: runs-on: ubuntu-latest steps: - run: echo remote-reusable-workflow-yml: uses: remote/repo/some/path/to/workflow.yml@main remote-reusable-workflow-yaml: uses: remote/repo/some/path/to/workflow.yaml@main remote-reusable-workflow-custom-path: uses: remote/repo/path/to/workflow.yml@main local-reusable-workflow-yml: uses: ./some/path/to/workflow.yml local-reusable-workflow-yaml: uses: ./some/path/to/workflow.yaml ` workflow, err := ReadWorkflow(strings.NewReader(yaml), false) assert.NoError(t, err, "read workflow should succeed") assert.Len(t, workflow.Jobs, 6) jobType, err := workflow.Jobs["default-job"].Type() assert.Equal(t, nil, err) assert.Equal(t, JobTypeDefault, jobType) jobType, err = workflow.Jobs["remote-reusable-workflow-yml"].Type() assert.Equal(t, nil, err) assert.Equal(t, JobTypeReusableWorkflowRemote, jobType) jobType, err = workflow.Jobs["remote-reusable-workflow-yaml"].Type() assert.Equal(t, nil, err) assert.Equal(t, JobTypeReusableWorkflowRemote, jobType) jobType, err = workflow.Jobs["remote-reusable-workflow-custom-path"].Type() assert.Equal(t, nil, err) assert.Equal(t, JobTypeReusableWorkflowRemote, jobType) jobType, err = workflow.Jobs["local-reusable-workflow-yml"].Type() assert.Equal(t, nil, err) assert.Equal(t, JobTypeReusableWorkflowLocal, jobType) jobType, err = workflow.Jobs["local-reusable-workflow-yaml"].Type() assert.Equal(t, nil, err) assert.Equal(t, JobTypeReusableWorkflowLocal, jobType) } func TestReadWorkflow_JobTypes_InvalidPath(t *testing.T) { yaml := ` name: invalid job definition jobs: remote-reusable-workflow-missing-version: uses: remote/repo/some/path/to/workflow.yml remote-reusable-workflow-bad-extension: uses: remote/repo/some/path/to/workflow.json local-reusable-workflow-bad-extension: uses: ./some/path/to/workflow.json local-reusable-workflow-bad-path: uses: some/path/to/workflow.yaml ` workflow, err := ReadWorkflow(strings.NewReader(yaml), false) assert.NoError(t, err, "read 
workflow should succeed") assert.Len(t, workflow.Jobs, 4) jobType, err := workflow.Jobs["remote-reusable-workflow-missing-version"].Type() assert.Equal(t, JobTypeInvalid, jobType) assert.NotEqual(t, nil, err) jobType, err = workflow.Jobs["remote-reusable-workflow-bad-extension"].Type() assert.Equal(t, JobTypeInvalid, jobType) assert.NotEqual(t, nil, err) jobType, err = workflow.Jobs["local-reusable-workflow-bad-extension"].Type() assert.Equal(t, JobTypeInvalid, jobType) assert.NotEqual(t, nil, err) jobType, err = workflow.Jobs["local-reusable-workflow-bad-path"].Type() assert.Equal(t, JobTypeInvalid, jobType) assert.NotEqual(t, nil, err) } func TestReadWorkflow_StepsTypes(t *testing.T) { yaml := ` name: invalid step definition jobs: test: runs-on: ubuntu-latest steps: - name: test1 uses: actions/checkout@v2 run: echo - name: test2 run: echo - name: test3 uses: actions/checkout@v2 - name: test4 uses: docker://nginx:latest - name: test5 uses: ./local-action ` _, err := ReadWorkflow(strings.NewReader(yaml), false) assert.Error(t, err, "read workflow should fail") } // See: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idoutputs func TestReadWorkflow_JobOutputs(t *testing.T) { yaml := ` name: job outputs definition jobs: test1: runs-on: ubuntu-latest steps: - id: test1_1 run: | echo "::set-output name=a_key::some-a_value" echo "::set-output name=b-key::some-b-value" outputs: some_a_key: ${{ steps.test1_1.outputs.a_key }} some-b-key: ${{ steps.test1_1.outputs.b-key }} test2: runs-on: ubuntu-latest needs: - test1 steps: - name: test2_1 run: | echo "${{ needs.test1.outputs.some_a_key }}" echo "${{ needs.test1.outputs.some-b-key }}" ` workflow, err := ReadWorkflow(strings.NewReader(yaml), false) assert.NoError(t, err, "read workflow should succeed") assert.Len(t, workflow.Jobs, 2) assert.Len(t, workflow.Jobs["test1"].Steps, 1) assert.Equal(t, StepTypeRun, workflow.Jobs["test1"].Steps[0].Type()) assert.Equal(t, "test1_1", 
workflow.Jobs["test1"].Steps[0].ID) assert.Len(t, workflow.Jobs["test1"].Outputs, 2) assert.Contains(t, workflow.Jobs["test1"].Outputs, "some_a_key") assert.Contains(t, workflow.Jobs["test1"].Outputs, "some-b-key") assert.Equal(t, "${{ steps.test1_1.outputs.a_key }}", workflow.Jobs["test1"].Outputs["some_a_key"]) assert.Equal(t, "${{ steps.test1_1.outputs.b-key }}", workflow.Jobs["test1"].Outputs["some-b-key"]) } func TestReadWorkflow_Strategy(t *testing.T) { w, err := NewWorkflowPlanner("testdata/strategy/push.yml", true, false) assert.NoError(t, err) p, err := w.PlanJob("strategy-only-max-parallel") assert.NoError(t, err) assert.Equal(t, len(p.Stages), 1) assert.Equal(t, len(p.Stages[0].Runs), 1) wf := p.Stages[0].Runs[0].Workflow job := wf.Jobs["strategy-only-max-parallel"] matrixes, err := job.GetMatrixes() assert.NoError(t, err) assert.Equal(t, matrixes, []map[string]interface{}{{}}) assert.Equal(t, job.Matrix(), map[string][]interface{}(nil)) assert.Equal(t, job.Strategy.MaxParallel, 2) assert.Equal(t, job.Strategy.FailFast, true) job = wf.Jobs["strategy-only-fail-fast"] matrixes, err = job.GetMatrixes() assert.NoError(t, err) assert.Equal(t, matrixes, []map[string]interface{}{{}}) assert.Equal(t, job.Matrix(), map[string][]interface{}(nil)) assert.Equal(t, job.Strategy.MaxParallel, 4) assert.Equal(t, job.Strategy.FailFast, false) job = wf.Jobs["strategy-no-matrix"] matrixes, err = job.GetMatrixes() assert.NoError(t, err) assert.Equal(t, matrixes, []map[string]interface{}{{}}) assert.Equal(t, job.Matrix(), map[string][]interface{}(nil)) assert.Equal(t, job.Strategy.MaxParallel, 2) assert.Equal(t, job.Strategy.FailFast, false) job = wf.Jobs["strategy-all"] matrixes, err = job.GetMatrixes() assert.NoError(t, err) assert.Equal(t, matrixes, []map[string]interface{}{ {"datacenter": "site-c", "node-version": "14.x", "site": "staging", "php-version": 5.4}, {"datacenter": "site-c", "node-version": "16.x", "site": "staging", "php-version": 5.4}, {"datacenter": 
"site-d", "node-version": "16.x", "site": "staging", "php-version": 5.4}, {"datacenter": "site-a", "node-version": "10.x", "site": "prod"}, {"datacenter": "site-b", "node-version": "12.x", "site": "dev"}, }, ) assert.Equal(t, job.Matrix(), map[string][]interface{}{ "datacenter": {"site-c", "site-d"}, "exclude": { map[string]interface{}{"datacenter": "site-d", "node-version": "14.x", "site": "staging"}, }, "include": { map[string]interface{}{"php-version": 5.4}, map[string]interface{}{"datacenter": "site-a", "node-version": "10.x", "site": "prod"}, map[string]interface{}{"datacenter": "site-b", "node-version": "12.x", "site": "dev"}, }, "node-version": {"14.x", "16.x"}, "site": {"staging"}, }, ) assert.Equal(t, job.Strategy.MaxParallel, 2) assert.Equal(t, job.Strategy.FailFast, false) } func TestMatrixOnlyIncludes(t *testing.T) { matrix := map[string][]interface{}{ "include": []interface{}{ map[string]interface{}{"a": "1", "b": "2"}, map[string]interface{}{"a": "3", "b": "4"}, }, } rN := yaml.Node{} err := rN.Encode(matrix) require.NoError(t, err, "encoding matrix should succeed") job := &Job{ Strategy: &Strategy{ RawMatrix: rN, }, } assert.Equal(t, job.Matrix(), matrix) matrixes, err := job.GetMatrixes() require.NoError(t, err) assert.Equal(t, matrixes, []map[string]interface{}{ {"a": "1", "b": "2"}, {"a": "3", "b": "4"}, }, ) } func TestStep_ShellCommand(t *testing.T) { tests := []struct { shell string workflowShell string want string }{ {"pwsh -v '. {0}'", "", "pwsh -v '. {0}'"}, {"pwsh", "", "pwsh -command . '{0}'"}, {"powershell", "", "powershell -command . 
'{0}'"}, {"bash", "", "bash -e {0}"}, {"bash", "bash", "bash --noprofile --norc -e -o pipefail {0}"}, } for _, tt := range tests { t.Run(tt.shell, func(t *testing.T) { got := (&Step{Shell: tt.shell, WorkflowShell: tt.workflowShell}).ShellCommand() assert.Equal(t, got, tt.want) }) } } func TestReadWorkflow_WorkflowDispatchConfig(t *testing.T) { yaml := ` name: local-action-docker-url ` workflow, err := ReadWorkflow(strings.NewReader(yaml), false) assert.NoError(t, err, "read workflow should succeed") workflowDispatch := workflow.WorkflowDispatchConfig() assert.Nil(t, workflowDispatch) yaml = ` name: local-action-docker-url on: push ` workflow, err = ReadWorkflow(strings.NewReader(yaml), false) assert.NoError(t, err, "read workflow should succeed") workflowDispatch = workflow.WorkflowDispatchConfig() assert.Nil(t, workflowDispatch) yaml = ` name: local-action-docker-url on: workflow_dispatch ` workflow, err = ReadWorkflow(strings.NewReader(yaml), false) assert.NoError(t, err, "read workflow should succeed") workflowDispatch = workflow.WorkflowDispatchConfig() assert.NotNil(t, workflowDispatch) assert.Nil(t, workflowDispatch.Inputs) yaml = ` name: local-action-docker-url on: [push, pull_request] ` workflow, err = ReadWorkflow(strings.NewReader(yaml), false) assert.NoError(t, err, "read workflow should succeed") workflowDispatch = workflow.WorkflowDispatchConfig() assert.Nil(t, workflowDispatch) yaml = ` name: local-action-docker-url on: [push, workflow_dispatch] ` workflow, err = ReadWorkflow(strings.NewReader(yaml), false) assert.NoError(t, err, "read workflow should succeed") workflowDispatch = workflow.WorkflowDispatchConfig() assert.NotNil(t, workflowDispatch) assert.Nil(t, workflowDispatch.Inputs) yaml = ` name: local-action-docker-url on: - push - workflow_dispatch ` workflow, err = ReadWorkflow(strings.NewReader(yaml), false) assert.NoError(t, err, "read workflow should succeed") workflowDispatch = workflow.WorkflowDispatchConfig() assert.NotNil(t, 
workflowDispatch) assert.Nil(t, workflowDispatch.Inputs) yaml = ` name: local-action-docker-url on: push: pull_request: ` workflow, err = ReadWorkflow(strings.NewReader(yaml), false) assert.NoError(t, err, "read workflow should succeed") workflowDispatch = workflow.WorkflowDispatchConfig() assert.Nil(t, workflowDispatch) yaml = ` name: local-action-docker-url on: push: pull_request: workflow_dispatch: inputs: logLevel: description: 'Log level' required: true default: 'warning' type: choice options: - info - warning - debug ` workflow, err = ReadWorkflow(strings.NewReader(yaml), false) assert.NoError(t, err, "read workflow should succeed") workflowDispatch = workflow.WorkflowDispatchConfig() assert.NotNil(t, workflowDispatch) assert.Equal(t, WorkflowDispatchInput{ Default: "warning", Description: "Log level", Options: []string{ "info", "warning", "debug", }, Required: true, Type: "choice", }, workflowDispatch.Inputs["logLevel"]) } func TestReadWorkflow_InvalidStringEvent(t *testing.T) { yaml := ` name: local-action-docker-url on: push2 jobs: test: runs-on: ubuntu-latest steps: - uses: ./actions/docker-url ` _, err := ReadWorkflow(strings.NewReader(yaml), true) assert.Error(t, err, "read workflow should succeed") } func TestReadWorkflow_AnchorStrict(t *testing.T) { yaml := ` on: push jobs: test: runs-on: &runner ubuntu-latest steps: - uses: &checkout actions/checkout@v5 test2: runs-on: *runner steps: - uses: *checkout ` w, err := ReadWorkflow(strings.NewReader(yaml), true) assert.NoError(t, err, "read workflow should succeed") for _, job := range w.Jobs { assert.Equal(t, []string{"ubuntu-latest"}, job.RunsOn()) assert.Equal(t, "actions/checkout@v5", job.Steps[0].Uses) } } func TestReadWorkflow_Anchor(t *testing.T) { yaml := ` jobs: test: runs-on: &runner ubuntu-latest steps: - uses: &checkout actions/checkout@v5 test2: &job runs-on: *runner steps: - uses: *checkout - run: echo $TRIGGER env: TRIGGER: &trigger push test3: *job on: push #*trigger ` w, err := 
ReadWorkflow(strings.NewReader(yaml), false) assert.NoError(t, err, "read workflow should succeed") for _, job := range w.Jobs { assert.Equal(t, []string{"ubuntu-latest"}, job.RunsOn()) assert.Equal(t, "actions/checkout@v5", job.Steps[0].Uses) } }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/model/action.go
pkg/model/action.go
package model import ( "fmt" "io" "strings" "github.com/nektos/act/pkg/schema" "gopkg.in/yaml.v3" ) // ActionRunsUsing is the type of runner for the action type ActionRunsUsing string func (a *ActionRunsUsing) UnmarshalYAML(unmarshal func(interface{}) error) error { var using string if err := unmarshal(&using); err != nil { return err } // Force input to lowercase for case insensitive comparison format := ActionRunsUsing(strings.ToLower(using)) switch format { case ActionRunsUsingNode24, ActionRunsUsingNode20, ActionRunsUsingNode16, ActionRunsUsingNode12, ActionRunsUsingDocker, ActionRunsUsingComposite: *a = format default: return fmt.Errorf("The runs.using key in action.yml must be one of: %v, got %s", []string{ ActionRunsUsingComposite, ActionRunsUsingDocker, ActionRunsUsingNode12, ActionRunsUsingNode16, ActionRunsUsingNode20, ActionRunsUsingNode24, }, format) } return nil } const ( // ActionRunsUsingNode12 for running with node12 ActionRunsUsingNode12 = "node12" // ActionRunsUsingNode16 for running with node16 ActionRunsUsingNode16 = "node16" // ActionRunsUsingNode20 for running with node20 ActionRunsUsingNode20 = "node20" // ActionRunsUsingNode24 for running with node24 ActionRunsUsingNode24 = "node24" // ActionRunsUsingDocker for running with docker ActionRunsUsingDocker = "docker" // ActionRunsUsingComposite for running composite ActionRunsUsingComposite = "composite" ) func (a ActionRunsUsing) IsNode() bool { switch a { case ActionRunsUsingNode12, ActionRunsUsingNode16, ActionRunsUsingNode20, ActionRunsUsingNode24: return true default: return false } } func (a ActionRunsUsing) IsDocker() bool { return a == ActionRunsUsingDocker } func (a ActionRunsUsing) IsComposite() bool { return a == ActionRunsUsingComposite } // ActionRuns are a field in Action type ActionRuns struct { Using ActionRunsUsing `yaml:"using"` Env map[string]string `yaml:"env"` Main string `yaml:"main"` Pre string `yaml:"pre"` PreIf string `yaml:"pre-if"` Post string `yaml:"post"` PostIf 
string `yaml:"post-if"` Image string `yaml:"image"` PreEntrypoint string `yaml:"pre-entrypoint"` Entrypoint string `yaml:"entrypoint"` PostEntrypoint string `yaml:"post-entrypoint"` Args []string `yaml:"args"` Steps []Step `yaml:"steps"` } // Action describes a metadata file for GitHub actions. The metadata filename must be either action.yml or action.yaml. The data in the metadata file defines the inputs, outputs and main entrypoint for your action. type Action struct { Name string `yaml:"name"` Author string `yaml:"author"` Description string `yaml:"description"` Inputs map[string]Input `yaml:"inputs"` Outputs map[string]Output `yaml:"outputs"` Runs ActionRuns `yaml:"runs"` Branding struct { Color string `yaml:"color"` Icon string `yaml:"icon"` } `yaml:"branding"` } func (a *Action) UnmarshalYAML(node *yaml.Node) error { // TODO enable after verifying that this runner side feature has rolled out in actions/runner // // Resolve yaml anchor aliases first // if err := resolveAliases(node); err != nil { // return err // } // Validate the schema before deserializing it into our model if err := (&schema.Node{ Definition: "action-root", Schema: schema.GetActionSchema(), }).UnmarshalYAML(node); err != nil { return err } type ActionDefault Action return node.Decode((*ActionDefault)(a)) } // Input parameters allow you to specify data that the action expects to use during runtime. GitHub stores input parameters as environment variables. Input ids with uppercase letters are converted to lowercase during runtime. We recommended using lowercase input ids. type Input struct { Description string `yaml:"description"` Required bool `yaml:"required"` Default string `yaml:"default"` } // Output parameters allow you to declare data that an action sets. Actions that run later in a workflow can use the output data set in previously run actions. 
For example, if you had an action that performed the addition of two inputs (x + y = z), the action could output the sum (z) for other actions to use as an input. type Output struct { Description string `yaml:"description"` Value string `yaml:"value"` } // ReadAction reads an action from a reader func ReadAction(in io.Reader) (*Action, error) { a := new(Action) err := yaml.NewDecoder(in).Decode(a) if err != nil { return nil, err } // set defaults if a.Runs.PreIf == "" { a.Runs.PreIf = "always()" } if a.Runs.PostIf == "" { a.Runs.PostIf = "always()" } return a, nil }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/model/workflow.go
pkg/model/workflow.go
package model import ( "errors" "fmt" "io" "reflect" "regexp" "strconv" "strings" "github.com/nektos/act/pkg/common" "github.com/nektos/act/pkg/schema" log "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" ) // Workflow is the structure of the files in .github/workflows type Workflow struct { File string Name string `yaml:"name"` RawOn yaml.Node `yaml:"on"` Env map[string]string `yaml:"env"` Jobs map[string]*Job `yaml:"jobs"` Defaults Defaults `yaml:"defaults"` } // On events for the workflow func (w *Workflow) On() []string { switch w.RawOn.Kind { case yaml.ScalarNode: var val string err := w.RawOn.Decode(&val) if err != nil { log.Fatal(err) } return []string{val} case yaml.SequenceNode: var val []string err := w.RawOn.Decode(&val) if err != nil { log.Fatal(err) } return val case yaml.MappingNode: var val map[string]interface{} err := w.RawOn.Decode(&val) if err != nil { log.Fatal(err) } var keys []string for k := range val { keys = append(keys, k) } return keys } return nil } func (w *Workflow) OnEvent(event string) interface{} { if w.RawOn.Kind == yaml.MappingNode { var val map[string]interface{} if !decodeNode(w.RawOn, &val) { return nil } return val[event] } return nil } func (w *Workflow) UnmarshalYAML(node *yaml.Node) error { // Resolve yaml anchor aliases first if err := resolveAliases(node); err != nil { return err } // Validate the schema before deserializing it into our model if err := (&schema.Node{ Definition: "workflow-root", Schema: schema.GetWorkflowSchema(), }).UnmarshalYAML(node); err != nil { return errors.Join(err, fmt.Errorf("Actions YAML Schema Validation Error detected:\nFor more information, see: https://nektosact.com/usage/schema.html")) } type WorkflowDefault Workflow return node.Decode((*WorkflowDefault)(w)) } type WorkflowStrict Workflow func (w *WorkflowStrict) UnmarshalYAML(node *yaml.Node) error { // Resolve yaml anchor aliases first if err := resolveAliases(node); err != nil { return err } // Validate the schema before deserializing it 
into our model if err := (&schema.Node{ Definition: "workflow-root-strict", Schema: schema.GetWorkflowSchema(), }).UnmarshalYAML(node); err != nil { return errors.Join(err, fmt.Errorf("Actions YAML Strict Schema Validation Error detected:\nFor more information, see: https://nektosact.com/usage/schema.html")) } type WorkflowDefault Workflow return node.Decode((*WorkflowDefault)(w)) } type WorkflowDispatchInput struct { Description string `yaml:"description"` Required bool `yaml:"required"` Default string `yaml:"default"` Type string `yaml:"type"` Options []string `yaml:"options"` } type WorkflowDispatch struct { Inputs map[string]WorkflowDispatchInput `yaml:"inputs"` } func (w *Workflow) WorkflowDispatchConfig() *WorkflowDispatch { switch w.RawOn.Kind { case yaml.ScalarNode: var val string if !decodeNode(w.RawOn, &val) { return nil } if val == "workflow_dispatch" { return &WorkflowDispatch{} } case yaml.SequenceNode: var val []string if !decodeNode(w.RawOn, &val) { return nil } for _, v := range val { if v == "workflow_dispatch" { return &WorkflowDispatch{} } } case yaml.MappingNode: var val map[string]yaml.Node if !decodeNode(w.RawOn, &val) { return nil } n, found := val["workflow_dispatch"] var workflowDispatch WorkflowDispatch if found && decodeNode(n, &workflowDispatch) { return &workflowDispatch } default: return nil } return nil } type WorkflowCallInput struct { Description string `yaml:"description"` Required bool `yaml:"required"` Default yaml.Node `yaml:"default"` Type string `yaml:"type"` } type WorkflowCallOutput struct { Description string `yaml:"description"` Value string `yaml:"value"` } type WorkflowCall struct { Inputs map[string]WorkflowCallInput `yaml:"inputs"` Outputs map[string]WorkflowCallOutput `yaml:"outputs"` } type WorkflowCallResult struct { Outputs map[string]string } func (w *Workflow) WorkflowCallConfig() *WorkflowCall { if w.RawOn.Kind != yaml.MappingNode { // The callers expect for "on: workflow_call" and "on: [ workflow_call ]" a non 
nil return value return &WorkflowCall{} } var val map[string]yaml.Node if !decodeNode(w.RawOn, &val) { return &WorkflowCall{} } var config WorkflowCall node := val["workflow_call"] if !decodeNode(node, &config) { return &WorkflowCall{} } return &config } // Job is the structure of one job in a workflow type Job struct { Name string `yaml:"name"` RawNeeds yaml.Node `yaml:"needs"` RawRunsOn yaml.Node `yaml:"runs-on"` Env yaml.Node `yaml:"env"` If yaml.Node `yaml:"if"` Steps []*Step `yaml:"steps"` TimeoutMinutes string `yaml:"timeout-minutes"` Services map[string]*ContainerSpec `yaml:"services"` Strategy *Strategy `yaml:"strategy"` RawContainer yaml.Node `yaml:"container"` Defaults Defaults `yaml:"defaults"` Outputs map[string]string `yaml:"outputs"` Uses string `yaml:"uses"` With map[string]interface{} `yaml:"with"` RawSecrets yaml.Node `yaml:"secrets"` Result string } // Strategy for the job type Strategy struct { FailFast bool MaxParallel int FailFastString string `yaml:"fail-fast"` MaxParallelString string `yaml:"max-parallel"` RawMatrix yaml.Node `yaml:"matrix"` } // Default settings that will apply to all steps in the job or workflow type Defaults struct { Run RunDefaults `yaml:"run"` } // Defaults for all run steps in the job or workflow type RunDefaults struct { Shell string `yaml:"shell"` WorkingDirectory string `yaml:"working-directory"` } // GetMaxParallel sets default and returns value for `max-parallel` func (s Strategy) GetMaxParallel() int { // MaxParallel default value is `GitHub will maximize the number of jobs run in parallel depending on the available runners on GitHub-hosted virtual machines` // So I take the liberty to hardcode default limit to 4 and this is because: // 1: tl;dr: self-hosted does only 1 parallel job - https://github.com/actions/runner/issues/639#issuecomment-825212735 // 2: GH has 20 parallel job limit (for free tier) - 
https://github.com/github/docs/blob/3ae84420bd10997bb5f35f629ebb7160fe776eae/content/actions/reference/usage-limits-billing-and-administration.md?plain=1#L45 // 3: I want to add support for MaxParallel to act and 20! parallel jobs is a bit overkill IMHO maxParallel := 4 if s.MaxParallelString != "" { var err error if maxParallel, err = strconv.Atoi(s.MaxParallelString); err != nil { log.Errorf("Failed to parse 'max-parallel' option: %v", err) } } return maxParallel } // GetFailFast sets default and returns value for `fail-fast` func (s Strategy) GetFailFast() bool { // FailFast option is true by default: https://github.com/github/docs/blob/3ae84420bd10997bb5f35f629ebb7160fe776eae/content/actions/reference/workflow-syntax-for-github-actions.md?plain=1#L1107 failFast := true log.Debug(s.FailFastString) if s.FailFastString != "" { var err error if failFast, err = strconv.ParseBool(s.FailFastString); err != nil { log.Errorf("Failed to parse 'fail-fast' option: %v", err) } } return failFast } func (j *Job) InheritSecrets() bool { if j.RawSecrets.Kind != yaml.ScalarNode { return false } var val string if !decodeNode(j.RawSecrets, &val) { return false } return val == "inherit" } func (j *Job) Secrets() map[string]string { if j.RawSecrets.Kind != yaml.MappingNode { return nil } var val map[string]string if !decodeNode(j.RawSecrets, &val) { return nil } return val } // Container details for the job func (j *Job) Container() *ContainerSpec { var val *ContainerSpec switch j.RawContainer.Kind { case yaml.ScalarNode: val = new(ContainerSpec) if !decodeNode(j.RawContainer, &val.Image) { return nil } case yaml.MappingNode: val = new(ContainerSpec) if !decodeNode(j.RawContainer, val) { return nil } } return val } // Needs list for Job func (j *Job) Needs() []string { switch j.RawNeeds.Kind { case yaml.ScalarNode: var val string if !decodeNode(j.RawNeeds, &val) { return nil } return []string{val} case yaml.SequenceNode: var val []string if !decodeNode(j.RawNeeds, &val) { return nil 
} return val } return nil } // RunsOn list for Job func (j *Job) RunsOn() []string { switch j.RawRunsOn.Kind { case yaml.MappingNode: var val struct { Group string Labels yaml.Node } if !decodeNode(j.RawRunsOn, &val) { return nil } labels := nodeAsStringSlice(val.Labels) if val.Group != "" { labels = append(labels, val.Group) } return labels default: return nodeAsStringSlice(j.RawRunsOn) } } func nodeAsStringSlice(node yaml.Node) []string { switch node.Kind { case yaml.ScalarNode: var val string if !decodeNode(node, &val) { return nil } return []string{val} case yaml.SequenceNode: var val []string if !decodeNode(node, &val) { return nil } return val } return nil } func environment(yml yaml.Node) map[string]string { env := make(map[string]string) if yml.Kind == yaml.MappingNode { if !decodeNode(yml, &env) { return nil } } return env } // Environment returns string-based key=value map for a job func (j *Job) Environment() map[string]string { return environment(j.Env) } // Matrix decodes RawMatrix YAML node func (j *Job) Matrix() map[string][]interface{} { if j.Strategy.RawMatrix.Kind == yaml.MappingNode { var val map[string][]interface{} if !decodeNode(j.Strategy.RawMatrix, &val) { return nil } return val } return nil } // GetMatrixes returns the matrix cross product // It skips includes and hard fails excludes for non-existing keys func (j *Job) GetMatrixes() ([]map[string]interface{}, error) { matrixes := make([]map[string]interface{}, 0) if j.Strategy != nil { j.Strategy.FailFast = j.Strategy.GetFailFast() j.Strategy.MaxParallel = j.Strategy.GetMaxParallel() if m := j.Matrix(); m != nil { includes := make([]map[string]interface{}, 0) extraIncludes := make([]map[string]interface{}, 0) for _, v := range m["include"] { switch t := v.(type) { case []interface{}: for _, i := range t { i := i.(map[string]interface{}) includes = append(includes, i) } case interface{}: v := v.(map[string]interface{}) includes = append(includes, v) } } delete(m, "include") excludes := 
make([]map[string]interface{}, 0) for _, e := range m["exclude"] { e := e.(map[string]interface{}) for k := range e { if _, ok := m[k]; ok { excludes = append(excludes, e) } else { // We fail completely here because that's what GitHub does for non-existing matrix keys, fail on exclude, silent skip on include return nil, fmt.Errorf("the workflow is not valid. Matrix exclude key %q does not match any key within the matrix", k) } } } delete(m, "exclude") matrixProduct := common.CartesianProduct(m) MATRIX: for _, matrix := range matrixProduct { for _, exclude := range excludes { if commonKeysMatch(matrix, exclude) { log.Debugf("Skipping matrix '%v' due to exclude '%v'", matrix, exclude) continue MATRIX } } matrixes = append(matrixes, matrix) } for _, include := range includes { matched := false for _, matrix := range matrixes { if commonKeysMatch2(matrix, include, m) { matched = true log.Debugf("Adding include values '%v' to existing entry", include) for k, v := range include { matrix[k] = v } } } if !matched { extraIncludes = append(extraIncludes, include) } } for _, include := range extraIncludes { log.Debugf("Adding include '%v'", include) matrixes = append(matrixes, include) } if len(matrixes) == 0 { matrixes = append(matrixes, make(map[string]interface{})) } } else { matrixes = append(matrixes, make(map[string]interface{})) } } else { matrixes = append(matrixes, make(map[string]interface{})) log.Debugf("Empty Strategy, matrixes=%v", matrixes) } return matrixes, nil } func commonKeysMatch(a map[string]interface{}, b map[string]interface{}) bool { for aKey, aVal := range a { if bVal, ok := b[aKey]; ok && !reflect.DeepEqual(aVal, bVal) { return false } } return true } func commonKeysMatch2(a map[string]interface{}, b map[string]interface{}, m map[string][]interface{}) bool { for aKey, aVal := range a { _, useKey := m[aKey] if bVal, ok := b[aKey]; useKey && ok && !reflect.DeepEqual(aVal, bVal) { return false } } return true } // JobType describes what type of job we 
are about to run type JobType int const ( // JobTypeDefault is all jobs that have a `run` attribute JobTypeDefault JobType = iota // JobTypeReusableWorkflowLocal is all jobs that have a `uses` that is a local workflow in the .github/workflows directory JobTypeReusableWorkflowLocal // JobTypeReusableWorkflowRemote is all jobs that have a `uses` that references a workflow file in a github repo JobTypeReusableWorkflowRemote // JobTypeInvalid represents a job which is not configured correctly JobTypeInvalid ) func (j JobType) String() string { switch j { case JobTypeDefault: return "default" case JobTypeReusableWorkflowLocal: return "local-reusable-workflow" case JobTypeReusableWorkflowRemote: return "remote-reusable-workflow" } return "unknown" } // Type returns the type of the job func (j *Job) Type() (JobType, error) { isReusable := j.Uses != "" if isReusable { isYaml, _ := regexp.MatchString(`\.(ya?ml)(?:$|@)`, j.Uses) if isYaml { isLocalPath := strings.HasPrefix(j.Uses, "./") isRemotePath, _ := regexp.MatchString(`^[^.](.+?/){2,}.+\.ya?ml@`, j.Uses) hasVersion, _ := regexp.MatchString(`\.ya?ml@`, j.Uses) if isLocalPath { return JobTypeReusableWorkflowLocal, nil } else if isRemotePath && hasVersion { return JobTypeReusableWorkflowRemote, nil } } return JobTypeInvalid, fmt.Errorf("`uses` key references invalid workflow path '%s'. 
Must start with './' if it's a local workflow, or must start with '<org>/<repo>/' and include an '@' if it's a remote workflow", j.Uses) } return JobTypeDefault, nil } // ContainerSpec is the specification of the container to use for the job type ContainerSpec struct { Image string `yaml:"image"` Env map[string]string `yaml:"env"` Ports []string `yaml:"ports"` Volumes []string `yaml:"volumes"` Options string `yaml:"options"` Credentials map[string]string `yaml:"credentials"` Entrypoint string Args string Name string Reuse bool } // Step is the structure of one step in a job type Step struct { ID string `yaml:"id"` If yaml.Node `yaml:"if"` Name string `yaml:"name"` Uses string `yaml:"uses"` Run string `yaml:"run"` WorkingDirectory string `yaml:"working-directory"` // WorkflowShell is the shell really configured in the job, directly at step level or higher in defaults.run.shell WorkflowShell string `yaml:"-"` Shell string `yaml:"shell"` Env yaml.Node `yaml:"env"` With map[string]string `yaml:"with"` RawContinueOnError string `yaml:"continue-on-error"` TimeoutMinutes string `yaml:"timeout-minutes"` } // String gets the name of step func (s *Step) String() string { if s.Name != "" { return s.Name } else if s.Uses != "" { return s.Uses } else if s.Run != "" { return s.Run } return s.ID } // Environment returns string-based key=value map for a step func (s *Step) Environment() map[string]string { return environment(s.Env) } // GetEnv gets the env for a step func (s *Step) GetEnv() map[string]string { env := s.Environment() for k, v := range s.With { envKey := regexp.MustCompile("[^A-Z0-9-]").ReplaceAllString(strings.ToUpper(k), "_") envKey = fmt.Sprintf("INPUT_%s", strings.ToUpper(envKey)) env[envKey] = v } return env } // ShellCommand returns the command for the shell func (s *Step) ShellCommand() string { shellCommand := "" //Reference: 
https://github.com/actions/runner/blob/8109c962f09d9acc473d92c595ff43afceddb347/src/Runner.Worker/Handlers/ScriptHandlerHelpers.cs#L9-L17 switch s.Shell { case "": shellCommand = "bash -e {0}" case "bash": if s.WorkflowShell == "" { shellCommand = "bash -e {0}" } else { shellCommand = "bash --noprofile --norc -e -o pipefail {0}" } case "pwsh": shellCommand = "pwsh -command . '{0}'" case "python": shellCommand = "python {0}" case "sh": shellCommand = "sh -e {0}" case "cmd": shellCommand = "cmd /D /E:ON /V:OFF /S /C \"CALL \"{0}\"\"" case "powershell": shellCommand = "powershell -command . '{0}'" default: shellCommand = s.Shell } return shellCommand } // StepType describes what type of step we are about to run type StepType int const ( // StepTypeRun is all steps that have a `run` attribute StepTypeRun StepType = iota // StepTypeUsesDockerURL is all steps that have a `uses` that is of the form `docker://...` StepTypeUsesDockerURL // StepTypeUsesActionLocal is all steps that have a `uses` that is a local action in a subdirectory StepTypeUsesActionLocal // StepTypeUsesActionRemote is all steps that have a `uses` that is a reference to a github repo StepTypeUsesActionRemote // StepTypeReusableWorkflowLocal is all steps that have a `uses` that is a local workflow in the .github/workflows directory StepTypeReusableWorkflowLocal // StepTypeReusableWorkflowRemote is all steps that have a `uses` that references a workflow file in a github repo StepTypeReusableWorkflowRemote // StepTypeInvalid is for steps that have invalid step action StepTypeInvalid ) func (s StepType) String() string { switch s { case StepTypeInvalid: return "invalid" case StepTypeRun: return "run" case StepTypeUsesActionLocal: return "local-action" case StepTypeUsesActionRemote: return "remote-action" case StepTypeUsesDockerURL: return "docker" case StepTypeReusableWorkflowLocal: return "local-reusable-workflow" case StepTypeReusableWorkflowRemote: return "remote-reusable-workflow" } return "unknown" } // 
Type returns the type of the step func (s *Step) Type() StepType { if s.Run == "" && s.Uses == "" { return StepTypeInvalid } if s.Run != "" { if s.Uses != "" { return StepTypeInvalid } return StepTypeRun } else if strings.HasPrefix(s.Uses, "docker://") { return StepTypeUsesDockerURL } else if strings.HasPrefix(s.Uses, "./.github/workflows") && (strings.HasSuffix(s.Uses, ".yml") || strings.HasSuffix(s.Uses, ".yaml")) { return StepTypeReusableWorkflowLocal } else if !strings.HasPrefix(s.Uses, "./") && strings.Contains(s.Uses, ".github/workflows") && (strings.Contains(s.Uses, ".yml@") || strings.Contains(s.Uses, ".yaml@")) { return StepTypeReusableWorkflowRemote } else if strings.HasPrefix(s.Uses, "./") { return StepTypeUsesActionLocal } return StepTypeUsesActionRemote } // ReadWorkflow returns a list of jobs for a given workflow file reader func ReadWorkflow(in io.Reader, strict bool) (*Workflow, error) { if strict { w := new(WorkflowStrict) err := yaml.NewDecoder(in).Decode(w) return (*Workflow)(w), err } w := new(Workflow) err := yaml.NewDecoder(in).Decode(w) return w, err } // GetJob will get a job by name in the workflow func (w *Workflow) GetJob(jobID string) *Job { for id, j := range w.Jobs { if jobID == id { if j.Name == "" { j.Name = id } if j.If.Value == "" { j.If.Value = "success()" } return j } } return nil } // GetJobIDs will get all the job names in the workflow func (w *Workflow) GetJobIDs() []string { ids := make([]string, 0) for id := range w.Jobs { ids = append(ids, id) } return ids } var OnDecodeNodeError = func(node yaml.Node, out interface{}, err error) { log.Fatalf("Failed to decode node %v into %T: %v", node, out, err) } func decodeNode(node yaml.Node, out interface{}) bool { if err := node.Decode(out); err != nil { if OnDecodeNodeError != nil { OnDecodeNodeError(node, out, err) } return false } return true }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/model/planner.go
pkg/model/planner.go
package model import ( "fmt" "io" "io/fs" "math" "os" "path/filepath" "regexp" "sort" log "github.com/sirupsen/logrus" ) // WorkflowPlanner contains methods for creating plans type WorkflowPlanner interface { PlanEvent(eventName string) (*Plan, error) PlanJob(jobName string) (*Plan, error) PlanAll() (*Plan, error) GetEvents() []string } // Plan contains a list of stages to run in series type Plan struct { Stages []*Stage } // Stage contains a list of runs to execute in parallel type Stage struct { Runs []*Run } // Run represents a job from a workflow that needs to be run type Run struct { Workflow *Workflow JobID string } func (r *Run) String() string { jobName := r.Job().Name if jobName == "" { jobName = r.JobID } return jobName } // Job returns the job for this Run func (r *Run) Job() *Job { return r.Workflow.GetJob(r.JobID) } type WorkflowFiles struct { workflowDirEntry os.DirEntry dirPath string } // NewWorkflowPlanner will load a specific workflow, all workflows from a directory or all workflows from a directory and its subdirectories func NewWorkflowPlanner(path string, noWorkflowRecurse, strict bool) (WorkflowPlanner, error) { path, err := filepath.Abs(path) if err != nil { return nil, err } fi, err := os.Stat(path) if err != nil { return nil, err } var workflows []WorkflowFiles if fi.IsDir() { log.Debugf("Loading workflows from '%s'", path) if noWorkflowRecurse { files, err := os.ReadDir(path) if err != nil { return nil, err } for _, v := range files { workflows = append(workflows, WorkflowFiles{ dirPath: path, workflowDirEntry: v, }) } } else { log.Debug("Loading workflows recursively") if err := filepath.Walk(path, func(p string, f os.FileInfo, err error) error { if err != nil { return err } if !f.IsDir() { log.Debugf("Found workflow '%s' in '%s'", f.Name(), p) workflows = append(workflows, WorkflowFiles{ dirPath: filepath.Dir(p), workflowDirEntry: fs.FileInfoToDirEntry(f), }) } return nil }); err != nil { return nil, err } } } else { log.Debugf("Loading 
workflow '%s'", path) dirname := filepath.Dir(path) workflows = append(workflows, WorkflowFiles{ dirPath: dirname, workflowDirEntry: fs.FileInfoToDirEntry(fi), }) } wp := new(workflowPlanner) for _, wf := range workflows { ext := filepath.Ext(wf.workflowDirEntry.Name()) if ext == ".yml" || ext == ".yaml" { f, err := os.Open(filepath.Join(wf.dirPath, wf.workflowDirEntry.Name())) if err != nil { return nil, err } log.Debugf("Reading workflow '%s'", f.Name()) workflow, err := ReadWorkflow(f, strict) if err != nil { _ = f.Close() if err == io.EOF { return nil, fmt.Errorf("unable to read workflow '%s': file is empty: %w", wf.workflowDirEntry.Name(), err) } return nil, fmt.Errorf("workflow is not valid. '%s': %w", wf.workflowDirEntry.Name(), err) } _, err = f.Seek(0, 0) if err != nil { _ = f.Close() return nil, fmt.Errorf("error occurring when resetting io pointer in '%s': %w", wf.workflowDirEntry.Name(), err) } workflow.File = wf.workflowDirEntry.Name() if workflow.Name == "" { workflow.Name = wf.workflowDirEntry.Name() } err = validateJobName(workflow) if err != nil { _ = f.Close() return nil, err } wp.workflows = append(wp.workflows, workflow) _ = f.Close() } } return wp, nil } func NewSingleWorkflowPlanner(name string, f io.Reader) (WorkflowPlanner, error) { wp := new(workflowPlanner) log.Debugf("Reading workflow %s", name) workflow, err := ReadWorkflow(f, false) if err != nil { if err == io.EOF { return nil, fmt.Errorf("unable to read workflow '%s': file is empty: %w", name, err) } return nil, fmt.Errorf("workflow is not valid. 
'%s': %w", name, err) } workflow.File = name if workflow.Name == "" { workflow.Name = name } err = validateJobName(workflow) if err != nil { return nil, err } wp.workflows = append(wp.workflows, workflow) return wp, nil } func validateJobName(workflow *Workflow) error { jobNameRegex := regexp.MustCompile(`^([[:alpha:]_][[:alnum:]_\-]*)$`) for k := range workflow.Jobs { if ok := jobNameRegex.MatchString(k); !ok { return fmt.Errorf("workflow is not valid. '%s': Job name '%s' is invalid. Names must start with a letter or '_' and contain only alphanumeric characters, '-', or '_'", workflow.Name, k) } } return nil } type workflowPlanner struct { workflows []*Workflow } // PlanEvent builds a new list of runs to execute in parallel for an event name func (wp *workflowPlanner) PlanEvent(eventName string) (*Plan, error) { plan := new(Plan) if len(wp.workflows) == 0 { log.Debug("no workflows found by planner") return plan, nil } var lastErr error for _, w := range wp.workflows { events := w.On() if len(events) == 0 { log.Debugf("no events found for workflow: %s", w.File) continue } for _, e := range events { if e == eventName { stages, err := createStages(w, w.GetJobIDs()...) 
if err != nil { log.Warn(err) lastErr = err } else { plan.mergeStages(stages) } } } } return plan, lastErr } // PlanJob builds a new run to execute in parallel for a job name func (wp *workflowPlanner) PlanJob(jobName string) (*Plan, error) { plan := new(Plan) if len(wp.workflows) == 0 { log.Debugf("no jobs found for workflow: %s", jobName) } var lastErr error for _, w := range wp.workflows { stages, err := createStages(w, jobName) if err != nil { log.Warn(err) lastErr = err } else { plan.mergeStages(stages) } } return plan, lastErr } // PlanAll builds a new run to execute in parallel all func (wp *workflowPlanner) PlanAll() (*Plan, error) { plan := new(Plan) if len(wp.workflows) == 0 { log.Debug("no workflows found by planner") return plan, nil } var lastErr error for _, w := range wp.workflows { stages, err := createStages(w, w.GetJobIDs()...) if err != nil { log.Warn(err) lastErr = err } else { plan.mergeStages(stages) } } return plan, lastErr } // GetEvents gets all the events in the workflows file func (wp *workflowPlanner) GetEvents() []string { events := make([]string, 0) for _, w := range wp.workflows { found := false for _, e := range events { for _, we := range w.On() { if e == we { found = true break } } if found { break } } if !found { events = append(events, w.On()...) 
} } // sort the list based on depth of dependencies sort.Slice(events, func(i, j int) bool { return events[i] < events[j] }) return events } // MaxRunNameLen determines the max name length of all jobs func (p *Plan) MaxRunNameLen() int { maxRunNameLen := 0 for _, stage := range p.Stages { for _, run := range stage.Runs { runNameLen := len(run.String()) if runNameLen > maxRunNameLen { maxRunNameLen = runNameLen } } } return maxRunNameLen } // GetJobIDs will get all the job names in the stage func (s *Stage) GetJobIDs() []string { names := make([]string, 0) for _, r := range s.Runs { names = append(names, r.JobID) } return names } // Merge stages with existing stages in plan func (p *Plan) mergeStages(stages []*Stage) { newStages := make([]*Stage, int(math.Max(float64(len(p.Stages)), float64(len(stages))))) for i := 0; i < len(newStages); i++ { newStages[i] = new(Stage) if i >= len(p.Stages) { newStages[i].Runs = append(newStages[i].Runs, stages[i].Runs...) } else if i >= len(stages) { newStages[i].Runs = append(newStages[i].Runs, p.Stages[i].Runs...) } else { newStages[i].Runs = append(newStages[i].Runs, p.Stages[i].Runs...) newStages[i].Runs = append(newStages[i].Runs, stages[i].Runs...) } } p.Stages = newStages } func createStages(w *Workflow, jobIDs ...string) ([]*Stage, error) { // first, build a list of all the necessary jobs to run, and their dependencies jobDependencies := make(map[string][]string) for len(jobIDs) > 0 { newJobIDs := make([]string, 0) for _, jID := range jobIDs { // make sure we haven't visited this job yet if _, ok := jobDependencies[jID]; !ok { if job := w.GetJob(jID); job != nil { jobDependencies[jID] = job.Needs() newJobIDs = append(newJobIDs, job.Needs()...) } } } jobIDs = newJobIDs } // next, build an execution graph stages := make([]*Stage, 0) for len(jobDependencies) > 0 { stage := new(Stage) for jID, jDeps := range jobDependencies { // make sure all deps are in the graph already if listInStages(jDeps, stages...) 
{ stage.Runs = append(stage.Runs, &Run{ Workflow: w, JobID: jID, }) delete(jobDependencies, jID) } } if len(stage.Runs) == 0 { return nil, fmt.Errorf("unable to build dependency graph for %s (%s)", w.Name, w.File) } stages = append(stages, stage) } return stages, nil } // return true iff all strings in srcList exist in at least one of the stages func listInStages(srcList []string, stages ...*Stage) bool { for _, src := range srcList { found := false for _, stage := range stages { for _, search := range stage.GetJobIDs() { if src == search { found = true } } } if !found { return false } } return true }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/model/github_context_test.go
pkg/model/github_context_test.go
package model import ( "context" "fmt" "testing" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" ) func TestSetRef(t *testing.T) { log.SetLevel(log.DebugLevel) oldFindGitRef := findGitRef oldFindGitRevision := findGitRevision defer func() { findGitRef = oldFindGitRef }() defer func() { findGitRevision = oldFindGitRevision }() findGitRef = func(_ context.Context, _ string) (string, error) { return "refs/heads/master", nil } findGitRevision = func(_ context.Context, _ string) (string, string, error) { return "", "1234fakesha", nil } tables := []struct { eventName string event map[string]interface{} ref string refName string }{ { eventName: "pull_request_target", event: map[string]interface{}{}, ref: "refs/heads/master", refName: "master", }, { eventName: "pull_request", event: map[string]interface{}{ "number": 1234., }, ref: "refs/pull/1234/merge", refName: "1234/merge", }, { eventName: "deployment", event: map[string]interface{}{ "deployment": map[string]interface{}{ "ref": "refs/heads/somebranch", }, }, ref: "refs/heads/somebranch", refName: "somebranch", }, { eventName: "release", event: map[string]interface{}{ "release": map[string]interface{}{ "tag_name": "v1.0.0", }, }, ref: "refs/tags/v1.0.0", refName: "v1.0.0", }, { eventName: "push", event: map[string]interface{}{ "ref": "refs/heads/somebranch", }, ref: "refs/heads/somebranch", refName: "somebranch", }, { eventName: "unknown", event: map[string]interface{}{ "repository": map[string]interface{}{ "default_branch": "main", }, }, ref: "refs/heads/main", refName: "main", }, { eventName: "no-event", event: map[string]interface{}{}, ref: "refs/heads/master", refName: "master", }, } for _, table := range tables { t.Run(table.eventName, func(t *testing.T) { ghc := &GithubContext{ EventName: table.eventName, BaseRef: "master", Event: table.event, } ghc.SetRef(context.Background(), "main", "/some/dir") ghc.SetRefTypeAndName() assert.Equal(t, table.ref, ghc.Ref) assert.Equal(t, table.refName, 
ghc.RefName) }) } t.Run("no-default-branch", func(t *testing.T) { findGitRef = func(_ context.Context, _ string) (string, error) { return "", fmt.Errorf("no default branch") } ghc := &GithubContext{ EventName: "no-default-branch", Event: map[string]interface{}{}, } ghc.SetRef(context.Background(), "", "/some/dir") assert.Equal(t, "refs/heads/master", ghc.Ref) }) } func TestSetSha(t *testing.T) { log.SetLevel(log.DebugLevel) oldFindGitRef := findGitRef oldFindGitRevision := findGitRevision defer func() { findGitRef = oldFindGitRef }() defer func() { findGitRevision = oldFindGitRevision }() findGitRef = func(_ context.Context, _ string) (string, error) { return "refs/heads/master", nil } findGitRevision = func(_ context.Context, _ string) (string, string, error) { return "", "1234fakesha", nil } tables := []struct { eventName string event map[string]interface{} sha string }{ { eventName: "pull_request_target", event: map[string]interface{}{ "pull_request": map[string]interface{}{ "base": map[string]interface{}{ "sha": "pr-base-sha", }, }, }, sha: "pr-base-sha", }, { eventName: "pull_request", event: map[string]interface{}{ "number": 1234., }, sha: "1234fakesha", }, { eventName: "deployment", event: map[string]interface{}{ "deployment": map[string]interface{}{ "sha": "deployment-sha", }, }, sha: "deployment-sha", }, { eventName: "release", event: map[string]interface{}{}, sha: "1234fakesha", }, { eventName: "push", event: map[string]interface{}{ "after": "push-sha", "deleted": false, }, sha: "push-sha", }, { eventName: "unknown", event: map[string]interface{}{}, sha: "1234fakesha", }, { eventName: "no-event", event: map[string]interface{}{}, sha: "1234fakesha", }, } for _, table := range tables { t.Run(table.eventName, func(t *testing.T) { ghc := &GithubContext{ EventName: table.eventName, BaseRef: "master", Event: table.event, } ghc.SetSha(context.Background(), "/some/dir") assert.Equal(t, table.sha, ghc.Sha) }) } }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/model/github_context.go
pkg/model/github_context.go
package model import ( "context" "fmt" "strings" "github.com/nektos/act/pkg/common" "github.com/nektos/act/pkg/common/git" ) type GithubContext struct { Event map[string]interface{} `json:"event"` EventPath string `json:"event_path"` Workflow string `json:"workflow"` RunAttempt string `json:"run_attempt"` RunID string `json:"run_id"` RunNumber string `json:"run_number"` Actor string `json:"actor"` Repository string `json:"repository"` EventName string `json:"event_name"` Sha string `json:"sha"` Ref string `json:"ref"` RefName string `json:"ref_name"` RefType string `json:"ref_type"` HeadRef string `json:"head_ref"` BaseRef string `json:"base_ref"` Token string `json:"token"` Workspace string `json:"workspace"` Action string `json:"action"` ActionPath string `json:"action_path"` ActionRef string `json:"action_ref"` ActionRepository string `json:"action_repository"` Job string `json:"job"` JobName string `json:"job_name"` RepositoryOwner string `json:"repository_owner"` RetentionDays string `json:"retention_days"` RunnerPerflog string `json:"runner_perflog"` RunnerTrackingID string `json:"runner_tracking_id"` ServerURL string `json:"server_url"` APIURL string `json:"api_url"` GraphQLURL string `json:"graphql_url"` } func asString(v interface{}) string { if v == nil { return "" } else if s, ok := v.(string); ok { return s } return "" } func nestedMapLookup(m map[string]interface{}, ks ...string) (rval interface{}) { var ok bool if len(ks) == 0 { // degenerate input return nil } if rval, ok = m[ks[0]]; !ok { return nil } else if len(ks) == 1 { // we've reached the final key return rval } else if m, ok = rval.(map[string]interface{}); !ok { return nil } // 1+ more keys return nestedMapLookup(m, ks[1:]...) 
} func withDefaultBranch(ctx context.Context, b string, event map[string]interface{}) map[string]interface{} { repoI, ok := event["repository"] if !ok { repoI = make(map[string]interface{}) } repo, ok := repoI.(map[string]interface{}) if !ok { common.Logger(ctx).Warnf("unable to set default branch to %v", b) return event } // if the branch is already there return with no changes if _, ok = repo["default_branch"]; ok { return event } repo["default_branch"] = b event["repository"] = repo return event } var findGitRef = git.FindGitRef var findGitRevision = git.FindGitRevision func (ghc *GithubContext) SetRef(ctx context.Context, defaultBranch string, repoPath string) { logger := common.Logger(ctx) // https://docs.github.com/en/actions/learn-github-actions/events-that-trigger-workflows // https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads switch ghc.EventName { case "pull_request_target": ghc.Ref = fmt.Sprintf("refs/heads/%s", ghc.BaseRef) case "pull_request", "pull_request_review", "pull_request_review_comment": ghc.Ref = fmt.Sprintf("refs/pull/%.0f/merge", ghc.Event["number"]) case "deployment", "deployment_status": ghc.Ref = asString(nestedMapLookup(ghc.Event, "deployment", "ref")) case "release": ghc.Ref = fmt.Sprintf("refs/tags/%s", asString(nestedMapLookup(ghc.Event, "release", "tag_name"))) case "push", "create", "workflow_dispatch": ghc.Ref = asString(ghc.Event["ref"]) default: defaultBranch := asString(nestedMapLookup(ghc.Event, "repository", "default_branch")) if defaultBranch != "" { ghc.Ref = fmt.Sprintf("refs/heads/%s", defaultBranch) } } if ghc.Ref == "" { ref, err := findGitRef(ctx, repoPath) if err != nil { logger.Warningf("unable to get git ref: %v", err) } else { logger.Debugf("using github ref: %s", ref) ghc.Ref = ref } // set the branch in the event data if defaultBranch != "" { ghc.Event = withDefaultBranch(ctx, defaultBranch, ghc.Event) } else { ghc.Event = withDefaultBranch(ctx, "master", ghc.Event) } 
if ghc.Ref == "" { ghc.Ref = fmt.Sprintf("refs/heads/%s", asString(nestedMapLookup(ghc.Event, "repository", "default_branch"))) } } } func (ghc *GithubContext) SetSha(ctx context.Context, repoPath string) { logger := common.Logger(ctx) // https://docs.github.com/en/actions/learn-github-actions/events-that-trigger-workflows // https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads switch ghc.EventName { case "pull_request_target": ghc.Sha = asString(nestedMapLookup(ghc.Event, "pull_request", "base", "sha")) case "deployment", "deployment_status": ghc.Sha = asString(nestedMapLookup(ghc.Event, "deployment", "sha")) case "push", "create", "workflow_dispatch": if deleted, ok := ghc.Event["deleted"].(bool); ok && !deleted { ghc.Sha = asString(ghc.Event["after"]) } } if ghc.Sha == "" { _, sha, err := findGitRevision(ctx, repoPath) if err != nil { logger.Warningf("unable to get git revision: %v", err) } else { ghc.Sha = sha } } } func (ghc *GithubContext) SetRepositoryAndOwner(ctx context.Context, githubInstance string, remoteName string, repoPath string) { if ghc.Repository == "" { repo, err := git.FindGithubRepo(ctx, repoPath, githubInstance, remoteName) if err != nil { common.Logger(ctx).Debugf("unable to get git repo (githubInstance: %v; remoteName: %v, repoPath: %v): %v", githubInstance, remoteName, repoPath, err) // nektos/act is used as a default action, so why not a repo? 
ghc.Repository = "nektos/act" ghc.RepositoryOwner = strings.Split(ghc.Repository, "/")[0] return } ghc.Repository = repo } ghc.RepositoryOwner = strings.Split(ghc.Repository, "/")[0] } func (ghc *GithubContext) SetRefTypeAndName() { var refType, refName string // https://docs.github.com/en/actions/learn-github-actions/environment-variables if strings.HasPrefix(ghc.Ref, "refs/tags/") { refType = "tag" refName = ghc.Ref[len("refs/tags/"):] } else if strings.HasPrefix(ghc.Ref, "refs/heads/") { refType = "branch" refName = ghc.Ref[len("refs/heads/"):] } else if strings.HasPrefix(ghc.Ref, "refs/pull/") { refType = "" refName = ghc.Ref[len("refs/pull/"):] } if ghc.RefType == "" { ghc.RefType = refType } if ghc.RefName == "" { ghc.RefName = refName } } func (ghc *GithubContext) SetBaseAndHeadRef() { if ghc.EventName == "pull_request" || ghc.EventName == "pull_request_target" { if ghc.BaseRef == "" { ghc.BaseRef = asString(nestedMapLookup(ghc.Event, "pull_request", "base", "ref")) } if ghc.HeadRef == "" { ghc.HeadRef = asString(nestedMapLookup(ghc.Event, "pull_request", "head", "ref")) } } }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/model/planner_test.go
pkg/model/planner_test.go
package model import ( "path/filepath" "testing" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" ) type WorkflowPlanTest struct { workflowPath string errorMessage string noWorkflowRecurse bool } func TestPlanner(t *testing.T) { log.SetLevel(log.DebugLevel) tables := []WorkflowPlanTest{ {"invalid-job-name/invalid-1.yml", "workflow is not valid. 'invalid-job-name-1': Job name 'invalid-JOB-Name-v1.2.3-docker_hub' is invalid. Names must start with a letter or '_' and contain only alphanumeric characters, '-', or '_'", false}, {"invalid-job-name/invalid-2.yml", "workflow is not valid. 'invalid-job-name-2': Job name '1234invalid-JOB-Name-v123-docker_hub' is invalid. Names must start with a letter or '_' and contain only alphanumeric characters, '-', or '_'", false}, {"invalid-job-name/valid-1.yml", "", false}, {"invalid-job-name/valid-2.yml", "", false}, {"empty-workflow", "unable to read workflow 'push.yml': file is empty: EOF", false}, {"nested", "unable to read workflow 'fail.yml': file is empty: EOF", false}, {"nested", "", true}, } workdir, err := filepath.Abs("testdata") assert.NoError(t, err, workdir) for _, table := range tables { fullWorkflowPath := filepath.Join(workdir, table.workflowPath) _, err = NewWorkflowPlanner(fullWorkflowPath, table.noWorkflowRecurse, false) if table.errorMessage == "" { assert.NoError(t, err, "WorkflowPlanner should exit without any error") } else { assert.EqualError(t, err, table.errorMessage) } } } func TestWorkflow(t *testing.T) { log.SetLevel(log.DebugLevel) workflow := Workflow{ Jobs: map[string]*Job{ "valid_job": { Name: "valid_job", }, }, } // Check that a valid job id returns non-error result, err := createStages(&workflow, "valid_job") assert.Nil(t, err) assert.NotNil(t, result) }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/model/step_result.go
pkg/model/step_result.go
package model import "fmt" type stepStatus int const ( StepStatusSuccess stepStatus = iota StepStatusFailure StepStatusSkipped ) var stepStatusStrings = [...]string{ "success", "failure", "skipped", } func (s stepStatus) MarshalText() ([]byte, error) { return []byte(s.String()), nil } func (s *stepStatus) UnmarshalText(b []byte) error { str := string(b) for i, name := range stepStatusStrings { if name == str { *s = stepStatus(i) return nil } } return fmt.Errorf("invalid step status %q", str) } func (s stepStatus) String() string { if int(s) >= len(stepStatusStrings) { return "" } return stepStatusStrings[s] } type StepResult struct { Outputs map[string]string `json:"outputs"` Conclusion stepStatus `json:"conclusion"` Outcome stepStatus `json:"outcome"` }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/model/anchors.go
pkg/model/anchors.go
package model import ( "errors" "gopkg.in/yaml.v3" ) func resolveAliasesExt(node *yaml.Node, path map[*yaml.Node]bool, skipCheck bool) error { if !skipCheck && path[node] { return errors.New("circular alias") } switch node.Kind { case yaml.AliasNode: aliasTarget := node.Alias if aliasTarget == nil { return errors.New("unresolved alias node") } path[node] = true *node = *aliasTarget if err := resolveAliasesExt(node, path, true); err != nil { return err } delete(path, node) case yaml.DocumentNode, yaml.MappingNode, yaml.SequenceNode: for _, child := range node.Content { if err := resolveAliasesExt(child, path, false); err != nil { return err } } } return nil } func resolveAliases(node *yaml.Node) error { return resolveAliasesExt(node, map[*yaml.Node]bool{}, false) }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/docker_volume.go
pkg/container/docker_volume.go
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd)) package container import ( "context" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/volume" "github.com/nektos/act/pkg/common" ) func NewDockerVolumeRemoveExecutor(volumeName string, force bool) common.Executor { return func(ctx context.Context) error { cli, err := GetDockerClient(ctx) if err != nil { return err } defer cli.Close() list, err := cli.VolumeList(ctx, volume.ListOptions{Filters: filters.NewArgs()}) if err != nil { return err } for _, vol := range list.Volumes { if vol.Name == volumeName { return removeExecutor(volumeName, force)(ctx) } } // Volume not found - do nothing return nil } } func removeExecutor(volume string, force bool) common.Executor { return func(ctx context.Context) error { logger := common.Logger(ctx) logger.Debugf("%sdocker volume rm %s", logPrefix, volume) if common.Dryrun(ctx) { return nil } cli, err := GetDockerClient(ctx) if err != nil { return err } defer cli.Close() return cli.VolumeRemove(ctx, volume, force) } }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/docker_pull.go
pkg/container/docker_pull.go
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd)) package container import ( "context" "encoding/base64" "encoding/json" "fmt" "strings" "github.com/distribution/reference" "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/registry" "github.com/nektos/act/pkg/common" ) // NewDockerPullExecutor function to create a run executor for the container func NewDockerPullExecutor(input NewDockerPullExecutorInput) common.Executor { return func(ctx context.Context) error { logger := common.Logger(ctx) logger.Debugf("%sdocker pull %v", logPrefix, input.Image) if common.Dryrun(ctx) { return nil } pull := input.ForcePull if !pull { imageExists, err := ImageExistsLocally(ctx, input.Image, input.Platform) logger.Debugf("Image exists? %v", imageExists) if err != nil { return fmt.Errorf("unable to determine if image already exists for image '%s' (%s): %w", input.Image, input.Platform, err) } if !imageExists { pull = true } } if !pull { return nil } imageRef := cleanImage(ctx, input.Image) logger.Debugf("pulling image '%v' (%s)", imageRef, input.Platform) cli, err := GetDockerClient(ctx) if err != nil { return err } defer cli.Close() imagePullOptions, err := getImagePullOptions(ctx, input) if err != nil { return err } reader, err := cli.ImagePull(ctx, imageRef, imagePullOptions) _ = logDockerResponse(logger, reader, err != nil) if err != nil { if imagePullOptions.RegistryAuth != "" && strings.Contains(err.Error(), "unauthorized") { logger.Errorf("pulling image '%v' (%s) failed with credentials %s retrying without them, please check for stale docker config files", imageRef, input.Platform, err.Error()) imagePullOptions.RegistryAuth = "" reader, err = cli.ImagePull(ctx, imageRef, imagePullOptions) _ = logDockerResponse(logger, reader, err != nil) } return err } return nil } } func getImagePullOptions(ctx context.Context, input NewDockerPullExecutorInput) (image.PullOptions, error) { imagePullOptions := image.PullOptions{ Platform: 
input.Platform, } logger := common.Logger(ctx) if input.Username != "" && input.Password != "" { logger.Debugf("using authentication for docker pull") authConfig := registry.AuthConfig{ Username: input.Username, Password: input.Password, } encodedJSON, err := json.Marshal(authConfig) if err != nil { return imagePullOptions, err } imagePullOptions.RegistryAuth = base64.URLEncoding.EncodeToString(encodedJSON) } else { authConfig, err := LoadDockerAuthConfig(ctx, input.Image) if err != nil { return imagePullOptions, err } if authConfig.Username == "" && authConfig.Password == "" { return imagePullOptions, nil } logger.Info("using DockerAuthConfig authentication for docker pull") encodedJSON, err := json.Marshal(authConfig) if err != nil { return imagePullOptions, err } imagePullOptions.RegistryAuth = base64.URLEncoding.EncodeToString(encodedJSON) } return imagePullOptions, nil } func cleanImage(ctx context.Context, image string) string { ref, err := reference.ParseAnyReference(image) if err != nil { common.Logger(ctx).Error(err) return "" } return ref.String() }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/docker_auth.go
pkg/container/docker_auth.go
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd)) package container import ( "context" "strings" "github.com/docker/cli/cli/config" "github.com/docker/cli/cli/config/credentials" "github.com/docker/docker/api/types/registry" "github.com/nektos/act/pkg/common" ) func LoadDockerAuthConfig(ctx context.Context, image string) (registry.AuthConfig, error) { logger := common.Logger(ctx) config, err := config.Load(config.Dir()) if err != nil { logger.Warnf("Could not load docker config: %v", err) return registry.AuthConfig{}, err } if !config.ContainsAuth() { config.CredentialsStore = credentials.DetectDefaultStore(config.CredentialsStore) } hostName := "index.docker.io" index := strings.IndexRune(image, '/') if index > -1 && (strings.ContainsAny(image[:index], ".:") || image[:index] == "localhost") { hostName = image[:index] } authConfig, err := config.GetAuthConfig(hostName) if err != nil { logger.Warnf("Could not get auth config from docker config: %v", err) return registry.AuthConfig{}, err } return registry.AuthConfig(authConfig), nil } func LoadDockerAuthConfigs(ctx context.Context) map[string]registry.AuthConfig { logger := common.Logger(ctx) config, err := config.Load(config.Dir()) if err != nil { logger.Warnf("Could not load docker config: %v", err) return nil } if !config.ContainsAuth() { config.CredentialsStore = credentials.DetectDefaultStore(config.CredentialsStore) } creds, _ := config.GetAllCredentials() authConfigs := make(map[string]registry.AuthConfig, len(creds)) for k, v := range creds { authConfigs[k] = registry.AuthConfig(v) } return authConfigs }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/docker_run_test.go
pkg/container/docker_run_test.go
package container import ( "bufio" "bytes" "context" "fmt" "io" "net" "strings" "testing" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" "github.com/nektos/act/pkg/common" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" ) func TestDocker(t *testing.T) { ctx := context.Background() client, err := GetDockerClient(ctx) assert.NoError(t, err) defer client.Close() dockerBuild := NewDockerBuildExecutor(NewDockerBuildExecutorInput{ ContextDir: "testdata", ImageTag: "envmergetest", }) err = dockerBuild(ctx) assert.NoError(t, err) cr := &containerReference{ cli: client, input: &NewContainerInput{ Image: "envmergetest", }, } env := map[string]string{ "PATH": "/usr/local/bin:/usr/bin:/usr/sbin:/bin:/sbin", "RANDOM_VAR": "WITH_VALUE", "ANOTHER_VAR": "", "CONFLICT_VAR": "I_EXIST_IN_MULTIPLE_PLACES", } envExecutor := cr.extractFromImageEnv(&env) err = envExecutor(ctx) assert.NoError(t, err) assert.Equal(t, map[string]string{ "PATH": "/usr/local/bin:/usr/bin:/usr/sbin:/bin:/sbin:/this/path/does/not/exists/anywhere:/this/either", "RANDOM_VAR": "WITH_VALUE", "ANOTHER_VAR": "", "SOME_RANDOM_VAR": "", "ANOTHER_ONE": "BUT_I_HAVE_VALUE", "CONFLICT_VAR": "I_EXIST_IN_MULTIPLE_PLACES", }, env) } type mockDockerClient struct { client.APIClient mock.Mock } func (m *mockDockerClient) ContainerExecCreate(ctx context.Context, id string, opts container.ExecOptions) (container.ExecCreateResponse, error) { args := m.Called(ctx, id, opts) return args.Get(0).(container.ExecCreateResponse), args.Error(1) } func (m *mockDockerClient) ContainerExecAttach(ctx context.Context, id string, opts container.ExecStartOptions) (types.HijackedResponse, error) { args := m.Called(ctx, id, opts) return args.Get(0).(types.HijackedResponse), args.Error(1) } func (m *mockDockerClient) ContainerExecInspect(ctx context.Context, execID string) (container.ExecInspect, error) { args := m.Called(ctx, execID) return 
args.Get(0).(container.ExecInspect), args.Error(1) } func (m *mockDockerClient) CopyToContainer(ctx context.Context, id string, path string, content io.Reader, options container.CopyToContainerOptions) error { args := m.Called(ctx, id, path, content, options) return args.Error(0) } type endlessReader struct { io.Reader } func (r endlessReader) Read(_ []byte) (n int, err error) { return 1, nil } type mockConn struct { net.Conn mock.Mock } func (m *mockConn) Write(b []byte) (n int, err error) { args := m.Called(b) return args.Int(0), args.Error(1) } func (m *mockConn) Close() (err error) { return nil } func TestDockerExecAbort(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) conn := &mockConn{} conn.On("Write", mock.AnythingOfType("[]uint8")).Return(1, nil) client := &mockDockerClient{} client.On("ContainerExecCreate", ctx, "123", mock.AnythingOfType("container.ExecOptions")).Return(container.ExecCreateResponse{ID: "id"}, nil) client.On("ContainerExecAttach", ctx, "id", mock.AnythingOfType("container.ExecStartOptions")).Return(types.HijackedResponse{ Conn: conn, Reader: bufio.NewReader(endlessReader{}), }, nil) cr := &containerReference{ id: "123", cli: client, input: &NewContainerInput{ Image: "image", }, } channel := make(chan error) go func() { channel <- cr.exec([]string{""}, map[string]string{}, "user", "workdir")(ctx) }() time.Sleep(500 * time.Millisecond) cancel() err := <-channel assert.ErrorIs(t, err, context.Canceled) conn.AssertExpectations(t) client.AssertExpectations(t) } func TestDockerExecFailure(t *testing.T) { ctx := context.Background() conn := &mockConn{} client := &mockDockerClient{} client.On("ContainerExecCreate", ctx, "123", mock.AnythingOfType("container.ExecOptions")).Return(container.ExecCreateResponse{ID: "id"}, nil) client.On("ContainerExecAttach", ctx, "id", mock.AnythingOfType("container.ExecStartOptions")).Return(types.HijackedResponse{ Conn: conn, Reader: bufio.NewReader(strings.NewReader("output")), }, nil) 
client.On("ContainerExecInspect", ctx, "id").Return(container.ExecInspect{ ExitCode: 1, }, nil) cr := &containerReference{ id: "123", cli: client, input: &NewContainerInput{ Image: "image", }, } err := cr.exec([]string{""}, map[string]string{}, "user", "workdir")(ctx) assert.Error(t, err, "exit with `FAILURE`: 1") conn.AssertExpectations(t) client.AssertExpectations(t) } func TestDockerCopyTarStream(t *testing.T) { ctx := context.Background() conn := &mockConn{} client := &mockDockerClient{} client.On("CopyToContainer", ctx, "123", "/", mock.Anything, mock.AnythingOfType("container.CopyToContainerOptions")).Return(nil) client.On("CopyToContainer", ctx, "123", "/var/run/act", mock.Anything, mock.AnythingOfType("container.CopyToContainerOptions")).Return(nil) cr := &containerReference{ id: "123", cli: client, input: &NewContainerInput{ Image: "image", }, } _ = cr.CopyTarStream(ctx, "/var/run/act", &bytes.Buffer{}) conn.AssertExpectations(t) client.AssertExpectations(t) } func TestDockerCopyTarStreamDryRun(t *testing.T) { ctx := common.WithDryrun(context.Background(), true) conn := &mockConn{} client := &mockDockerClient{} client.AssertNotCalled(t, "CopyToContainer", ctx, "123", "/", mock.Anything, mock.AnythingOfType("container.CopyToContainerOptions")) client.AssertNotCalled(t, "CopyToContainer", ctx, "123", "/var/run/act", mock.Anything, mock.AnythingOfType("container.CopyToContainerOptions")) cr := &containerReference{ id: "123", cli: client, input: &NewContainerInput{ Image: "image", }, } _ = cr.CopyTarStream(ctx, "/var/run/act", &bytes.Buffer{}) conn.AssertExpectations(t) client.AssertExpectations(t) } func TestDockerCopyTarStreamErrorInCopyFiles(t *testing.T) { ctx := context.Background() conn := &mockConn{} merr := fmt.Errorf("Failure") client := &mockDockerClient{} client.On("CopyToContainer", ctx, "123", "/", mock.Anything, mock.AnythingOfType("container.CopyToContainerOptions")).Return(merr) client.On("CopyToContainer", ctx, "123", "/", mock.Anything, 
mock.AnythingOfType("container.CopyToContainerOptions")).Return(merr) cr := &containerReference{ id: "123", cli: client, input: &NewContainerInput{ Image: "image", }, } err := cr.CopyTarStream(ctx, "/var/run/act", &bytes.Buffer{}) assert.ErrorIs(t, err, merr) conn.AssertExpectations(t) client.AssertExpectations(t) } func TestDockerCopyTarStreamErrorInMkdir(t *testing.T) { ctx := context.Background() conn := &mockConn{} merr := fmt.Errorf("Failure") client := &mockDockerClient{} client.On("CopyToContainer", ctx, "123", "/", mock.Anything, mock.AnythingOfType("container.CopyToContainerOptions")).Return(nil) client.On("CopyToContainer", ctx, "123", "/var/run/act", mock.Anything, mock.AnythingOfType("container.CopyToContainerOptions")).Return(merr) cr := &containerReference{ id: "123", cli: client, input: &NewContainerInput{ Image: "image", }, } err := cr.CopyTarStream(ctx, "/var/run/act", &bytes.Buffer{}) assert.ErrorIs(t, err, merr) conn.AssertExpectations(t) client.AssertExpectations(t) } // Type assert containerReference implements ExecutionsEnvironment var _ ExecutionsEnvironment = &containerReference{}
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/docker_socket_test.go
pkg/container/docker_socket_test.go
package container import ( "os" "testing" log "github.com/sirupsen/logrus" assert "github.com/stretchr/testify/assert" ) func init() { log.SetLevel(log.DebugLevel) } var originalCommonSocketLocations = CommonSocketLocations func TestGetSocketAndHostWithSocket(t *testing.T) { // Arrange CommonSocketLocations = originalCommonSocketLocations dockerHost := "unix:///my/docker/host.sock" socketURI := "/path/to/my.socket" os.Setenv("DOCKER_HOST", dockerHost) // Act ret, err := GetSocketAndHost(socketURI) // Assert assert.Nil(t, err) assert.Equal(t, SocketAndHost{socketURI, dockerHost}, ret) } func TestGetSocketAndHostNoSocket(t *testing.T) { // Arrange dockerHost := "unix:///my/docker/host.sock" os.Setenv("DOCKER_HOST", dockerHost) // Act ret, err := GetSocketAndHost("") // Assert assert.Nil(t, err) assert.Equal(t, SocketAndHost{dockerHost, dockerHost}, ret) } func TestGetSocketAndHostOnlySocket(t *testing.T) { // Arrange socketURI := "/path/to/my.socket" os.Unsetenv("DOCKER_HOST") CommonSocketLocations = originalCommonSocketLocations defaultSocket, defaultSocketFound := socketLocation() // Act ret, err := GetSocketAndHost(socketURI) // Assert assert.NoError(t, err, "Expected no error from GetSocketAndHost") assert.Equal(t, true, defaultSocketFound, "Expected to find default socket") assert.Equal(t, socketURI, ret.Socket, "Expected socket to match common location") assert.Equal(t, defaultSocket, ret.Host, "Expected ret.Host to match default socket location") } func TestGetSocketAndHostDontMount(t *testing.T) { // Arrange CommonSocketLocations = originalCommonSocketLocations dockerHost := "unix:///my/docker/host.sock" os.Setenv("DOCKER_HOST", dockerHost) // Act ret, err := GetSocketAndHost("-") // Assert assert.Nil(t, err) assert.Equal(t, SocketAndHost{"-", dockerHost}, ret) } func TestGetSocketAndHostNoHostNoSocket(t *testing.T) { // Arrange CommonSocketLocations = originalCommonSocketLocations os.Unsetenv("DOCKER_HOST") defaultSocket, found := socketLocation() // Act 
ret, err := GetSocketAndHost("") // Assert assert.Equal(t, true, found, "Expected a default socket to be found") assert.Nil(t, err, "Expected no error from GetSocketAndHost") assert.Equal(t, SocketAndHost{defaultSocket, defaultSocket}, ret, "Expected to match default socket location") } // Catch // > Your code breaks setting DOCKER_HOST if shouldMount is false. // > This happens if neither DOCKER_HOST nor --container-daemon-socket has a value, but socketLocation() returns a URI func TestGetSocketAndHostNoHostNoSocketDefaultLocation(t *testing.T) { // Arrange mySocketFile, tmpErr := os.CreateTemp("", "act-*.sock") mySocket := mySocketFile.Name() unixSocket := "unix://" + mySocket defer os.RemoveAll(mySocket) assert.NoError(t, tmpErr) os.Unsetenv("DOCKER_HOST") CommonSocketLocations = []string{mySocket} defaultSocket, found := socketLocation() // Act ret, err := GetSocketAndHost("") // Assert assert.Equal(t, unixSocket, defaultSocket, "Expected default socket to match common socket location") assert.Equal(t, true, found, "Expected default socket to be found") assert.Nil(t, err, "Expected no error from GetSocketAndHost") assert.Equal(t, SocketAndHost{unixSocket, unixSocket}, ret, "Expected to match default socket location") } func TestGetSocketAndHostNoHostInvalidSocket(t *testing.T) { // Arrange os.Unsetenv("DOCKER_HOST") mySocket := "/my/socket/path.sock" CommonSocketLocations = []string{"/unusual", "/socket", "/location"} defaultSocket, found := socketLocation() // Act ret, err := GetSocketAndHost(mySocket) // Assert assert.Equal(t, false, found, "Expected no default socket to be found") assert.Equal(t, "", defaultSocket, "Expected no default socket to be found") assert.Equal(t, SocketAndHost{}, ret, "Expected to match default socket location") assert.Error(t, err, "Expected an error in invalid state") } func TestGetSocketAndHostOnlySocketValidButUnusualLocation(t *testing.T) { // Arrange socketURI := "unix:///path/to/my.socket" CommonSocketLocations = 
[]string{"/unusual", "/location"} os.Unsetenv("DOCKER_HOST") defaultSocket, found := socketLocation() // Act ret, err := GetSocketAndHost(socketURI) // Assert // Default socket locations assert.Equal(t, "", defaultSocket, "Expect default socket location to be empty") assert.Equal(t, false, found, "Expected no default socket to be found") // Sane default assert.Nil(t, err, "Expect no error from GetSocketAndHost") assert.Equal(t, socketURI, ret.Host, "Expect host to default to unusual socket") }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/linux_container_environment_extensions.go
pkg/container/linux_container_environment_extensions.go
package container import ( "context" "path/filepath" "regexp" "runtime" "strings" log "github.com/sirupsen/logrus" ) type LinuxContainerEnvironmentExtensions struct { } // Resolves the equivalent host path inside the container // This is required for windows and WSL 2 to translate things like C:\Users\Myproject to /mnt/users/Myproject // For use in docker volumes and binds func (*LinuxContainerEnvironmentExtensions) ToContainerPath(path string) string { if runtime.GOOS == "windows" && strings.Contains(path, "/") { log.Error("You cannot specify linux style local paths (/mnt/etc) on Windows as it does not understand them.") return "" } abspath, err := filepath.Abs(path) if err != nil { log.Error(err) return "" } // Test if the path is a windows path windowsPathRegex := regexp.MustCompile(`^([a-zA-Z]):\\(.+)$`) windowsPathComponents := windowsPathRegex.FindStringSubmatch(abspath) // Return as-is if no match if windowsPathComponents == nil { return abspath } // Convert to WSL2-compatible path if it is a windows path // NOTE: Cannot use filepath because it will use the wrong path separators assuming we want the path to be windows // based if running on Windows, and because we are feeding this to Docker, GoLang auto-path-translate doesn't work. 
driveLetter := strings.ToLower(windowsPathComponents[1]) translatedPath := strings.ReplaceAll(windowsPathComponents[2], `\`, `/`) // Should make something like /mnt/c/Users/person/My Folder/MyActProject result := strings.Join([]string{"/mnt", driveLetter, translatedPath}, `/`) return result } func (*LinuxContainerEnvironmentExtensions) GetActPath() string { return "/var/run/act" } func (*LinuxContainerEnvironmentExtensions) GetPathVariableName() string { return "PATH" } func (*LinuxContainerEnvironmentExtensions) DefaultPathVariable() string { return "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" } func (*LinuxContainerEnvironmentExtensions) JoinPathVariable(paths ...string) string { return strings.Join(paths, ":") } func (*LinuxContainerEnvironmentExtensions) GetRunnerContext(ctx context.Context) map[string]interface{} { return map[string]interface{}{ "os": "Linux", "arch": RunnerArch(ctx), "temp": "/tmp", "tool_cache": "/opt/hostedtoolcache", } } func (*LinuxContainerEnvironmentExtensions) IsEnvironmentCaseInsensitive() bool { return false }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/parse_env_file.go
pkg/container/parse_env_file.go
package container import ( "archive/tar" "bufio" "context" "fmt" "io" "strings" "github.com/nektos/act/pkg/common" ) func parseEnvFile(e Container, srcPath string, env *map[string]string) common.Executor { localEnv := *env return func(ctx context.Context) error { envTar, err := e.GetContainerArchive(ctx, srcPath) if err != nil { return nil } defer envTar.Close() reader := tar.NewReader(envTar) _, err = reader.Next() if err != nil && err != io.EOF { return err } s := bufio.NewScanner(reader) s.Buffer(nil, 1024*1024*1024) // increase buffer to 1GB to avoid scanner buffer overflow firstLine := true for s.Scan() { line := s.Text() if firstLine { firstLine = false // skip utf8 bom, powershell 5 legacy uses it for utf8 if len(line) >= 3 && line[0] == 239 && line[1] == 187 && line[2] == 191 { line = line[3:] } } singleLineEnv := strings.Index(line, "=") multiLineEnv := strings.Index(line, "<<") if singleLineEnv != -1 && (multiLineEnv == -1 || singleLineEnv < multiLineEnv) { localEnv[line[:singleLineEnv]] = line[singleLineEnv+1:] } else if multiLineEnv != -1 { multiLineEnvContent := "" multiLineEnvDelimiter := line[multiLineEnv+2:] delimiterFound := false for s.Scan() { content := s.Text() if content == multiLineEnvDelimiter { delimiterFound = true break } if multiLineEnvContent != "" { multiLineEnvContent += "\n" } multiLineEnvContent += content } if !delimiterFound { return fmt.Errorf("invalid format delimiter '%v' not found before end of file", multiLineEnvDelimiter) } localEnv[line[:multiLineEnv]] = multiLineEnvContent } else { return fmt.Errorf("invalid format '%v', expected a line with '=' or '<<'", line) } } env = &localEnv return s.Err() } }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/host_environment.go
pkg/container/host_environment.go
package container import ( "archive/tar" "bytes" "context" "errors" "fmt" "io" "io/fs" "os" "os/exec" "path/filepath" "runtime" "strings" "time" "github.com/go-git/go-billy/v5/helper/polyfill" "github.com/go-git/go-billy/v5/osfs" "github.com/go-git/go-git/v5/plumbing/format/gitignore" "golang.org/x/term" "github.com/nektos/act/pkg/common" "github.com/nektos/act/pkg/filecollector" "github.com/nektos/act/pkg/lookpath" ) type HostEnvironment struct { Path string TmpDir string ToolCache string Workdir string ActPath string CleanUp func() StdOut io.Writer } func (e *HostEnvironment) Create(_ []string, _ []string) common.Executor { return func(_ context.Context) error { return nil } } func (e *HostEnvironment) Close() common.Executor { return func(_ context.Context) error { return nil } } func (e *HostEnvironment) Copy(destPath string, files ...*FileEntry) common.Executor { return func(_ context.Context) error { for _, f := range files { if err := os.MkdirAll(filepath.Dir(filepath.Join(destPath, f.Name)), 0o777); err != nil { return err } if err := os.WriteFile(filepath.Join(destPath, f.Name), []byte(f.Body), fs.FileMode(f.Mode)); err != nil { return err } } return nil } } func (e *HostEnvironment) CopyTarStream(ctx context.Context, destPath string, tarStream io.Reader) error { if err := os.RemoveAll(destPath); err != nil { return err } tr := tar.NewReader(tarStream) cp := &filecollector.CopyCollector{ DstDir: destPath, } for { ti, err := tr.Next() if errors.Is(err, io.EOF) { return nil } else if err != nil { return err } if ti.FileInfo().IsDir() { continue } if ctx.Err() != nil { return fmt.Errorf("CopyTarStream has been cancelled") } if err := cp.WriteFile(ti.Name, ti.FileInfo(), ti.Linkname, tr); err != nil { return err } } } func (e *HostEnvironment) CopyDir(destPath string, srcPath string, useGitIgnore bool) common.Executor { return func(ctx context.Context) error { logger := common.Logger(ctx) srcPrefix := filepath.Dir(srcPath) if !strings.HasSuffix(srcPrefix, 
string(filepath.Separator)) { srcPrefix += string(filepath.Separator) } logger.Debugf("Stripping prefix:%s src:%s", srcPrefix, srcPath) var ignorer gitignore.Matcher if useGitIgnore { ps, err := gitignore.ReadPatterns(polyfill.New(osfs.New(srcPath)), nil) if err != nil { logger.Debugf("Error loading .gitignore: %v", err) } ignorer = gitignore.NewMatcher(ps) } fc := &filecollector.FileCollector{ Fs: &filecollector.DefaultFs{}, Ignorer: ignorer, SrcPath: srcPath, SrcPrefix: srcPrefix, Handler: &filecollector.CopyCollector{ DstDir: destPath, }, } return filepath.Walk(srcPath, fc.CollectFiles(ctx, []string{})) } } func (e *HostEnvironment) GetContainerArchive(ctx context.Context, srcPath string) (io.ReadCloser, error) { buf := &bytes.Buffer{} tw := tar.NewWriter(buf) defer tw.Close() srcPath = filepath.Clean(srcPath) fi, err := os.Lstat(srcPath) if err != nil { return nil, err } tc := &filecollector.TarCollector{ TarWriter: tw, } if fi.IsDir() { srcPrefix := srcPath if !strings.HasSuffix(srcPrefix, string(filepath.Separator)) { srcPrefix += string(filepath.Separator) } fc := &filecollector.FileCollector{ Fs: &filecollector.DefaultFs{}, SrcPath: srcPath, SrcPrefix: srcPrefix, Handler: tc, } err = filepath.Walk(srcPath, fc.CollectFiles(ctx, []string{})) if err != nil { return nil, err } } else { var f io.ReadCloser var linkname string if fi.Mode()&fs.ModeSymlink != 0 { linkname, err = os.Readlink(srcPath) if err != nil { return nil, err } } else { f, err = os.Open(srcPath) if err != nil { return nil, err } defer f.Close() } err := tc.WriteFile(fi.Name(), fi, linkname, f) if err != nil { return nil, err } } return io.NopCloser(buf), nil } func (e *HostEnvironment) Pull(_ bool) common.Executor { return func(_ context.Context) error { return nil } } func (e *HostEnvironment) Start(_ bool) common.Executor { return func(_ context.Context) error { return nil } } type ptyWriter struct { Out io.Writer AutoStop bool dirtyLine bool } func (w *ptyWriter) Write(buf []byte) (int, 
error) { if w.AutoStop && len(buf) > 0 && buf[len(buf)-1] == 4 { n, err := w.Out.Write(buf[:len(buf)-1]) if err != nil { return n, err } if w.dirtyLine || len(buf) > 1 && buf[len(buf)-2] != '\n' { _, _ = w.Out.Write([]byte("\n")) return n, io.EOF } return n, io.EOF } w.dirtyLine = strings.LastIndex(string(buf), "\n") < len(buf)-1 return w.Out.Write(buf) } type localEnv struct { env map[string]string } func (l *localEnv) Getenv(name string) string { if runtime.GOOS == "windows" { for k, v := range l.env { if strings.EqualFold(name, k) { return v } } return "" } return l.env[name] } func lookupPathHost(cmd string, env map[string]string, writer io.Writer) (string, error) { f, err := lookpath.LookPath2(cmd, &localEnv{env: env}) if err != nil { err := "Cannot find: " + fmt.Sprint(cmd) + " in PATH" if _, _err := writer.Write([]byte(err + "\n")); _err != nil { return "", fmt.Errorf("%v: %w", err, _err) } return "", errors.New(err) } return f, nil } func setupPty(cmd *exec.Cmd, cmdline string) (*os.File, *os.File, error) { ppty, tty, err := openPty() if err != nil { return nil, nil, err } if term.IsTerminal(int(tty.Fd())) { _, err := term.MakeRaw(int(tty.Fd())) if err != nil { ppty.Close() tty.Close() return nil, nil, err } } cmd.Stdin = tty cmd.Stdout = tty cmd.Stderr = tty cmd.SysProcAttr = getSysProcAttr(cmdline, true) return ppty, tty, nil } func writeKeepAlive(ppty io.Writer) { c := 1 var err error for c == 1 && err == nil { c, err = ppty.Write([]byte{4}) <-time.After(time.Second) } } func copyPtyOutput(writer io.Writer, ppty io.Reader, finishLog context.CancelFunc) { defer func() { finishLog() }() if _, err := io.Copy(writer, ppty); err != nil { return } } func (e *HostEnvironment) UpdateFromImageEnv(_ *map[string]string) common.Executor { return func(_ context.Context) error { return nil } } func getEnvListFromMap(env map[string]string) []string { envList := make([]string, 0) for k, v := range env { envList = append(envList, fmt.Sprintf("%s=%s", k, v)) } return 
envList } func (e *HostEnvironment) exec(ctx context.Context, command []string, cmdline string, env map[string]string, _, workdir string) error { envList := getEnvListFromMap(env) var wd string if workdir != "" { if filepath.IsAbs(workdir) { wd = workdir } else { wd = filepath.Join(e.Path, workdir) } } else { wd = e.Path } f, err := lookupPathHost(command[0], env, e.StdOut) if err != nil { return err } cmd := exec.CommandContext(ctx, f) cmd.Path = f cmd.Args = command cmd.Stdin = nil cmd.Stdout = e.StdOut cmd.Env = envList cmd.Stderr = e.StdOut cmd.Dir = wd cmd.SysProcAttr = getSysProcAttr(cmdline, false) var ppty *os.File var tty *os.File defer func() { if ppty != nil { ppty.Close() } if tty != nil { tty.Close() } }() if true /* allocate Terminal */ { var err error ppty, tty, err = setupPty(cmd, cmdline) if err != nil { common.Logger(ctx).Debugf("Failed to setup Pty %v\n", err.Error()) } } writer := &ptyWriter{Out: e.StdOut} logctx, finishLog := context.WithCancel(context.Background()) if ppty != nil { go copyPtyOutput(writer, ppty, finishLog) } else { finishLog() } if ppty != nil { go writeKeepAlive(ppty) } err = cmd.Run() if err != nil { return err } if tty != nil { writer.AutoStop = true if _, err := tty.Write([]byte("\x04")); err != nil { common.Logger(ctx).Debug("Failed to write EOT") } } <-logctx.Done() if ppty != nil { ppty.Close() ppty = nil } return err } func (e *HostEnvironment) Exec(command []string /*cmdline string, */, env map[string]string, user, workdir string) common.Executor { return e.ExecWithCmdLine(command, "", env, user, workdir) } func (e *HostEnvironment) ExecWithCmdLine(command []string, cmdline string, env map[string]string, user, workdir string) common.Executor { return func(ctx context.Context) error { if err := e.exec(ctx, command, cmdline, env, user, workdir); err != nil { select { case <-ctx.Done(): return fmt.Errorf("this step has been cancelled: %w", err) default: return err } } return nil } } func (e *HostEnvironment) 
UpdateFromEnv(srcPath string, env *map[string]string) common.Executor { return parseEnvFile(e, srcPath, env) } func (e *HostEnvironment) Remove() common.Executor { return func(_ context.Context) error { if e.CleanUp != nil { e.CleanUp() } return os.RemoveAll(e.Path) } } func (e *HostEnvironment) ToContainerPath(path string) string { if bp, err := filepath.Rel(e.Workdir, path); err != nil { return filepath.Join(e.Path, bp) } else if filepath.Clean(e.Workdir) == filepath.Clean(path) { return e.Path } return path } func (e *HostEnvironment) GetActPath() string { actPath := e.ActPath if runtime.GOOS == "windows" { actPath = strings.ReplaceAll(actPath, "\\", "/") } return actPath } func (*HostEnvironment) GetPathVariableName() string { if runtime.GOOS == "plan9" { return "path" } else if runtime.GOOS == "windows" { return "Path" // Actually we need a case insensitive map } return "PATH" } func (e *HostEnvironment) DefaultPathVariable() string { v, _ := os.LookupEnv(e.GetPathVariableName()) return v } func (*HostEnvironment) JoinPathVariable(paths ...string) string { return strings.Join(paths, string(filepath.ListSeparator)) } // Reference for Arch values for runner.arch // https://docs.github.com/en/actions/learn-github-actions/contexts#runner-context func goArchToActionArch(arch string) string { archMapper := map[string]string{ "amd64": "X64", "x86_64": "X64", "386": "X86", "aarch64": "ARM64", } if arch, ok := archMapper[arch]; ok { return arch } return arch } func goOsToActionOs(os string) string { osMapper := map[string]string{ "linux": "Linux", "windows": "Windows", "darwin": "macOS", } if os, ok := osMapper[os]; ok { return os } return os } func (e *HostEnvironment) GetRunnerContext(_ context.Context) map[string]interface{} { return map[string]interface{}{ "os": goOsToActionOs(runtime.GOOS), "arch": goArchToActionArch(runtime.GOARCH), "temp": e.TmpDir, "tool_cache": e.ToolCache, } } func (e *HostEnvironment) GetHealth(_ context.Context) Health { return 
HealthHealthy } func (e *HostEnvironment) ReplaceLogWriter(stdout io.Writer, _ io.Writer) (io.Writer, io.Writer) { org := e.StdOut e.StdOut = stdout return org, org } func (*HostEnvironment) IsEnvironmentCaseInsensitive() bool { return runtime.GOOS == "windows" }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/docker_stub.go
pkg/container/docker_stub.go
//go:build WITHOUT_DOCKER || !(linux || darwin || windows || netbsd) package container import ( "context" "runtime" "github.com/docker/docker/api/types/system" "github.com/nektos/act/pkg/common" "github.com/pkg/errors" ) // ImageExistsLocally returns a boolean indicating if an image with the // requested name, tag and architecture exists in the local docker image store func ImageExistsLocally(ctx context.Context, imageName string, platform string) (bool, error) { return false, errors.New("Unsupported Operation") } // RemoveImage removes image from local store, the function is used to run different // container image architectures func RemoveImage(ctx context.Context, imageName string, force bool, pruneChildren bool) (bool, error) { return false, errors.New("Unsupported Operation") } // NewDockerBuildExecutor function to create a run executor for the container func NewDockerBuildExecutor(input NewDockerBuildExecutorInput) common.Executor { return func(ctx context.Context) error { return errors.New("Unsupported Operation") } } // NewDockerPullExecutor function to create a run executor for the container func NewDockerPullExecutor(input NewDockerPullExecutorInput) common.Executor { return func(ctx context.Context) error { return errors.New("Unsupported Operation") } } // NewContainer creates a reference to a container func NewContainer(input *NewContainerInput) ExecutionsEnvironment { return nil } func RunnerArch(ctx context.Context) string { return runtime.GOOS } func GetHostInfo(ctx context.Context) (info system.Info, err error) { return system.Info{}, nil } func NewDockerVolumeRemoveExecutor(volume string, force bool) common.Executor { return func(ctx context.Context) error { return nil } } func NewDockerNetworkCreateExecutor(name string) common.Executor { return func(ctx context.Context) error { return nil } } func NewDockerNetworkRemoveExecutor(name string) common.Executor { return func(ctx context.Context) error { return nil } }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/util.go
pkg/container/util.go
//go:build (!windows && !plan9 && !openbsd) || (!windows && !plan9 && !mips64) package container import ( "os" "syscall" "github.com/creack/pty" ) func getSysProcAttr(_ string, tty bool) *syscall.SysProcAttr { if tty { return &syscall.SysProcAttr{ Setsid: true, Setctty: true, } } return &syscall.SysProcAttr{ Setpgid: true, } } func openPty() (*os.File, *os.File, error) { return pty.Open() }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/docker_images_test.go
pkg/container/docker_images_test.go
package container import ( "context" "io" "testing" "github.com/docker/docker/api/types/image" "github.com/docker/docker/client" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" ) func init() { log.SetLevel(log.DebugLevel) } func TestImageExistsLocally(t *testing.T) { if testing.Short() { t.Skip("skipping integration test") } ctx := context.Background() // to help make this test reliable and not flaky, we need to have // an image that will exist, and onew that won't exist // Test if image exists with specific tag invalidImageTag, err := ImageExistsLocally(ctx, "library/alpine:this-random-tag-will-never-exist", "linux/amd64") assert.Nil(t, err) assert.Equal(t, false, invalidImageTag) // Test if image exists with specific architecture (image platform) invalidImagePlatform, err := ImageExistsLocally(ctx, "alpine:latest", "windows/amd64") assert.Nil(t, err) assert.Equal(t, false, invalidImagePlatform) // pull an image cli, err := client.NewClientWithOpts(client.FromEnv) assert.Nil(t, err) cli.NegotiateAPIVersion(context.Background()) // Chose alpine latest because it's so small // maybe we should build an image instead so that tests aren't reliable on dockerhub readerDefault, err := cli.ImagePull(ctx, "node:16-buster-slim", image.PullOptions{ Platform: "linux/amd64", }) assert.Nil(t, err) defer readerDefault.Close() _, err = io.ReadAll(readerDefault) assert.Nil(t, err) imageDefaultArchExists, err := ImageExistsLocally(ctx, "node:16-buster-slim", "linux/amd64") assert.Nil(t, err) assert.Equal(t, true, imageDefaultArchExists) // Validate if another architecture platform can be pulled readerArm64, err := cli.ImagePull(ctx, "node:16-buster-slim", image.PullOptions{ Platform: "linux/arm64", }) assert.Nil(t, err) defer readerArm64.Close() _, err = io.ReadAll(readerArm64) assert.Nil(t, err) imageArm64Exists, err := ImageExistsLocally(ctx, "node:16-buster-slim", "linux/arm64") assert.Nil(t, err) assert.Equal(t, true, imageArm64Exists) }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/executions_environment.go
pkg/container/executions_environment.go
package container import "context" type ExecutionsEnvironment interface { Container ToContainerPath(string) string GetActPath() string GetPathVariableName() string DefaultPathVariable() string JoinPathVariable(...string) string GetRunnerContext(ctx context.Context) map[string]interface{} // On windows PATH and Path are the same key IsEnvironmentCaseInsensitive() bool }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/docker_socket.go
pkg/container/docker_socket.go
package container import ( "fmt" "os" "path/filepath" "strings" log "github.com/sirupsen/logrus" ) var CommonSocketLocations = []string{ "/var/run/docker.sock", "/run/podman/podman.sock", "$HOME/.colima/docker.sock", "$XDG_RUNTIME_DIR/docker.sock", "$XDG_RUNTIME_DIR/podman/podman.sock", `\\.\pipe\docker_engine`, "$HOME/.docker/run/docker.sock", } // returns socket URI or false if not found any func socketLocation() (string, bool) { if dockerHost, exists := os.LookupEnv("DOCKER_HOST"); exists { return dockerHost, true } for _, p := range CommonSocketLocations { if _, err := os.Lstat(os.ExpandEnv(p)); err == nil { if strings.HasPrefix(p, `\\.\`) { return "npipe://" + filepath.ToSlash(os.ExpandEnv(p)), true } return "unix://" + filepath.ToSlash(os.ExpandEnv(p)), true } } return "", false } // This function, `isDockerHostURI`, takes a string argument `daemonPath`. It checks if the // `daemonPath` is a valid Docker host URI. It does this by checking if the scheme of the URI (the // part before "://") contains only alphabetic characters. If it does, the function returns true, // indicating that the `daemonPath` is a Docker host URI. If it doesn't, or if the "://" delimiter // is not found in the `daemonPath`, the function returns false. 
func isDockerHostURI(daemonPath string) bool { if protoIndex := strings.Index(daemonPath, "://"); protoIndex != -1 { scheme := daemonPath[:protoIndex] if strings.IndexFunc(scheme, func(r rune) bool { return (r < 'a' || r > 'z') && (r < 'A' || r > 'Z') }) == -1 { return true } } return false } type SocketAndHost struct { Socket string Host string } func GetSocketAndHost(containerSocket string) (SocketAndHost, error) { log.Debugf("Handling container host and socket") // Prefer DOCKER_HOST, don't override it dockerHost, hasDockerHost := socketLocation() socketHost := SocketAndHost{Socket: containerSocket, Host: dockerHost} // ** socketHost.Socket cases ** // Case 1: User does _not_ want to mount a daemon socket (passes a dash) // Case 2: User passes a filepath to the socket; is that even valid? // Case 3: User passes a valid socket; do nothing // Case 4: User omitted the flag; set a sane default // ** DOCKER_HOST cases ** // Case A: DOCKER_HOST is set; use it, i.e. do nothing // Case B: DOCKER_HOST is empty; use sane defaults // Set host for sanity's sake, when the socket isn't useful if !hasDockerHost && (socketHost.Socket == "-" || !isDockerHostURI(socketHost.Socket) || socketHost.Socket == "") { // Cases: 1B, 2B, 4B socket, found := socketLocation() socketHost.Host = socket hasDockerHost = found } // A - (dash) in socketHost.Socket means don't mount, preserve this value // otherwise if socketHost.Socket is a filepath don't use it as socket // Exit early if we're in an invalid state (e.g. when no DOCKER_HOST and user supplied "-", a dash or omitted) if !hasDockerHost && socketHost.Socket != "" && !isDockerHostURI(socketHost.Socket) { // Cases: 1B, 2B // Should we early-exit here, since there is no host nor socket to talk to? 
return SocketAndHost{}, fmt.Errorf("DOCKER_HOST was not set, couldn't be found in the usual locations, and the container daemon socket ('%s') is invalid", socketHost.Socket) } // Default to DOCKER_HOST if set if socketHost.Socket == "" && hasDockerHost { // Cases: 4A log.Debugf("Defaulting container socket to DOCKER_HOST") socketHost.Socket = socketHost.Host } // Set sane default socket location if user omitted it if socketHost.Socket == "" { // Cases: 4B socket, _ := socketLocation() // socket is empty if it isn't found, so assignment here is at worst a no-op log.Debugf("Defaulting container socket to default '%s'", socket) socketHost.Socket = socket } // Exit if both the DOCKER_HOST and socket are fulfilled if hasDockerHost { // Cases: 1A, 2A, 3A, 4A if !isDockerHostURI(socketHost.Socket) { // Cases: 1A, 2A log.Debugf("DOCKER_HOST is set, but socket is invalid '%s'", socketHost.Socket) } return socketHost, nil } // Set a sane DOCKER_HOST default if we can if isDockerHostURI(socketHost.Socket) { // Cases: 3B log.Debugf("Setting DOCKER_HOST to container socket '%s'", socketHost.Socket) socketHost.Host = socketHost.Socket // Both DOCKER_HOST and container socket are valid; short-circuit exit return socketHost, nil } // Here there is no DOCKER_HOST _and_ the supplied container socket is not a valid URI (either invalid or a file path) // Cases: 2B <- but is already handled at the top // I.e. this path should never be taken return SocketAndHost{}, fmt.Errorf("no DOCKER_HOST and an invalid container socket '%s'", socketHost.Socket) }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/docker_run.go
pkg/container/docker_run.go
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))

package container

import (
	"archive/tar"
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"runtime"
	"strconv"
	"strings"

	"dario.cat/mergo"
	"github.com/Masterminds/semver"
	"github.com/docker/cli/cli/connhelper"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/mount"
	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/api/types/system"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
	"github.com/go-git/go-billy/v5/helper/polyfill"
	"github.com/go-git/go-billy/v5/osfs"
	"github.com/go-git/go-git/v5/plumbing/format/gitignore"
	"github.com/joho/godotenv"
	"github.com/kballard/go-shellquote"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/spf13/pflag"
	"golang.org/x/term"

	"github.com/nektos/act/pkg/common"
	"github.com/nektos/act/pkg/filecollector"
)

// NewContainer creates a reference to a container
func NewContainer(input *NewContainerInput) ExecutionsEnvironment {
	cr := new(containerReference)
	cr.input = input
	return cr
}

// supportsContainerImagePlatform returns true if the underlying Docker server
// API version is 1.41 and beyond
func supportsContainerImagePlatform(ctx context.Context, cli client.APIClient) bool {
	logger := common.Logger(ctx)
	ver, err := cli.ServerVersion(ctx)
	if err != nil {
		// Panicf aborts; the returned false is unreachable but keeps the compiler happy.
		logger.Panicf("Failed to get Docker API Version: %s", err)
		return false
	}
	sv, err := semver.NewVersion(ver.APIVersion)
	if err != nil {
		logger.Panicf("Failed to unmarshal Docker Version: %s", err)
		return false
	}
	constraint, _ := semver.NewConstraint(">= 1.41")
	return constraint.Check(sv)
}

// Create returns an executor that connects to the daemon, looks for an
// existing container with the configured name, and creates one if absent.
// No-op under dry-run.
func (cr *containerReference) Create(capAdd []string, capDrop []string) common.Executor {
	return common.
		NewInfoExecutor("%sdocker create image=%s platform=%s entrypoint=%+q cmd=%+q network=%+q", logPrefix, cr.input.Image, cr.input.Platform, cr.input.Entrypoint, cr.input.Cmd, cr.input.NetworkMode).
		Then(
			common.NewPipelineExecutor(
				cr.connect(),
				cr.find(),
				cr.create(capAdd, capDrop),
			).IfNot(common.Dryrun),
		)
}

// Start returns an executor that starts the container, optionally attaching
// to its output and waiting for it to exit. After start it probes the
// container's UID/GID and chowns the working directory for non-root users.
func (cr *containerReference) Start(attach bool) common.Executor {
	return common.
		NewInfoExecutor("%sdocker run image=%s platform=%s entrypoint=%+q cmd=%+q network=%+q", logPrefix, cr.input.Image, cr.input.Platform, cr.input.Entrypoint, cr.input.Cmd, cr.input.NetworkMode).
		Then(
			common.NewPipelineExecutor(
				cr.connect(),
				cr.find(),
				cr.attach().IfBool(attach),
				cr.start(),
				cr.wait().IfBool(attach),
				cr.tryReadUID(),
				cr.tryReadGID(),
				func(ctx context.Context) error {
					// If this fails, then folders have wrong permissions on non root container
					if cr.UID != 0 || cr.GID != 0 {
						_ = cr.Exec([]string{"chown", "-R", fmt.Sprintf("%d:%d", cr.UID, cr.GID), cr.input.WorkingDir}, nil, "0", "")(ctx)
					}
					return nil
				},
			).IfNot(common.Dryrun),
		)
}

// Pull returns an executor that pulls the configured image, optionally
// forcing a re-pull even when the image is present locally.
func (cr *containerReference) Pull(forcePull bool) common.Executor {
	return common.
		NewInfoExecutor("%sdocker pull image=%s platform=%s username=%s forcePull=%t", logPrefix, cr.input.Image, cr.input.Platform, cr.input.Username, forcePull).
		Then(
			NewDockerPullExecutor(NewDockerPullExecutorInput{
				Image:     cr.input.Image,
				ForcePull: forcePull,
				Platform:  cr.input.Platform,
				Username:  cr.input.Username,
				Password:  cr.input.Password,
			}),
		)
}

// Copy returns an executor that writes the given in-memory file entries
// into destPath inside the container. No-op under dry-run.
func (cr *containerReference) Copy(destPath string, files ...*FileEntry) common.Executor {
	return common.NewPipelineExecutor(
		cr.connect(),
		cr.find(),
		cr.copyContent(destPath, files...),
	).IfNot(common.Dryrun)
}

// CopyDir returns an executor that tars srcPath on the host and extracts it
// at destPath in the container, optionally honoring .gitignore, then chowns
// the destination for non-root containers. No-op under dry-run.
func (cr *containerReference) CopyDir(destPath string, srcPath string, useGitIgnore bool) common.Executor {
	return common.NewPipelineExecutor(
		common.NewInfoExecutor("%sdocker cp src=%s dst=%s", logPrefix, srcPath, destPath),
		cr.copyDir(destPath, srcPath, useGitIgnore),
		func(ctx context.Context) error {
			// If this fails, then folders have wrong permissions on non root container
			if cr.UID != 0 || cr.GID != 0 {
				_ = cr.Exec([]string{"chown", "-R", fmt.Sprintf("%d:%d", cr.UID, cr.GID), destPath}, nil, "0", "")(ctx)
			}
			return nil
		},
	).IfNot(common.Dryrun)
}

// GetContainerArchive streams srcPath out of the container as a tar archive.
// The caller owns (and must close) the returned ReadCloser.
func (cr *containerReference) GetContainerArchive(ctx context.Context, srcPath string) (io.ReadCloser, error) {
	if common.Dryrun(ctx) {
		return nil, fmt.Errorf("DRYRUN is not supported in GetContainerArchive")
	}
	a, _, err := cr.cli.CopyFromContainer(ctx, cr.id, srcPath)
	return a, err
}

// UpdateFromEnv merges the dotenv-style file at srcPath (inside the
// container) into env. No-op under dry-run.
func (cr *containerReference) UpdateFromEnv(srcPath string, env *map[string]string) common.Executor {
	return parseEnvFile(cr, srcPath, env).IfNot(common.Dryrun)
}

// UpdateFromImageEnv merges the image's configured ENV into env.
// No-op under dry-run.
func (cr *containerReference) UpdateFromImageEnv(env *map[string]string) common.Executor {
	return cr.extractFromImageEnv(env).IfNot(common.Dryrun)
}

// Exec returns an executor that runs command inside the container as the
// given user, with workdir relative to the container working directory
// unless absolute. No-op under dry-run.
func (cr *containerReference) Exec(command []string, env map[string]string, user, workdir string) common.Executor {
	return common.NewPipelineExecutor(
		common.NewInfoExecutor("%sdocker exec cmd=[%s] user=%s workdir=%s", logPrefix, strings.Join(command, " "), user, workdir),
		cr.connect(),
		cr.find(),
		cr.exec(command, env, user, workdir),
	).IfNot(common.Dryrun)
}

// Remove returns an executor that force-removes the container (and its
// volumes) if it exists. No-op under dry-run.
func (cr *containerReference) Remove() common.Executor {
	return common.NewPipelineExecutor(
		cr.connect(),
		cr.find(),
	).Finally(
		cr.remove(),
	).IfNot(common.Dryrun)
}

// GetHealth inspects the container and maps its Docker health state to the
// package's Health enum. Containers without a health check (or with an
// explicit "NONE" test) are treated as healthy.
func (cr *containerReference) GetHealth(ctx context.Context) Health {
	resp, err := cr.cli.ContainerInspect(ctx, cr.id)
	logger := common.Logger(ctx)
	if err != nil {
		logger.Errorf("failed to query container health %s", err)
		return HealthUnHealthy
	}
	// && binds tighter than ||: the last clause only matches the single-entry
	// ["NONE"] health-check test.
	if resp.Config == nil || resp.Config.Healthcheck == nil || resp.State == nil || resp.State.Health == nil || len(resp.Config.Healthcheck.Test) == 1 && strings.EqualFold(resp.Config.Healthcheck.Test[0], "NONE") {
		logger.Debugf("no container health check defined")
		return HealthHealthy
	}
	logger.Infof("container health of %s (%s) is %s", cr.id, resp.Config.Image, resp.State.Health.Status)
	switch resp.State.Health.Status {
	case "starting":
		return HealthStarting
	case "healthy":
		return HealthHealthy
	case "unhealthy":
		return HealthUnHealthy
	}
	return HealthUnHealthy
}

// ReplaceLogWriter swaps the container's stdout/stderr writers and returns
// the previous pair so the caller can restore them later.
func (cr *containerReference) ReplaceLogWriter(stdout io.Writer, stderr io.Writer) (io.Writer, io.Writer) {
	out := cr.input.Stdout
	err := cr.input.Stderr

	cr.input.Stdout = stdout
	cr.input.Stderr = stderr

	return out, err
}

// containerReference is the concrete ExecutionsEnvironment backed by the
// Docker API. id is resolved lazily by find(); UID/GID are probed after
// start by tryReadUID/tryReadGID.
type containerReference struct {
	cli   client.APIClient
	id    string
	input *NewContainerInput
	UID   int
	GID   int
	LinuxContainerEnvironmentExtensions
}

// GetDockerClient builds a Docker API client from the environment, with
// special handling for ssh:// DOCKER_HOST values via a connection helper.
// The client's API version is negotiated with the daemon before returning.
func GetDockerClient(ctx context.Context) (cli client.APIClient, err error) {
	dockerHost := os.Getenv("DOCKER_HOST")

	if strings.HasPrefix(dockerHost, "ssh://") {
		var helper *connhelper.ConnectionHelper

		helper, err = connhelper.GetConnectionHelper(dockerHost)
		if err != nil {
			return nil, err
		}
		cli, err = client.NewClientWithOpts(
			client.WithHost(helper.Host),
			client.WithDialContext(helper.Dialer),
		)
	} else {
		cli, err = client.NewClientWithOpts(client.FromEnv)
	}
	if err != nil {
		return nil, fmt.Errorf("failed to connect to docker daemon: %w", err)
	}
	cli.NegotiateAPIVersion(ctx)

	return cli, nil
}

// GetHostInfo queries the Docker daemon's system info, closing the client
// it creates before returning.
func GetHostInfo(ctx context.Context) (info system.Info, err error) {
	var cli client.APIClient
	cli, err = GetDockerClient(ctx)
	if err != nil {
		return info, err
	}
	defer cli.Close()

	info, err = cli.Info(ctx)
	if err != nil {
		return info, err
	}

	return info, nil
}

// Arch fetches values from docker info and translates architecture to
// GitHub actions compatible runner.arch values
// https://github.com/github/docs/blob/main/data/reusables/actions/runner-arch-description.md
func RunnerArch(ctx context.Context) string {
	info, err := GetHostInfo(ctx)
	if err != nil {
		return ""
	}

	archMapper := map[string]string{
		"x86_64":  "X64",
		"amd64":   "X64",
		"386":     "X86",
		"aarch64": "ARM64",
		"arm64":   "ARM64",
	}
	if arch, ok := archMapper[info.Architecture]; ok {
		return arch
	}
	// Fall through: report the daemon's architecture string verbatim.
	return info.Architecture
}

// connect lazily creates and caches the Docker client on the reference.
func (cr *containerReference) connect() common.Executor {
	return func(ctx context.Context) error {
		if cr.cli != nil {
			return nil
		}
		cli, err := GetDockerClient(ctx)
		if err != nil {
			return err
		}
		cr.cli = cli
		return nil
	}
}

// Close releases the cached Docker client, if any.
func (cr *containerReference) Close() common.Executor {
	return func(_ context.Context) error {
		if cr.cli != nil {
			err := cr.cli.Close()
			cr.cli = nil
			if err != nil {
				return fmt.Errorf("failed to close client: %w", err)
			}
		}
		return nil
	}
}

// find resolves cr.id by listing all containers and matching on the
// configured name. Not finding a container is not an error; cr.id is
// simply left empty.
func (cr *containerReference) find() common.Executor {
	return func(ctx context.Context) error {
		if cr.id != "" {
			return nil
		}
		containers, err := cr.cli.ContainerList(ctx, container.ListOptions{
			All: true,
		})
		if err != nil {
			return fmt.Errorf("failed to list containers: %w", err)
		}

		for _, c := range containers {
			for _, name := range c.Names {
				// Docker prefixes names with "/"; strip it before comparing.
				if name[1:] == cr.input.Name {
					cr.id = c.ID
					return nil
				}
			}
		}

		cr.id = ""
		return nil
	}
}

// remove force-removes the container and its volumes. Removal failures are
// logged but not returned, so cleanup never fails the pipeline.
func (cr *containerReference) remove() common.Executor {
	return func(ctx context.Context) error {
		if cr.id == "" {
			return nil
		}

		logger := common.Logger(ctx)
		err := cr.cli.ContainerRemove(ctx, cr.id, container.RemoveOptions{
			RemoveVolumes: true,
			Force:         true,
		})
		if err != nil {
			logger.Error(fmt.Errorf("failed to remove container: %w", err))
		}

		logger.Debugf("Removed container: %v", cr.id)
		cr.id = ""
		return nil
	}
}

// mergeContainerConfigs overlays user-supplied docker CLI flags
// (input.Options, parsed with the vendored docker/cli option parser) onto
// the base container config and host config. Binds and Mounts are
// appended rather than overridden by the merge.
func (cr *containerReference) mergeContainerConfigs(ctx context.Context, config *container.Config, hostConfig *container.HostConfig) (*container.Config, *container.HostConfig, error) {
	logger := common.Logger(ctx)
	input := cr.input

	if input.Options == "" {
		return config, hostConfig, nil
	}

	// parse configuration from CLI container.options
	flags := pflag.NewFlagSet("container_flags", pflag.ContinueOnError)
	copts := addFlags(flags)

	optionsArgs, err := shellquote.Split(input.Options)
	if err != nil {
		return nil, nil, fmt.Errorf("Cannot split container options: '%s': '%w'", input.Options, err)
	}

	err = flags.Parse(optionsArgs)
	if err != nil {
		return nil, nil, fmt.Errorf("Cannot parse container options: '%s': '%w'", input.Options, err)
	}

	// Default the network mode when the user didn't pass --network.
	if len(copts.netMode.Value()) == 0 {
		if err = copts.netMode.Set(cr.input.NetworkMode); err != nil {
			return nil, nil, fmt.Errorf("Cannot parse networkmode=%s. This is an internal error and should not happen: '%w'", cr.input.NetworkMode, err)
		}
	}
	containerConfig, err := parse(flags, copts, runtime.GOOS)
	if err != nil {
		return nil, nil, fmt.Errorf("Cannot process container options: '%s': '%w'", input.Options, err)
	}

	logger.Debugf("Custom container.Config from options ==> %+v", containerConfig.Config)

	err = mergo.Merge(config, containerConfig.Config, mergo.WithOverride)
	if err != nil {
		return nil, nil, fmt.Errorf("Cannot merge container.Config options: '%s': '%w'", input.Options, err)
	}
	logger.Debugf("Merged container.Config ==> %+v", config)

	logger.Debugf("Custom container.HostConfig from options ==> %+v", containerConfig.HostConfig)

	// Accumulate binds/mounts before the merge, then restore them after:
	// mergo.WithOverride would otherwise replace the slices instead of
	// appending to them.
	hostConfig.Binds = append(hostConfig.Binds, containerConfig.HostConfig.Binds...)
	hostConfig.Mounts = append(hostConfig.Mounts, containerConfig.HostConfig.Mounts...)
	binds := hostConfig.Binds
	mounts := hostConfig.Mounts
	err = mergo.Merge(hostConfig, containerConfig.HostConfig, mergo.WithOverride)
	if err != nil {
		return nil, nil, fmt.Errorf("Cannot merge container.HostConfig options: '%s': '%w'", input.Options, err)
	}
	hostConfig.Binds = binds
	hostConfig.Mounts = mounts
	logger.Debugf("Merged container.HostConfig ==> %+v", hostConfig)

	return config, hostConfig, nil
}

// create builds the container/host/networking configs from cr.input
// (including user option overrides and an optional platform constraint)
// and creates the container, storing its ID in cr.id.
func (cr *containerReference) create(capAdd []string, capDrop []string) common.Executor {
	return func(ctx context.Context) error {
		if cr.id != "" {
			return nil
		}
		logger := common.Logger(ctx)
		isTerminal := term.IsTerminal(int(os.Stdout.Fd()))
		input := cr.input

		config := &container.Config{
			Image:        input.Image,
			WorkingDir:   input.WorkingDir,
			Env:          input.Env,
			ExposedPorts: input.ExposedPorts,
			Tty:          isTerminal,
		}
		logger.Debugf("Common container.Config ==> %+v", config)

		if len(input.Cmd) != 0 {
			config.Cmd = input.Cmd
		}

		if len(input.Entrypoint) != 0 {
			config.Entrypoint = input.Entrypoint
		}

		mounts := make([]mount.Mount, 0)
		for mountSource, mountTarget := range input.Mounts {
			mounts = append(mounts, mount.Mount{
				Type:   mount.TypeVolume,
				Source: mountSource,
				Target: mountTarget,
			})
		}

		var platSpecs *specs.Platform
		if supportsContainerImagePlatform(ctx, cr.cli) && cr.input.Platform != "" {
			// Platform strings look like "os/arch", e.g. "linux/amd64".
			desiredPlatform := strings.SplitN(cr.input.Platform, `/`, 2)
			if len(desiredPlatform) != 2 {
				return fmt.Errorf("incorrect container platform option '%s'", cr.input.Platform)
			}
			platSpecs = &specs.Platform{
				Architecture: desiredPlatform[1],
				OS:           desiredPlatform[0],
			}
		}

		hostConfig := &container.HostConfig{
			CapAdd:       capAdd,
			CapDrop:      capDrop,
			Binds:        input.Binds,
			Mounts:       mounts,
			NetworkMode:  container.NetworkMode(input.NetworkMode),
			Privileged:   input.Privileged,
			UsernsMode:   container.UsernsMode(input.UsernsMode),
			PortBindings: input.PortBindings,
		}
		logger.Debugf("Common container.HostConfig ==> %+v", hostConfig)

		config, hostConfig, err := cr.mergeContainerConfigs(ctx, config, hostConfig)
		if err != nil {
			return err
		}

		var networkingConfig *network.NetworkingConfig
		logger.Debugf("input.NetworkAliases ==> %v", input.NetworkAliases)
		n := hostConfig.NetworkMode
		// IsUserDefined and IsHost are broken on windows
		if n.IsUserDefined() && n != "host" && len(input.NetworkAliases) > 0 {
			endpointConfig := &network.EndpointSettings{
				Aliases: input.NetworkAliases,
			}
			networkingConfig = &network.NetworkingConfig{
				EndpointsConfig: map[string]*network.EndpointSettings{
					input.NetworkMode: endpointConfig,
				},
			}
		}

		resp, err := cr.cli.ContainerCreate(ctx, config, hostConfig, networkingConfig, platSpecs, input.Name)
		if err != nil {
			return fmt.Errorf("failed to create container: '%w'", err)
		}
		logger.Debugf("Created container name=%s id=%v from image %v (platform: %s)", input.Name, resp.ID, input.Image, input.Platform)
		logger.Debugf("ENV ==> %v", input.Env)

		cr.id = resp.ID
		return nil
	}
}

// extractFromImageEnv merges the image's configured ENV entries into env.
// PATH values are appended (colon-joined); other keys only fill gaps and
// never overwrite existing entries.
func (cr *containerReference) extractFromImageEnv(env *map[string]string) common.Executor {
	envMap := *env
	return func(ctx context.Context) error {
		logger := common.Logger(ctx)

		inspect, err := cr.cli.ImageInspect(ctx, cr.input.Image)
		if err != nil {
			logger.Error(err)
			return fmt.Errorf("inspect image: %w", err)
		}

		if inspect.Config == nil {
			return nil
		}

		imageEnv, err := godotenv.Unmarshal(strings.Join(inspect.Config.Env, "\n"))
		if err != nil {
			logger.Error(err)
			return fmt.Errorf("unmarshal image env: %w", err)
		}

		for k, v := range imageEnv {
			if k == "PATH" {
				if envMap[k] == "" {
					envMap[k] = v
				} else {
					envMap[k] += `:` + v
				}
			} else if envMap[k] == "" {
				envMap[k] = v
			}
		}

		// NOTE(review): this assigns to the local parameter only and has no
		// effect on the caller; envMap already aliases the caller's map, so
		// the mutations above are what actually propagate.
		env = &envMap

		return nil
	}
}

// exec runs cmd inside the container via the exec API, streams its output,
// and maps non-zero exit codes to errors (127 gets a dedicated message).
func (cr *containerReference) exec(cmd []string, env map[string]string, user, workdir string) common.Executor {
	return func(ctx context.Context) error {
		logger := common.Logger(ctx)
		// Fix slashes when running on Windows
		if runtime.GOOS == "windows" {
			var newCmd []string
			for _, v := range cmd {
				newCmd = append(newCmd, strings.ReplaceAll(v, `\`, `/`))
			}
			cmd = newCmd
		}
		logger.Debugf("Exec command '%s'", cmd)
		isTerminal := term.IsTerminal(int(os.Stdout.Fd()))
		envList := make([]string, 0)
		for k, v := range env {
			envList = append(envList, fmt.Sprintf("%s=%s", k, v))
		}

		var wd string
		if workdir != "" {
			if strings.HasPrefix(workdir, "/") {
				wd = workdir
			} else {
				// Relative workdirs are resolved against the container's
				// configured working directory.
				wd = fmt.Sprintf("%s/%s", cr.input.WorkingDir, workdir)
			}
		} else {
			wd = cr.input.WorkingDir
		}
		logger.Debugf("Working directory '%s'", wd)

		idResp, err := cr.cli.ContainerExecCreate(ctx, cr.id, container.ExecOptions{
			User:         user,
			Cmd:          cmd,
			WorkingDir:   wd,
			Env:          envList,
			Tty:          isTerminal,
			AttachStderr: true,
			AttachStdout: true,
		})
		if err != nil {
			return fmt.Errorf("failed to create exec: %w", err)
		}

		resp, err := cr.cli.ContainerExecAttach(ctx, idResp.ID, container.ExecStartOptions{
			Tty: isTerminal,
		})
		if err != nil {
			return fmt.Errorf("failed to attach to exec: %w", err)
		}
		defer resp.Close()

		err = cr.waitForCommand(ctx, isTerminal, resp)
		if err != nil {
			return err
		}

		inspectResp, err := cr.cli.ContainerExecInspect(ctx, idResp.ID)
		if err != nil {
			return fmt.Errorf("failed to inspect exec: %w", err)
		}

		switch inspectResp.ExitCode {
		case 0:
			return nil
		case 127:
			return fmt.Errorf("exitcode '%d': command not found, please refer to https://github.com/nektos/act/issues/107 for more information", inspectResp.ExitCode)
		default:
			return fmt.Errorf("exitcode '%d': failure", inspectResp.ExitCode)
		}
	}
}

// tryReadID runs `id <opt>` inside the container and, when the output
// parses as an integer, hands it to cbk. All failures are swallowed
// deliberately: this is a best-effort probe.
func (cr *containerReference) tryReadID(opt string, cbk func(id int)) common.Executor {
	return func(ctx context.Context) error {
		idResp, err := cr.cli.ContainerExecCreate(ctx, cr.id, container.ExecOptions{
			Cmd:          []string{"id", opt},
			AttachStdout: true,
			AttachStderr: true,
		})
		if err != nil {
			return nil
		}

		resp, err := cr.cli.ContainerExecAttach(ctx, idResp.ID, container.ExecStartOptions{})
		if err != nil {
			return nil
		}
		defer resp.Close()

		sid, err := resp.Reader.ReadString('\n')
		if err != nil {
			return nil
		}
		exp := regexp.MustCompile(`\d+\n`)
		found := exp.FindString(sid)
		id, err := strconv.ParseInt(strings.TrimSpace(found), 10, 32)
		if err != nil {
			return nil
		}
		cbk(int(id))

		return nil
	}
}

// tryReadUID best-effort probes the container user's numeric UID.
func (cr *containerReference) tryReadUID() common.Executor {
	return cr.tryReadID("-u", func(id int) { cr.UID = id })
}

// tryReadGID best-effort probes the container user's numeric GID.
func (cr *containerReference) tryReadGID() common.Executor {
	return cr.tryReadID("-g", func(id int) { cr.GID = id })
}

// waitForCommand copies the hijacked exec stream to the configured writers
// until the stream ends or ctx is canceled. Raw copy is used for terminals
// (unless NORAW is set); otherwise the Docker stdcopy demultiplexer splits
// stdout/stderr. Copy errors are logged, not returned.
func (cr *containerReference) waitForCommand(ctx context.Context, isTerminal bool, resp types.HijackedResponse) error {
	logger := common.Logger(ctx)

	cmdResponse := make(chan error)

	go func() {
		var outWriter io.Writer
		outWriter = cr.input.Stdout
		if outWriter == nil {
			outWriter = os.Stdout
		}
		errWriter := cr.input.Stderr
		if errWriter == nil {
			errWriter = os.Stderr
		}

		var err error
		if !isTerminal || os.Getenv("NORAW") != "" {
			_, err = stdcopy.StdCopy(outWriter, errWriter, resp.Reader)
		} else {
			_, err = io.Copy(outWriter, resp.Reader)
		}
		cmdResponse <- err
	}()

	select {
	case <-ctx.Done():
		// send ctrl + c
		_, err := resp.Conn.Write([]byte{3})
		if err != nil {
			logger.Warnf("Failed to send CTRL+C: %+s", err)
		}

		// we return the context canceled error to prevent other steps
		// from executing
		return ctx.Err()
	case err := <-cmdResponse:
		if err != nil {
			logger.Error(err)
		}

		return nil
	}
}

// CopyTarStream creates destPath inside the container (mode 0777), then
// extracts tarStream into it, chowning the result for non-root containers.
// No-op under dry-run.
func (cr *containerReference) CopyTarStream(ctx context.Context, destPath string, tarStream io.Reader) error {
	if common.Dryrun(ctx) {
		return nil
	}
	// Mkdir
	buf := &bytes.Buffer{}
	tw := tar.NewWriter(buf)
	_ = tw.WriteHeader(&tar.Header{
		Name:     destPath,
		Mode:     0o777,
		Typeflag: tar.TypeDir,
	})
	tw.Close()
	err := cr.cli.CopyToContainer(ctx, cr.id, "/", buf, container.CopyToContainerOptions{})
	if err != nil {
		return fmt.Errorf("failed to mkdir to copy content to container: %w", err)
	}
	// Copy Content
	err = cr.cli.CopyToContainer(ctx, cr.id, destPath, tarStream, container.CopyToContainerOptions{})
	if err != nil {
		return fmt.Errorf("failed to copy content to container: %w", err)
	}
	// If this fails, then folders have wrong permissions on non root container
	if cr.UID != 0 || cr.GID != 0 {
		_ = cr.Exec([]string{"chown", "-R", fmt.Sprintf("%d:%d", cr.UID, cr.GID), destPath}, nil, "0", "")(ctx)
	}
	return nil
}

// copyDir tars srcPath into a temp file (optionally filtered through the
// source tree's .gitignore) and extracts it into the container at dstPath.
// The temp tarball is always cleaned up.
func (cr *containerReference) copyDir(dstPath string, srcPath string, useGitIgnore bool) common.Executor {
	return func(ctx context.Context) error {
		logger := common.Logger(ctx)
		tarFile, err := os.CreateTemp("", "act")
		if err != nil {
			return err
		}
		logger.Debugf("Writing tarball %s from %s", tarFile.Name(), srcPath)
		defer func(tarFile *os.File) {
			name := tarFile.Name()
			err := tarFile.Close()
			if !errors.Is(err, os.ErrClosed) {
				logger.Error(err)
			}
			err = os.Remove(name)
			if err != nil {
				logger.Error(err)
			}
		}(tarFile)
		tw := tar.NewWriter(tarFile)

		// Strip everything up to (and including) srcPath's parent so tar
		// entries are rooted at the directory being copied.
		srcPrefix := filepath.Dir(srcPath)
		if !strings.HasSuffix(srcPrefix, string(filepath.Separator)) {
			srcPrefix += string(filepath.Separator)
		}
		logger.Debugf("Stripping prefix:%s src:%s", srcPrefix, srcPath)

		var ignorer gitignore.Matcher
		if useGitIgnore {
			ps, err := gitignore.ReadPatterns(polyfill.New(osfs.New(srcPath)), nil)
			if err != nil {
				logger.Debugf("Error loading .gitignore: %v", err)
			}

			ignorer = gitignore.NewMatcher(ps)
		}

		fc := &filecollector.FileCollector{
			Fs:        &filecollector.DefaultFs{},
			Ignorer:   ignorer,
			SrcPath:   srcPath,
			SrcPrefix: srcPrefix,
			Handler: &filecollector.TarCollector{
				TarWriter: tw,
				UID:       cr.UID,
				GID:       cr.GID,
				// dstPath is assumed absolute; drop the leading "/" for tar.
				DstDir: dstPath[1:],
			},
		}

		err = filepath.Walk(srcPath, fc.CollectFiles(ctx, []string{}))
		if err != nil {
			return err
		}
		if err := tw.Close(); err != nil {
			return err
		}

		logger.Debugf("Extracting content from '%s' to '%s'", tarFile.Name(), dstPath)
		_, err = tarFile.Seek(0, 0)
		if err != nil {
			return fmt.Errorf("failed to seek tar archive: %w", err)
		}
		err = cr.cli.CopyToContainer(ctx, cr.id, "/", tarFile, container.CopyToContainerOptions{})
		if err != nil {
			return fmt.Errorf("failed to copy content to container: %w", err)
		}
		return nil
	}
}

// copyContent writes the given in-memory file entries (owned by the
// container's UID/GID) as a tar archive extracted at dstPath.
func (cr *containerReference) copyContent(dstPath string, files ...*FileEntry) common.Executor {
	return func(ctx context.Context) error {
		logger := common.Logger(ctx)
		var buf bytes.Buffer
		tw := tar.NewWriter(&buf)
		for _, file := range files {
			logger.Debugf("Writing entry to tarball %s len:%d", file.Name, len(file.Body))
			hdr := &tar.Header{
				Name: file.Name,
				Mode: int64(file.Mode),
				Size: int64(len(file.Body)),
				Uid:  cr.UID,
				Gid:  cr.GID,
			}
			if err := tw.WriteHeader(hdr); err != nil {
				return err
			}
			if _, err := tw.Write([]byte(file.Body)); err != nil {
				return err
			}
		}
		if err := tw.Close(); err != nil {
			return err
		}

		logger.Debugf("Extracting content to '%s'", dstPath)
		err := cr.cli.CopyToContainer(ctx, cr.id, dstPath, &buf, container.CopyToContainerOptions{})
		if err != nil {
			return fmt.Errorf("failed to copy content to container: %w", err)
		}
		return nil
	}
}

// attach connects to the container's output streams and pumps them to the
// configured writers in a background goroutine (raw for terminals unless
// NORAW is set, demultiplexed otherwise).
func (cr *containerReference) attach() common.Executor {
	return func(ctx context.Context) error {
		out, err := cr.cli.ContainerAttach(ctx, cr.id, container.AttachOptions{
			Stream: true,
			Stdout: true,
			Stderr: true,
		})
		if err != nil {
			return fmt.Errorf("failed to attach to container: %w", err)
		}
		isTerminal := term.IsTerminal(int(os.Stdout.Fd()))

		var outWriter io.Writer
		outWriter = cr.input.Stdout
		if outWriter == nil {
			outWriter = os.Stdout
		}
		errWriter := cr.input.Stderr
		if errWriter == nil {
			errWriter = os.Stderr
		}
		go func() {
			if !isTerminal || os.Getenv("NORAW") != "" {
				_, err = stdcopy.StdCopy(outWriter, errWriter, out.Reader)
			} else {
				_, err = io.Copy(outWriter, out.Reader)
			}
			if err != nil {
				common.Logger(ctx).Error(err)
			}
		}()
		return nil
	}
}

// start starts the created container.
func (cr *containerReference) start() common.Executor {
	return func(ctx context.Context) error {
		logger := common.Logger(ctx)
		logger.Debugf("Starting container: %v", cr.id)

		if err := cr.cli.ContainerStart(ctx, cr.id, container.StartOptions{}); err != nil {
			return fmt.Errorf("failed to start container: %w", err)
		}

		logger.Debugf("Started container: %v", cr.id)
		return nil
	}
}

// wait blocks until the container stops running and converts a non-zero
// exit status into an error.
func (cr *containerReference) wait() common.Executor {
	return func(ctx context.Context) error {
		logger := common.Logger(ctx)
		statusCh, errCh := cr.cli.ContainerWait(ctx, cr.id, container.WaitConditionNotRunning)
		var statusCode int64
		select {
		case err := <-errCh:
			if err != nil {
				return fmt.Errorf("failed to wait for container: %w", err)
			}
		case status := <-statusCh:
			statusCode = status.StatusCode
		}

		logger.Debugf("Return status: %v", statusCode)

		if statusCode == 0 {
			return nil
		}

		return fmt.Errorf("exit with `FAILURE`: %v", statusCode)
	}
}
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/util_windows.go
pkg/container/util_windows.go
package container

import (
	"errors"
	"os"
	"syscall"
)

// getSysProcAttr returns the Windows process attributes for spawning a
// command: the raw command line is passed through verbatim and the child
// is placed in its own process group so it can be signaled independently.
// The tty flag is accepted for interface parity with the unix variant but
// is ignored here, since this build has no pty support.
func getSysProcAttr(cmdLine string, tty bool) *syscall.SysProcAttr {
	return &syscall.SysProcAttr{CmdLine: cmdLine, CreationFlags: syscall.CREATE_NEW_PROCESS_GROUP}
}

// openPty reports that pseudo-terminals are not supported on Windows.
// Fix: error string lowercased per Go convention (staticcheck ST1005);
// callers only propagate the error, they do not match on its text.
func openPty() (*os.File, *os.File, error) {
	return nil, nil, errors.New("unsupported")
}
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/docker_cli.go
pkg/container/docker_cli.go
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd)) // This file is exact copy of https://github.com/docker/cli/blob/9ac8584acfd501c3f4da0e845e3a40ed15c85041/cli/command/container/opts.go // appended with license information. // // docker/cli is licensed under the Apache License, Version 2.0. // See DOCKER_LICENSE for the full license text. // //nolint:unparam,errcheck,depguard,deadcode,unused package container import ( "bytes" "encoding/json" "fmt" "os" "path" "path/filepath" "reflect" "regexp" "strconv" "strings" "time" "github.com/docker/cli/cli/compose/loader" "github.com/docker/cli/opts" "github.com/docker/docker/api/types/container" mounttypes "github.com/docker/docker/api/types/mount" networktypes "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/strslice" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/errdefs" "github.com/docker/go-connections/nat" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) var ( deviceCgroupRuleRegexp = regexp.MustCompile(`^[acb] ([0-9]+|\*):([0-9]+|\*) [rwm]{1,3}$`) ) // containerOptions is a data object with all the options for creating a container type containerOptions struct { attach opts.ListOpts volumes opts.ListOpts tmpfs opts.ListOpts mounts opts.MountOpt blkioWeightDevice opts.WeightdeviceOpt deviceReadBps opts.ThrottledeviceOpt deviceWriteBps opts.ThrottledeviceOpt links opts.ListOpts aliases opts.ListOpts linkLocalIPs opts.ListOpts deviceReadIOps opts.ThrottledeviceOpt deviceWriteIOps opts.ThrottledeviceOpt env opts.ListOpts labels opts.ListOpts deviceCgroupRules opts.ListOpts devices opts.ListOpts gpus opts.GpuOpts ulimits *opts.UlimitOpt sysctls *opts.MapOpts publish opts.ListOpts expose opts.ListOpts dns opts.ListOpts dnsSearch opts.ListOpts dnsOptions opts.ListOpts extraHosts opts.ListOpts volumesFrom opts.ListOpts envFile opts.ListOpts capAdd opts.ListOpts capDrop opts.ListOpts groupAdd opts.ListOpts securityOpt 
opts.ListOpts storageOpt opts.ListOpts labelsFile opts.ListOpts loggingOpts opts.ListOpts privileged bool pidMode string utsMode string usernsMode string cgroupnsMode string publishAll bool stdin bool tty bool oomKillDisable bool oomScoreAdj int containerIDFile string entrypoint string hostname string domainname string memory opts.MemBytes memoryReservation opts.MemBytes memorySwap opts.MemSwapBytes kernelMemory opts.MemBytes user string workingDir string cpuCount int64 cpuShares int64 cpuPercent int64 cpuPeriod int64 cpuRealtimePeriod int64 cpuRealtimeRuntime int64 cpuQuota int64 cpus opts.NanoCPUs cpusetCpus string cpusetMems string blkioWeight uint16 ioMaxBandwidth uint64 ioMaxIOps uint64 swappiness int64 netMode opts.NetworkOpt macAddress string ipv4Address string ipv6Address string ipcMode string pidsLimit int64 restartPolicy string readonlyRootfs bool loggingDriver string cgroupParent string volumeDriver string stopSignal string stopTimeout int isolation string shmSize opts.MemBytes noHealthcheck bool healthCmd string healthInterval time.Duration healthTimeout time.Duration healthStartPeriod time.Duration healthRetries int runtime string autoRemove bool init bool Image string Args []string } // addFlags adds all command line flags that will be used by parse to the FlagSet func addFlags(flags *pflag.FlagSet) *containerOptions { copts := &containerOptions{ aliases: opts.NewListOpts(nil), attach: opts.NewListOpts(validateAttach), blkioWeightDevice: opts.NewWeightdeviceOpt(opts.ValidateWeightDevice), capAdd: opts.NewListOpts(nil), capDrop: opts.NewListOpts(nil), dns: opts.NewListOpts(opts.ValidateIPAddress), dnsOptions: opts.NewListOpts(nil), dnsSearch: opts.NewListOpts(opts.ValidateDNSSearch), deviceCgroupRules: opts.NewListOpts(validateDeviceCgroupRule), deviceReadBps: opts.NewThrottledeviceOpt(opts.ValidateThrottleBpsDevice), deviceReadIOps: opts.NewThrottledeviceOpt(opts.ValidateThrottleIOpsDevice), deviceWriteBps: 
opts.NewThrottledeviceOpt(opts.ValidateThrottleBpsDevice), deviceWriteIOps: opts.NewThrottledeviceOpt(opts.ValidateThrottleIOpsDevice), devices: opts.NewListOpts(nil), // devices can only be validated after we know the server OS env: opts.NewListOpts(opts.ValidateEnv), envFile: opts.NewListOpts(nil), expose: opts.NewListOpts(nil), extraHosts: opts.NewListOpts(opts.ValidateExtraHost), groupAdd: opts.NewListOpts(nil), labels: opts.NewListOpts(opts.ValidateLabel), labelsFile: opts.NewListOpts(nil), linkLocalIPs: opts.NewListOpts(nil), links: opts.NewListOpts(opts.ValidateLink), loggingOpts: opts.NewListOpts(nil), publish: opts.NewListOpts(nil), securityOpt: opts.NewListOpts(nil), storageOpt: opts.NewListOpts(nil), sysctls: opts.NewMapOpts(nil, opts.ValidateSysctl), tmpfs: opts.NewListOpts(nil), ulimits: opts.NewUlimitOpt(nil), volumes: opts.NewListOpts(nil), volumesFrom: opts.NewListOpts(nil), } // General purpose flags flags.VarP(&copts.attach, "attach", "a", "Attach to STDIN, STDOUT or STDERR") flags.Var(&copts.deviceCgroupRules, "device-cgroup-rule", "Add a rule to the cgroup allowed devices list") flags.Var(&copts.devices, "device", "Add a host device to the container") flags.Var(&copts.gpus, "gpus", "GPU devices to add to the container ('all' to pass all GPUs)") flags.SetAnnotation("gpus", "version", []string{"1.40"}) flags.VarP(&copts.env, "env", "e", "Set environment variables") flags.Var(&copts.envFile, "env-file", "Read in a file of environment variables") flags.StringVar(&copts.entrypoint, "entrypoint", "", "Overwrite the default ENTRYPOINT of the image") flags.Var(&copts.groupAdd, "group-add", "Add additional groups to join") flags.StringVarP(&copts.hostname, "hostname", "h", "", "Container host name") flags.StringVar(&copts.domainname, "domainname", "", "Container NIS domain name") flags.BoolVarP(&copts.stdin, "interactive", "i", false, "Keep STDIN open even if not attached") flags.VarP(&copts.labels, "label", "l", "Set meta data on a container") 
flags.Var(&copts.labelsFile, "label-file", "Read in a line delimited file of labels") flags.BoolVar(&copts.readonlyRootfs, "read-only", false, "Mount the container's root filesystem as read only") flags.StringVar(&copts.restartPolicy, "restart", "no", "Restart policy to apply when a container exits") flags.StringVar(&copts.stopSignal, "stop-signal", "", "Signal to stop the container") flags.IntVar(&copts.stopTimeout, "stop-timeout", 0, "Timeout (in seconds) to stop a container") flags.SetAnnotation("stop-timeout", "version", []string{"1.25"}) flags.Var(copts.sysctls, "sysctl", "Sysctl options") flags.BoolVarP(&copts.tty, "tty", "t", false, "Allocate a pseudo-TTY") flags.Var(copts.ulimits, "ulimit", "Ulimit options") flags.StringVarP(&copts.user, "user", "u", "", "Username or UID (format: <name|uid>[:<group|gid>])") flags.StringVarP(&copts.workingDir, "workdir", "w", "", "Working directory inside the container") flags.BoolVar(&copts.autoRemove, "rm", false, "Automatically remove the container when it exits") // Security flags.Var(&copts.capAdd, "cap-add", "Add Linux capabilities") flags.Var(&copts.capDrop, "cap-drop", "Drop Linux capabilities") flags.BoolVar(&copts.privileged, "privileged", false, "Give extended privileges to this container") flags.Var(&copts.securityOpt, "security-opt", "Security Options") flags.StringVar(&copts.usernsMode, "userns", "", "User namespace to use") flags.StringVar(&copts.cgroupnsMode, "cgroupns", "", `Cgroup namespace to use (host|private) 'host': Run the container in the Docker host's cgroup namespace 'private': Run the container in its own private cgroup namespace '': Use the cgroup namespace as configured by the default-cgroupns-mode option on the daemon (default)`) flags.SetAnnotation("cgroupns", "version", []string{"1.41"}) // Network and port publishing flag flags.Var(&copts.extraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)") flags.Var(&copts.dns, "dns", "Set custom DNS servers") // We allow for both "--dns-opt" 
and "--dns-option", although the latter is the recommended way. // This is to be consistent with service create/update flags.Var(&copts.dnsOptions, "dns-opt", "Set DNS options") flags.Var(&copts.dnsOptions, "dns-option", "Set DNS options") flags.MarkHidden("dns-opt") flags.Var(&copts.dnsSearch, "dns-search", "Set custom DNS search domains") flags.Var(&copts.expose, "expose", "Expose a port or a range of ports") flags.StringVar(&copts.ipv4Address, "ip", "", "IPv4 address (e.g., 172.30.100.104)") flags.StringVar(&copts.ipv6Address, "ip6", "", "IPv6 address (e.g., 2001:db8::33)") flags.Var(&copts.links, "link", "Add link to another container") flags.Var(&copts.linkLocalIPs, "link-local-ip", "Container IPv4/IPv6 link-local addresses") flags.StringVar(&copts.macAddress, "mac-address", "", "Container MAC address (e.g., 92:d0:c6:0a:29:33)") flags.VarP(&copts.publish, "publish", "p", "Publish a container's port(s) to the host") flags.BoolVarP(&copts.publishAll, "publish-all", "P", false, "Publish all exposed ports to random ports") // We allow for both "--net" and "--network", although the latter is the recommended way. flags.Var(&copts.netMode, "net", "Connect a container to a network") flags.Var(&copts.netMode, "network", "Connect a container to a network") flags.MarkHidden("net") // We allow for both "--net-alias" and "--network-alias", although the latter is the recommended way. 
flags.Var(&copts.aliases, "net-alias", "Add network-scoped alias for the container") flags.Var(&copts.aliases, "network-alias", "Add network-scoped alias for the container") flags.MarkHidden("net-alias") // Logging and storage flags.StringVar(&copts.loggingDriver, "log-driver", "", "Logging driver for the container") flags.StringVar(&copts.volumeDriver, "volume-driver", "", "Optional volume driver for the container") flags.Var(&copts.loggingOpts, "log-opt", "Log driver options") flags.Var(&copts.storageOpt, "storage-opt", "Storage driver options for the container") flags.Var(&copts.tmpfs, "tmpfs", "Mount a tmpfs directory") flags.Var(&copts.volumesFrom, "volumes-from", "Mount volumes from the specified container(s)") flags.VarP(&copts.volumes, "volume", "v", "Bind mount a volume") flags.Var(&copts.mounts, "mount", "Attach a filesystem mount to the container") // Health-checking flags.StringVar(&copts.healthCmd, "health-cmd", "", "Command to run to check health") flags.DurationVar(&copts.healthInterval, "health-interval", 0, "Time between running the check (ms|s|m|h) (default 0s)") flags.IntVar(&copts.healthRetries, "health-retries", 0, "Consecutive failures needed to report unhealthy") flags.DurationVar(&copts.healthTimeout, "health-timeout", 0, "Maximum time to allow one check to run (ms|s|m|h) (default 0s)") flags.DurationVar(&copts.healthStartPeriod, "health-start-period", 0, "Start period for the container to initialize before starting health-retries countdown (ms|s|m|h) (default 0s)") flags.SetAnnotation("health-start-period", "version", []string{"1.29"}) flags.BoolVar(&copts.noHealthcheck, "no-healthcheck", false, "Disable any container-specified HEALTHCHECK") // Resource management flags.Uint16Var(&copts.blkioWeight, "blkio-weight", 0, "Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)") flags.Var(&copts.blkioWeightDevice, "blkio-weight-device", "Block IO weight (relative device weight)") flags.StringVar(&copts.containerIDFile, 
"cidfile", "", "Write the container ID to the file") flags.StringVar(&copts.cpusetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") flags.StringVar(&copts.cpusetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") flags.Int64Var(&copts.cpuCount, "cpu-count", 0, "CPU count (Windows only)") flags.SetAnnotation("cpu-count", "ostype", []string{"windows"}) flags.Int64Var(&copts.cpuPercent, "cpu-percent", 0, "CPU percent (Windows only)") flags.SetAnnotation("cpu-percent", "ostype", []string{"windows"}) flags.Int64Var(&copts.cpuPeriod, "cpu-period", 0, "Limit CPU CFS (Completely Fair Scheduler) period") flags.Int64Var(&copts.cpuQuota, "cpu-quota", 0, "Limit CPU CFS (Completely Fair Scheduler) quota") flags.Int64Var(&copts.cpuRealtimePeriod, "cpu-rt-period", 0, "Limit CPU real-time period in microseconds") flags.SetAnnotation("cpu-rt-period", "version", []string{"1.25"}) flags.Int64Var(&copts.cpuRealtimeRuntime, "cpu-rt-runtime", 0, "Limit CPU real-time runtime in microseconds") flags.SetAnnotation("cpu-rt-runtime", "version", []string{"1.25"}) flags.Int64VarP(&copts.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") flags.Var(&copts.cpus, "cpus", "Number of CPUs") flags.SetAnnotation("cpus", "version", []string{"1.25"}) flags.Var(&copts.deviceReadBps, "device-read-bps", "Limit read rate (bytes per second) from a device") flags.Var(&copts.deviceReadIOps, "device-read-iops", "Limit read rate (IO per second) from a device") flags.Var(&copts.deviceWriteBps, "device-write-bps", "Limit write rate (bytes per second) to a device") flags.Var(&copts.deviceWriteIOps, "device-write-iops", "Limit write rate (IO per second) to a device") flags.Uint64Var(&copts.ioMaxBandwidth, "io-maxbandwidth", 0, "Maximum IO bandwidth limit for the system drive (Windows only)") flags.SetAnnotation("io-maxbandwidth", "ostype", []string{"windows"}) flags.Uint64Var(&copts.ioMaxIOps, "io-maxiops", 0, "Maximum IOps limit for the system drive (Windows 
only)") flags.SetAnnotation("io-maxiops", "ostype", []string{"windows"}) flags.Var(&copts.kernelMemory, "kernel-memory", "Kernel memory limit") flags.VarP(&copts.memory, "memory", "m", "Memory limit") flags.Var(&copts.memoryReservation, "memory-reservation", "Memory soft limit") flags.Var(&copts.memorySwap, "memory-swap", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") flags.Int64Var(&copts.swappiness, "memory-swappiness", -1, "Tune container memory swappiness (0 to 100)") flags.BoolVar(&copts.oomKillDisable, "oom-kill-disable", false, "Disable OOM Killer") flags.IntVar(&copts.oomScoreAdj, "oom-score-adj", 0, "Tune host's OOM preferences (-1000 to 1000)") flags.Int64Var(&copts.pidsLimit, "pids-limit", 0, "Tune container pids limit (set -1 for unlimited)") // Low-level execution (cgroups, namespaces, ...) flags.StringVar(&copts.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container") flags.StringVar(&copts.ipcMode, "ipc", "", "IPC mode to use") flags.StringVar(&copts.isolation, "isolation", "", "Container isolation technology") flags.StringVar(&copts.pidMode, "pid", "", "PID namespace to use") flags.Var(&copts.shmSize, "shm-size", "Size of /dev/shm") flags.StringVar(&copts.utsMode, "uts", "", "UTS namespace to use") flags.StringVar(&copts.runtime, "runtime", "", "Runtime to use for this container") flags.BoolVar(&copts.init, "init", false, "Run an init inside the container that forwards signals and reaps processes") flags.SetAnnotation("init", "version", []string{"1.25"}) return copts } type containerConfig struct { Config *container.Config HostConfig *container.HostConfig NetworkingConfig *networktypes.NetworkingConfig } // parse parses the args for the specified command and generates a Config, // a HostConfig and returns them with the specified command. // If the specified args are not valid, it will return an error. 
// //nolint:gocyclo func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*containerConfig, error) { var ( attachStdin = copts.attach.Get("stdin") attachStdout = copts.attach.Get("stdout") attachStderr = copts.attach.Get("stderr") ) // Validate the input mac address if copts.macAddress != "" { if _, err := opts.ValidateMACAddress(copts.macAddress); err != nil { return nil, errors.Errorf("%s is not a valid mac address", copts.macAddress) } } if copts.stdin { attachStdin = true } // If -a is not set, attach to stdout and stderr if copts.attach.Len() == 0 { attachStdout = true attachStderr = true } var err error swappiness := copts.swappiness if swappiness != -1 && (swappiness < 0 || swappiness > 100) { return nil, errors.Errorf("invalid value: %d. Valid memory swappiness range is 0-100", swappiness) } mounts := copts.mounts.Value() if len(mounts) > 0 && copts.volumeDriver != "" { logrus.Warn("`--volume-driver` is ignored for volumes specified via `--mount`. Use `--mount type=volume,volume-driver=...` instead.") } var binds []string volumes := copts.volumes.GetMap() // add any bind targets to the list of container volumes for bind := range copts.volumes.GetMap() { parsed, _ := loader.ParseVolume(bind) if parsed.Source != "" { toBind := bind if parsed.Type == string(mounttypes.TypeBind) { if arr := strings.SplitN(bind, ":", 2); len(arr) == 2 { hostPart := arr[0] if strings.HasPrefix(hostPart, "."+string(filepath.Separator)) || hostPart == "." { if absHostPart, err := filepath.Abs(hostPart); err == nil { hostPart = absHostPart } } toBind = hostPart + ":" + arr[1] } } // after creating the bind mount we want to delete it from the copts.volumes values because // we do not want bind mounts being committed to image configs binds = append(binds, toBind) // We should delete from the map (`volumes`) here, as deleting from copts.volumes will not work if // there are duplicates entries. 
delete(volumes, bind) } } // Can't evaluate options passed into --tmpfs until we actually mount tmpfs := make(map[string]string) for _, t := range copts.tmpfs.GetSlice() { if arr := strings.SplitN(t, ":", 2); len(arr) > 1 { tmpfs[arr[0]] = arr[1] } else { tmpfs[arr[0]] = "" } } var ( runCmd strslice.StrSlice entrypoint strslice.StrSlice ) if len(copts.Args) > 0 { runCmd = strslice.StrSlice(copts.Args) } if copts.entrypoint != "" { entrypoint = strslice.StrSlice{copts.entrypoint} } else if flags.Changed("entrypoint") { // if `--entrypoint=` is parsed then Entrypoint is reset entrypoint = []string{""} } publishOpts := copts.publish.GetSlice() var ( ports map[nat.Port]struct{} portBindings map[nat.Port][]nat.PortBinding convertedOpts []string ) convertedOpts, err = convertToStandardNotation(publishOpts) if err != nil { return nil, err } ports, portBindings, err = nat.ParsePortSpecs(convertedOpts) if err != nil { return nil, err } // Merge in exposed ports to the map of published ports for _, e := range copts.expose.GetSlice() { if strings.Contains(e, ":") { return nil, errors.Errorf("invalid port format for --expose: %s", e) } // support two formats for expose, original format <portnum>/[<proto>] // or <startport-endport>/[<proto>] proto, port := nat.SplitProtoPort(e) // parse the start and end port and create a sequence of ports to expose // if expose a port, the start and end port are the same start, end, err := nat.ParsePortRange(port) if err != nil { return nil, errors.Errorf("invalid range format for --expose: %s, error: %s", e, err) } for i := start; i <= end; i++ { p, err := nat.NewPort(proto, strconv.FormatUint(i, 10)) if err != nil { return nil, err } if _, exists := ports[p]; !exists { ports[p] = struct{}{} } } } // validate and parse device mappings. 
Note we do late validation of the // device path (as opposed to during flag parsing), as at the time we are // parsing flags, we haven't yet sent a _ping to the daemon to determine // what operating system it is. deviceMappings := []container.DeviceMapping{} for _, device := range copts.devices.GetSlice() { var ( validated string deviceMapping container.DeviceMapping err error ) validated, err = validateDevice(device, serverOS) if err != nil { return nil, err } deviceMapping, err = parseDevice(validated, serverOS) if err != nil { return nil, err } deviceMappings = append(deviceMappings, deviceMapping) } // collect all the environment variables for the container envVariables, err := opts.ReadKVEnvStrings(copts.envFile.GetSlice(), copts.env.GetSlice()) if err != nil { return nil, err } // collect all the labels for the container labels, err := opts.ReadKVStrings(copts.labelsFile.GetSlice(), copts.labels.GetSlice()) if err != nil { return nil, err } pidMode := container.PidMode(copts.pidMode) if !pidMode.Valid() { return nil, errors.Errorf("--pid: invalid PID mode") } utsMode := container.UTSMode(copts.utsMode) if !utsMode.Valid() { return nil, errors.Errorf("--uts: invalid UTS mode") } usernsMode := container.UsernsMode(copts.usernsMode) if !usernsMode.Valid() { return nil, errors.Errorf("--userns: invalid USER mode") } cgroupnsMode := container.CgroupnsMode(copts.cgroupnsMode) if !cgroupnsMode.Valid() { return nil, errors.Errorf("--cgroupns: invalid CGROUP mode") } restartPolicy, err := opts.ParseRestartPolicy(copts.restartPolicy) if err != nil { return nil, err } loggingOpts, err := parseLoggingOpts(copts.loggingDriver, copts.loggingOpts.GetSlice()) if err != nil { return nil, err } securityOpts, err := parseSecurityOpts(copts.securityOpt.GetSlice()) if err != nil { return nil, err } securityOpts, maskedPaths, readonlyPaths := parseSystemPaths(securityOpts) storageOpts, err := parseStorageOpts(copts.storageOpt.GetSlice()) if err != nil { return nil, err } // 
Healthcheck var healthConfig *container.HealthConfig haveHealthSettings := copts.healthCmd != "" || copts.healthInterval != 0 || copts.healthTimeout != 0 || copts.healthStartPeriod != 0 || copts.healthRetries != 0 if copts.noHealthcheck { if haveHealthSettings { return nil, errors.Errorf("--no-healthcheck conflicts with --health-* options") } test := strslice.StrSlice{"NONE"} healthConfig = &container.HealthConfig{Test: test} } else if haveHealthSettings { var probe strslice.StrSlice if copts.healthCmd != "" { args := []string{"CMD-SHELL", copts.healthCmd} probe = strslice.StrSlice(args) } if copts.healthInterval < 0 { return nil, errors.Errorf("--health-interval cannot be negative") } if copts.healthTimeout < 0 { return nil, errors.Errorf("--health-timeout cannot be negative") } if copts.healthRetries < 0 { return nil, errors.Errorf("--health-retries cannot be negative") } if copts.healthStartPeriod < 0 { return nil, fmt.Errorf("--health-start-period cannot be negative") } healthConfig = &container.HealthConfig{ Test: probe, Interval: copts.healthInterval, Timeout: copts.healthTimeout, StartPeriod: copts.healthStartPeriod, Retries: copts.healthRetries, } } resources := container.Resources{ CgroupParent: copts.cgroupParent, Memory: copts.memory.Value(), MemoryReservation: copts.memoryReservation.Value(), MemorySwap: copts.memorySwap.Value(), MemorySwappiness: &copts.swappiness, KernelMemory: copts.kernelMemory.Value(), OomKillDisable: &copts.oomKillDisable, NanoCPUs: copts.cpus.Value(), CPUCount: copts.cpuCount, CPUPercent: copts.cpuPercent, CPUShares: copts.cpuShares, CPUPeriod: copts.cpuPeriod, CpusetCpus: copts.cpusetCpus, CpusetMems: copts.cpusetMems, CPUQuota: copts.cpuQuota, CPURealtimePeriod: copts.cpuRealtimePeriod, CPURealtimeRuntime: copts.cpuRealtimeRuntime, PidsLimit: &copts.pidsLimit, BlkioWeight: copts.blkioWeight, BlkioWeightDevice: copts.blkioWeightDevice.GetList(), BlkioDeviceReadBps: copts.deviceReadBps.GetList(), BlkioDeviceWriteBps: 
copts.deviceWriteBps.GetList(), BlkioDeviceReadIOps: copts.deviceReadIOps.GetList(), BlkioDeviceWriteIOps: copts.deviceWriteIOps.GetList(), IOMaximumIOps: copts.ioMaxIOps, IOMaximumBandwidth: copts.ioMaxBandwidth, Ulimits: copts.ulimits.GetList(), DeviceCgroupRules: copts.deviceCgroupRules.GetSlice(), Devices: deviceMappings, DeviceRequests: copts.gpus.Value(), } config := &container.Config{ Hostname: copts.hostname, Domainname: copts.domainname, ExposedPorts: ports, User: copts.user, Tty: copts.tty, // TODO: deprecated, it comes from -n, --networking // it's still needed internally to set the network to disabled // if e.g. bridge is none in daemon opts, and in inspect NetworkDisabled: false, OpenStdin: copts.stdin, AttachStdin: attachStdin, AttachStdout: attachStdout, AttachStderr: attachStderr, Env: envVariables, Cmd: runCmd, Image: copts.Image, Volumes: volumes, MacAddress: copts.macAddress, Entrypoint: entrypoint, WorkingDir: copts.workingDir, Labels: opts.ConvertKVStringsToMap(labels), StopSignal: copts.stopSignal, Healthcheck: healthConfig, } if flags.Changed("stop-timeout") { config.StopTimeout = &copts.stopTimeout } hostConfig := &container.HostConfig{ Binds: binds, ContainerIDFile: copts.containerIDFile, OomScoreAdj: copts.oomScoreAdj, AutoRemove: copts.autoRemove, Privileged: copts.privileged, PortBindings: portBindings, Links: copts.links.GetSlice(), PublishAllPorts: copts.publishAll, // Make sure the dns fields are never nil. // New containers don't ever have those fields nil, // but pre created containers can still have those nil values. // See https://github.com/docker/docker/pull/17779 // for a more detailed explanation on why we don't want that. 
DNS: copts.dns.GetAllOrEmpty(), DNSSearch: copts.dnsSearch.GetAllOrEmpty(), DNSOptions: copts.dnsOptions.GetAllOrEmpty(), ExtraHosts: copts.extraHosts.GetSlice(), VolumesFrom: copts.volumesFrom.GetSlice(), IpcMode: container.IpcMode(copts.ipcMode), NetworkMode: container.NetworkMode(copts.netMode.NetworkMode()), PidMode: pidMode, UTSMode: utsMode, UsernsMode: usernsMode, CgroupnsMode: cgroupnsMode, CapAdd: strslice.StrSlice(copts.capAdd.GetSlice()), CapDrop: strslice.StrSlice(copts.capDrop.GetSlice()), GroupAdd: copts.groupAdd.GetSlice(), RestartPolicy: restartPolicy, SecurityOpt: securityOpts, StorageOpt: storageOpts, ReadonlyRootfs: copts.readonlyRootfs, LogConfig: container.LogConfig{Type: copts.loggingDriver, Config: loggingOpts}, VolumeDriver: copts.volumeDriver, Isolation: container.Isolation(copts.isolation), ShmSize: copts.shmSize.Value(), Resources: resources, Tmpfs: tmpfs, Sysctls: copts.sysctls.GetAll(), Runtime: copts.runtime, Mounts: mounts, MaskedPaths: maskedPaths, ReadonlyPaths: readonlyPaths, } if copts.autoRemove && !hostConfig.RestartPolicy.IsNone() { return nil, errors.Errorf("Conflicting options: --restart and --rm") } // only set this value if the user provided the flag, else it should default to nil if flags.Changed("init") { hostConfig.Init = &copts.init } // When allocating stdin in attached mode, close stdin at client disconnect if config.OpenStdin && config.AttachStdin { config.StdinOnce = true } networkingConfig := &networktypes.NetworkingConfig{ EndpointsConfig: make(map[string]*networktypes.EndpointSettings), } networkingConfig.EndpointsConfig, err = parseNetworkOpts(copts) if err != nil { return nil, err } return &containerConfig{ Config: config, HostConfig: hostConfig, NetworkingConfig: networkingConfig, }, nil } // parseNetworkOpts converts --network advanced options to endpoint-specs, and combines // them with the old --network-alias and --links. If returns an error if conflicting options // are found. 
// // this function may return _multiple_ endpoints, which is not currently supported // by the daemon, but may be in future; it's up to the daemon to produce an error // in case that is not supported. func parseNetworkOpts(copts *containerOptions) (map[string]*networktypes.EndpointSettings, error) { var ( endpoints = make(map[string]*networktypes.EndpointSettings, len(copts.netMode.Value())) hasUserDefined, hasNonUserDefined bool ) for i, n := range copts.netMode.Value() { if container.NetworkMode(n.Target).IsUserDefined() { hasUserDefined = true } else { hasNonUserDefined = true } if i == 0 { // The first network corresponds with what was previously the "only" // network, and what would be used when using the non-advanced syntax // `--network-alias`, `--link`, `--ip`, `--ip6`, and `--link-local-ip` // are set on this network, to preserve backward compatibility with // the non-advanced notation if err := applyContainerOptions(&n, copts); err != nil { return nil, err } } ep, err := parseNetworkAttachmentOpt(n) if err != nil { return nil, err } if _, ok := endpoints[n.Target]; ok { return nil, errdefs.InvalidParameter(errors.Errorf("network %q is specified multiple times", n.Target)) } // For backward compatibility: if no custom options are provided for the network, // and only a single network is specified, omit the endpoint-configuration // on the client (the daemon will still create it when creating the container) if i == 0 && len(copts.netMode.Value()) == 1 { if ep == nil || reflect.DeepEqual(*ep, networktypes.EndpointSettings{}) { continue } } endpoints[n.Target] = ep } if hasUserDefined && hasNonUserDefined { return nil, errdefs.InvalidParameter(errors.New("conflicting options: cannot attach both user-defined and non-user-defined network-modes")) } return endpoints, nil } func applyContainerOptions(n *opts.NetworkAttachmentOpts, copts *containerOptions) error { // TODO should copts.MacAddress actually be set on the first network? 
(currently it's not) // TODO should we error if _any_ advanced option is used? (i.e. forbid to combine advanced notation with the "old" flags (`--network-alias`, `--link`, `--ip`, `--ip6`)? if len(n.Aliases) > 0 && copts.aliases.Len() > 0 { return errdefs.InvalidParameter(errors.New("conflicting options: cannot specify both --network-alias and per-network alias")) } if len(n.Links) > 0 && copts.links.Len() > 0 { return errdefs.InvalidParameter(errors.New("conflicting options: cannot specify both --link and per-network links")) } if n.IPv4Address != "" && copts.ipv4Address != "" { return errdefs.InvalidParameter(errors.New("conflicting options: cannot specify both --ip and per-network IPv4 address")) } if n.IPv6Address != "" && copts.ipv6Address != "" { return errdefs.InvalidParameter(errors.New("conflicting options: cannot specify both --ip6 and per-network IPv6 address")) } if copts.aliases.Len() > 0 { n.Aliases = make([]string, copts.aliases.Len()) copy(n.Aliases, copts.aliases.GetSlice()) } if copts.links.Len() > 0 { n.Links = make([]string, copts.links.Len()) copy(n.Links, copts.links.GetSlice()) } if copts.ipv4Address != "" { n.IPv4Address = copts.ipv4Address } if copts.ipv6Address != "" { n.IPv6Address = copts.ipv6Address } // TODO should linkLocalIPs be added to the _first_ network only, or to _all_ networks? (should this be a per-network option as well?)
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
true
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/docker_pull_test.go
pkg/container/docker_pull_test.go
package container import ( "context" "testing" "github.com/docker/cli/cli/config" log "github.com/sirupsen/logrus" assert "github.com/stretchr/testify/assert" ) func init() { log.SetLevel(log.DebugLevel) } func TestCleanImage(t *testing.T) { tables := []struct { imageIn string imageOut string }{ {"myhost.com/foo/bar", "myhost.com/foo/bar"}, {"localhost:8000/canonical/ubuntu", "localhost:8000/canonical/ubuntu"}, {"localhost/canonical/ubuntu:latest", "localhost/canonical/ubuntu:latest"}, {"localhost:8000/canonical/ubuntu:latest", "localhost:8000/canonical/ubuntu:latest"}, {"ubuntu", "docker.io/library/ubuntu"}, {"ubuntu:18.04", "docker.io/library/ubuntu:18.04"}, {"cibuilds/hugo:0.53", "docker.io/cibuilds/hugo:0.53"}, } for _, table := range tables { imageOut := cleanImage(context.Background(), table.imageIn) assert.Equal(t, table.imageOut, imageOut) } } func TestGetImagePullOptions(t *testing.T) { ctx := context.Background() config.SetDir("/non-existent/docker") options, err := getImagePullOptions(ctx, NewDockerPullExecutorInput{}) assert.Nil(t, err, "Failed to create ImagePullOptions") assert.Equal(t, "", options.RegistryAuth, "RegistryAuth should be empty if no username or password is set") options, err = getImagePullOptions(ctx, NewDockerPullExecutorInput{ Image: "", Username: "username", Password: "password", }) assert.Nil(t, err, "Failed to create ImagePullOptions") assert.Equal(t, "eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwicGFzc3dvcmQiOiJwYXNzd29yZCJ9", options.RegistryAuth, "Username and Password should be provided") config.SetDir("testdata/docker-pull-options") options, err = getImagePullOptions(ctx, NewDockerPullExecutorInput{ Image: "nektos/act", }) assert.Nil(t, err, "Failed to create ImagePullOptions") assert.Equal(t, "eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwicGFzc3dvcmQiOiJwYXNzd29yZFxuIiwic2VydmVyYWRkcmVzcyI6Imh0dHBzOi8vaW5kZXguZG9ja2VyLmlvL3YxLyJ9", options.RegistryAuth, "RegistryAuth should be taken from local docker config") }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/host_environment_test.go
pkg/container/host_environment_test.go
package container import ( "archive/tar" "context" "io" "os" "path" "path/filepath" "testing" "github.com/stretchr/testify/assert" ) // Type assert HostEnvironment implements ExecutionsEnvironment var _ ExecutionsEnvironment = &HostEnvironment{} func TestCopyDir(t *testing.T) { dir, err := os.MkdirTemp("", "test-host-env-*") assert.NoError(t, err) defer os.RemoveAll(dir) ctx := context.Background() e := &HostEnvironment{ Path: filepath.Join(dir, "path"), TmpDir: filepath.Join(dir, "tmp"), ToolCache: filepath.Join(dir, "tool_cache"), ActPath: filepath.Join(dir, "act_path"), StdOut: os.Stdout, Workdir: path.Join("testdata", "scratch"), } _ = os.MkdirAll(e.Path, 0700) _ = os.MkdirAll(e.TmpDir, 0700) _ = os.MkdirAll(e.ToolCache, 0700) _ = os.MkdirAll(e.ActPath, 0700) err = e.CopyDir(e.Workdir, e.Path, true)(ctx) assert.NoError(t, err) } func TestGetContainerArchive(t *testing.T) { dir, err := os.MkdirTemp("", "test-host-env-*") assert.NoError(t, err) defer os.RemoveAll(dir) ctx := context.Background() e := &HostEnvironment{ Path: filepath.Join(dir, "path"), TmpDir: filepath.Join(dir, "tmp"), ToolCache: filepath.Join(dir, "tool_cache"), ActPath: filepath.Join(dir, "act_path"), StdOut: os.Stdout, Workdir: path.Join("testdata", "scratch"), } _ = os.MkdirAll(e.Path, 0700) _ = os.MkdirAll(e.TmpDir, 0700) _ = os.MkdirAll(e.ToolCache, 0700) _ = os.MkdirAll(e.ActPath, 0700) expectedContent := []byte("sdde/7sh") err = os.WriteFile(filepath.Join(e.Path, "action.yml"), expectedContent, 0600) assert.NoError(t, err) archive, err := e.GetContainerArchive(ctx, e.Path) assert.NoError(t, err) defer archive.Close() reader := tar.NewReader(archive) h, err := reader.Next() assert.NoError(t, err) assert.Equal(t, "action.yml", h.Name) content, err := io.ReadAll(reader) assert.NoError(t, err) assert.Equal(t, expectedContent, content) _, err = reader.Next() assert.ErrorIs(t, err, io.EOF) }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/docker_build.go
pkg/container/docker_build.go
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd)) package container import ( "context" "io" "os" "path/filepath" "github.com/docker/docker/api/types/build" "github.com/moby/go-archive" "github.com/moby/patternmatcher" "github.com/moby/patternmatcher/ignorefile" "github.com/nektos/act/pkg/common" ) // NewDockerBuildExecutor function to create a run executor for the container func NewDockerBuildExecutor(input NewDockerBuildExecutorInput) common.Executor { return func(ctx context.Context) error { logger := common.Logger(ctx) if input.Platform != "" { logger.Infof("%sdocker build -t %s --platform %s %s", logPrefix, input.ImageTag, input.Platform, input.ContextDir) } else { logger.Infof("%sdocker build -t %s %s", logPrefix, input.ImageTag, input.ContextDir) } if common.Dryrun(ctx) { return nil } cli, err := GetDockerClient(ctx) if err != nil { return err } defer cli.Close() logger.Debugf("Building image from '%v'", input.ContextDir) tags := []string{input.ImageTag} options := build.ImageBuildOptions{ Tags: tags, Remove: true, Platform: input.Platform, AuthConfigs: LoadDockerAuthConfigs(ctx), Dockerfile: input.Dockerfile, } var buildContext io.ReadCloser if input.BuildContext != nil { buildContext = io.NopCloser(input.BuildContext) } else { buildContext, err = createBuildContext(ctx, input.ContextDir, input.Dockerfile) } if err != nil { return err } defer buildContext.Close() logger.Debugf("Creating image from context dir '%s' with tag '%s' and platform '%s'", input.ContextDir, input.ImageTag, input.Platform) resp, err := cli.ImageBuild(ctx, buildContext, options) err = logDockerResponse(logger, resp.Body, err != nil) if err != nil { return err } return nil } } func createBuildContext(ctx context.Context, contextDir string, relDockerfile string) (io.ReadCloser, error) { common.Logger(ctx).Debugf("Creating archive for build context dir '%s' with relative dockerfile '%s'", contextDir, relDockerfile) // And canonicalize dockerfile name to a 
platform-independent one relDockerfile = filepath.ToSlash(relDockerfile) f, err := os.Open(filepath.Join(contextDir, ".dockerignore")) if err != nil && !os.IsNotExist(err) { return nil, err } defer f.Close() var excludes []string if err == nil { excludes, err = ignorefile.ReadAll(f) if err != nil { return nil, err } } // If .dockerignore mentions .dockerignore or the Dockerfile // then make sure we send both files over to the daemon // because Dockerfile is, obviously, needed no matter what, and // .dockerignore is needed to know if either one needs to be // removed. The daemon will remove them for us, if needed, after it // parses the Dockerfile. Ignore errors here, as they will have been // caught by validateContextDirectory above. var includes = []string{"."} keepThem1, _ := patternmatcher.Matches(".dockerignore", excludes) keepThem2, _ := patternmatcher.Matches(relDockerfile, excludes) if keepThem1 || keepThem2 { includes = append(includes, ".dockerignore", relDockerfile) } compression := archive.Uncompressed buildCtx, err := archive.TarWithOptions(contextDir, &archive.TarOptions{ Compression: compression, ExcludePatterns: excludes, IncludeFiles: includes, }) if err != nil { return nil, err } return buildCtx, nil }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/docker_logger.go
pkg/container/docker_logger.go
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd)) package container import ( "bufio" "encoding/json" "errors" "io" "github.com/sirupsen/logrus" ) type dockerMessage struct { ID string `json:"id"` Stream string `json:"stream"` Error string `json:"error"` ErrorDetail struct { Message string } Status string `json:"status"` Progress string `json:"progress"` } const logPrefix = " \U0001F433 " func logDockerResponse(logger logrus.FieldLogger, dockerResponse io.ReadCloser, isError bool) error { if dockerResponse == nil { return nil } defer dockerResponse.Close() scanner := bufio.NewScanner(dockerResponse) msg := dockerMessage{} for scanner.Scan() { line := scanner.Bytes() msg.ID = "" msg.Stream = "" msg.Error = "" msg.ErrorDetail.Message = "" msg.Status = "" msg.Progress = "" if err := json.Unmarshal(line, &msg); err != nil { writeLog(logger, false, "Unable to unmarshal line [%s] ==> %v", string(line), err) continue } if msg.Error != "" { writeLog(logger, isError, "%s", msg.Error) return errors.New(msg.Error) } if msg.ErrorDetail.Message != "" { writeLog(logger, isError, "%s", msg.ErrorDetail.Message) return errors.New(msg.Error) } if msg.Status != "" { if msg.Progress != "" { writeLog(logger, isError, "%s :: %s :: %s\n", msg.Status, msg.ID, msg.Progress) } else { writeLog(logger, isError, "%s :: %s\n", msg.Status, msg.ID) } } else if msg.Stream != "" { writeLog(logger, isError, "%s", msg.Stream) } else { writeLog(logger, false, "Unable to handle line: %s", string(line)) } } return nil } func writeLog(logger logrus.FieldLogger, isError bool, format string, args ...interface{}) { if isError { logger.Errorf(format, args...) } else { logger.Debugf(format, args...) } }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/util_openbsd_mips64.go
pkg/container/util_openbsd_mips64.go
package container

import (
	"errors"
	"os"
	"syscall"
)

// getSysProcAttr returns process attributes that place the child in its own
// process group. cmdLine and tty are accepted only for signature parity with
// the other platform implementations; they are unused on this platform.
func getSysProcAttr(cmdLine string, tty bool) *syscall.SysProcAttr {
	return &syscall.SysProcAttr{
		Setpgid: true,
	}
}

// openPty reports that pseudo-terminal allocation is not supported on
// openbsd/mips64.
func openPty() (*os.File, *os.File, error) {
	// Error strings are lowercase per Go convention (staticcheck ST1005).
	return nil, nil, errors.New("unsupported")
}
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/util_plan9.go
pkg/container/util_plan9.go
package container import ( "errors" "os" "syscall" ) func getSysProcAttr(cmdLine string, tty bool) *syscall.SysProcAttr { return &syscall.SysProcAttr{ Rfork: syscall.RFNOTEG, } } func openPty() (*os.File, *os.File, error) { return nil, nil, errors.New("Unsupported") }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/docker_images.go
pkg/container/docker_images.go
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))

package container

import (
	"context"
	"fmt"

	cerrdefs "github.com/containerd/errdefs"
	"github.com/docker/docker/api/types/image"

	"github.com/nektos/act/pkg/common"
)

// ImageExistsLocally returns a boolean indicating if an image with the
// requested name, tag and architecture exists in the local docker image store
func ImageExistsLocally(ctx context.Context, imageName string, platform string) (bool, error) {
	cli, err := GetDockerClient(ctx)
	if err != nil {
		return false, err
	}
	defer cli.Close()

	inspectImage, err := cli.ImageInspect(ctx, imageName)
	if cerrdefs.IsNotFound(err) {
		// A missing image is not an error for this check; report "not found".
		return false, nil
	} else if err != nil {
		return false, err
	}

	// Compare the stored image's "os/arch" against the requested platform.
	// An empty or "any" platform accepts whatever is present locally.
	imagePlatform := fmt.Sprintf("%s/%s", inspectImage.Os, inspectImage.Architecture)
	if platform == "" || platform == "any" || imagePlatform == platform {
		return true, nil
	}

	logger := common.Logger(ctx)
	logger.Infof("image found but platform does not match: %s (image) != %s (platform)\n", imagePlatform, platform)
	return false, nil
}

// RemoveImage removes image from local store, the function is used to run different
// container image architectures
func RemoveImage(ctx context.Context, imageName string, force bool, pruneChildren bool) (bool, error) {
	cli, err := GetDockerClient(ctx)
	if err != nil {
		return false, err
	}
	defer cli.Close()

	inspectImage, err := cli.ImageInspect(ctx, imageName)
	if cerrdefs.IsNotFound(err) {
		// Nothing to remove; report false without treating it as a failure.
		return false, nil
	} else if err != nil {
		return false, err
	}

	// Remove by resolved image ID (rather than the name passed in) so any
	// tag that points at the same image is handled consistently.
	if _, err = cli.ImageRemove(ctx, inspectImage.ID, image.RemoveOptions{
		Force:         force,
		PruneChildren: pruneChildren,
	}); err != nil {
		return false, err
	}

	return true, nil
}
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/linux_container_environment_extensions_test.go
pkg/container/linux_container_environment_extensions_test.go
package container

import (
	"fmt"
	"os"
	"runtime"
	"strings"
	"testing"

	log "github.com/sirupsen/logrus"
	"github.com/stretchr/testify/assert"
)

// TestContainerPath exercises ToContainerPath on both kinds of host:
// on Windows, drive-letter paths are expected to map to /mnt/<letter>/...;
// on other systems, absolute paths pass through (trailing slash trimmed)
// and "." resolves to the current working directory.
func TestContainerPath(t *testing.T) {
	// containerPathJob pairs an input source path (plus an optional working
	// directory to chdir into first) with the expected container path.
	type containerPathJob struct {
		destinationPath string
		sourcePath      string
		workDir         string
	}

	linuxcontainerext := &LinuxContainerEnvironmentExtensions{}

	if runtime.GOOS == "windows" {
		// Remember the current directory so it can be restored after the
		// chdir-based cases below.
		cwd, err := os.Getwd()
		if err != nil {
			log.Error(err)
		}

		rootDrive := os.Getenv("SystemDrive")
		// e.g. "C:" -> "c", used to build the expected /mnt/<letter> prefix.
		rootDriveLetter := strings.ReplaceAll(strings.ToLower(rootDrive), `:`, "")
		for _, v := range []containerPathJob{
			{"/mnt/c/Users/act/go/src/github.com/nektos/act", "C:\\Users\\act\\go\\src\\github.com\\nektos\\act\\", ""},
			{"/mnt/f/work/dir", `F:\work\dir`, ""},
			{"/mnt/c/windows/to/unix", "windows\\to\\unix", fmt.Sprintf("%s\\", rootDrive)},
			{fmt.Sprintf("/mnt/%v/act", rootDriveLetter), "act", fmt.Sprintf("%s\\", rootDrive)},
		} {
			if v.workDir != "" {
				// Relative source paths are resolved against the process
				// working directory, so switch to it before converting.
				if err := os.Chdir(v.workDir); err != nil {
					log.Error(err)
					t.Fail()
				}
			}
			assert.Equal(t, v.destinationPath, linuxcontainerext.ToContainerPath(v.sourcePath))
		}

		// Restore the original working directory for subsequent tests.
		if err := os.Chdir(cwd); err != nil {
			log.Error(err)
		}
	} else {
		cwd, err := os.Getwd()
		if err != nil {
			log.Error(err)
		}
		for _, v := range []containerPathJob{
			{"/home/act/go/src/github.com/nektos/act", "/home/act/go/src/github.com/nektos/act", ""},
			{"/home/act", `/home/act/`, ""},
			{cwd, ".", ""},
		} {
			assert.Equal(t, v.destinationPath, linuxcontainerext.ToContainerPath(v.sourcePath))
		}
	}
}

// typeAssertMockContainer embeds Container together with the Linux
// extensions so the compile-time assertion below can verify the combination.
type typeAssertMockContainer struct {
	Container
	LinuxContainerEnvironmentExtensions
}

// Type assert Container + LinuxContainerEnvironmentExtensions implements ExecutionsEnvironment
var _ ExecutionsEnvironment = &typeAssertMockContainer{}
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/docker_cli_test.go
pkg/container/docker_cli_test.go
// This file is exact copy of https://github.com/docker/cli/blob/9ac8584acfd501c3f4da0e845e3a40ed15c85041/cli/command/container/opts_test.go with: // * appended with license information // * commented out case 'invalid-mixed-network-types' in test TestParseNetworkConfig // // docker/cli is licensed under the Apache License, Version 2.0. // See DOCKER_LICENSE for the full license text. // //nolint:unparam,whitespace,depguard,dupl,gocritic package container import ( "fmt" "io" "os" "runtime" "strings" "testing" "time" "github.com/docker/docker/api/types/container" networktypes "github.com/docker/docker/api/types/network" "github.com/docker/go-connections/nat" "github.com/pkg/errors" "github.com/spf13/pflag" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) func TestValidateAttach(t *testing.T) { valid := []string{ "stdin", "stdout", "stderr", "STDIN", "STDOUT", "STDERR", } if _, err := validateAttach("invalid"); err == nil { t.Fatal("Expected error with [valid streams are STDIN, STDOUT and STDERR], got nothing") } for _, attach := range valid { value, err := validateAttach(attach) if err != nil { t.Fatal(err) } if value != strings.ToLower(attach) { t.Fatalf("Expected [%v], got [%v]", attach, value) } } } func parseRun(args []string) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { flags, copts := setupRunFlags() if err := flags.Parse(args); err != nil { return nil, nil, nil, err } // TODO: fix tests to accept ContainerConfig containerConfig, err := parse(flags, copts, runtime.GOOS) if err != nil { return nil, nil, nil, err } return containerConfig.Config, containerConfig.HostConfig, containerConfig.NetworkingConfig, err } func setupRunFlags() (*pflag.FlagSet, *containerOptions) { flags := pflag.NewFlagSet("run", pflag.ContinueOnError) flags.SetOutput(io.Discard) flags.Usage = nil copts := addFlags(flags) return flags, copts } func mustParse(t *testing.T, args string) (*container.Config, 
*container.HostConfig, *networktypes.NetworkingConfig) { t.Helper() config, hostConfig, networkingConfig, err := parseRun(append(strings.Split(args, " "), "ubuntu", "bash")) assert.NilError(t, err) return config, hostConfig, networkingConfig } func TestParseRunLinks(t *testing.T) { if _, hostConfig, _ := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links) } if _, hostConfig, _ := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" { t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links) } if _, hostConfig, _ := mustParse(t, ""); len(hostConfig.Links) != 0 { t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links) } } func TestParseRunAttach(t *testing.T) { tests := []struct { input string expected container.Config }{ { input: "", expected: container.Config{ AttachStdout: true, AttachStderr: true, }, }, { input: "-i", expected: container.Config{ AttachStdin: true, AttachStdout: true, AttachStderr: true, }, }, { input: "-a stdin", expected: container.Config{ AttachStdin: true, }, }, { input: "-a stdin -a stdout", expected: container.Config{ AttachStdin: true, AttachStdout: true, }, }, { input: "-a stdin -a stdout -a stderr", expected: container.Config{ AttachStdin: true, AttachStdout: true, AttachStderr: true, }, }, } for _, tc := range tests { t.Run(tc.input, func(t *testing.T) { config, _, _ := mustParse(t, tc.input) assert.Equal(t, config.AttachStdin, tc.expected.AttachStdin) assert.Equal(t, config.AttachStdout, tc.expected.AttachStdout) assert.Equal(t, config.AttachStderr, tc.expected.AttachStderr) }) } } func TestParseRunWithInvalidArgs(t *testing.T) { tests := []struct { args []string error string }{ { args: []string{"-a", "ubuntu", "bash"}, error: `invalid argument "ubuntu" for "-a, 
--attach" flag: valid streams are STDIN, STDOUT and STDERR`, }, { args: []string{"-a", "invalid", "ubuntu", "bash"}, error: `invalid argument "invalid" for "-a, --attach" flag: valid streams are STDIN, STDOUT and STDERR`, }, { args: []string{"-a", "invalid", "-a", "stdout", "ubuntu", "bash"}, error: `invalid argument "invalid" for "-a, --attach" flag: valid streams are STDIN, STDOUT and STDERR`, }, { args: []string{"-a", "stdout", "-a", "stderr", "-z", "ubuntu", "bash"}, error: `unknown shorthand flag: 'z' in -z`, }, { args: []string{"-a", "stdin", "-z", "ubuntu", "bash"}, error: `unknown shorthand flag: 'z' in -z`, }, { args: []string{"-a", "stdout", "-z", "ubuntu", "bash"}, error: `unknown shorthand flag: 'z' in -z`, }, { args: []string{"-a", "stderr", "-z", "ubuntu", "bash"}, error: `unknown shorthand flag: 'z' in -z`, }, { args: []string{"-z", "--rm", "ubuntu", "bash"}, error: `unknown shorthand flag: 'z' in -z`, }, } flags, _ := setupRunFlags() for _, tc := range tests { t.Run(strings.Join(tc.args, " "), func(t *testing.T) { assert.Error(t, flags.Parse(tc.args), tc.error) }) } } //nolint:gocyclo func TestParseWithVolumes(t *testing.T) { // A single volume arr, tryit := setupPlatformVolume([]string{`/tmp`}, []string{`c:\tmp`}) if config, hostConfig, _ := mustParse(t, tryit); hostConfig.Binds != nil { t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds) } else if _, exists := config.Volumes[arr[0]]; !exists { t.Fatalf("Error parsing volume flags, %q is missing from volumes. Received %v", tryit, config.Volumes) } // Two volumes arr, tryit = setupPlatformVolume([]string{`/tmp`, `/var`}, []string{`c:\tmp`, `c:\var`}) if config, hostConfig, _ := mustParse(t, tryit); hostConfig.Binds != nil { t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. 
Received %v", tryit, hostConfig.Binds) } else if _, exists := config.Volumes[arr[0]]; !exists { t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[0], config.Volumes) } else if _, exists := config.Volumes[arr[1]]; !exists { t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[1], config.Volumes) } // A single bind mount arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`}) if config, hostConfig, _ := mustParse(t, tryit); hostConfig.Binds == nil || hostConfig.Binds[0] != arr[0] { t.Fatalf("Error parsing volume flags, %q should mount-bind the path before the colon into the path after the colon. Received %v %v", arr[0], hostConfig.Binds, config.Volumes) } // Two bind mounts. arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/hostVar:/containerVar`}, []string{os.Getenv("ProgramData") + `:c:\ContainerPD`, os.Getenv("TEMP") + `:c:\containerTmp`}) if _, hostConfig, _ := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) } // Two bind mounts, first read-only, second read-write. // TODO Windows: The Windows version uses read-write as that's the only mode it supports. Can change this post TP4 arr, tryit = setupPlatformVolume( []string{`/hostTmp:/containerTmp:ro`, `/hostVar:/containerVar:rw`}, []string{os.Getenv("TEMP") + `:c:\containerTmp:rw`, os.Getenv("ProgramData") + `:c:\ContainerPD:rw`}) if _, hostConfig, _ := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. 
Received %v", arr[0], arr[1], hostConfig.Binds) } // Similar to previous test but with alternate modes which are only supported by Linux if runtime.GOOS != "windows" { arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro,Z`, `/hostVar:/containerVar:rw,Z`}, []string{}) if _, hostConfig, _ := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) } arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:Z`, `/hostVar:/containerVar:z`}, []string{}) if _, hostConfig, _ := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) } } // One bind mount and one volume arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/containerVar`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`, `c:\containerTmp`}) if config, hostConfig, _ := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] { t.Fatalf("Error parsing volume flags, %s and %s should only one and only one bind mount %s. Received %s", arr[0], arr[1], arr[0], hostConfig.Binds) } else if _, exists := config.Volumes[arr[1]]; !exists { t.Fatalf("Error parsing volume flags %s and %s. %s is missing from volumes. 
Received %v", arr[0], arr[1], arr[1], config.Volumes) } // Root to non-c: drive letter (Windows specific) if runtime.GOOS == "windows" { arr, tryit = setupPlatformVolume([]string{}, []string{os.Getenv("SystemDrive") + `\:d:`}) if config, hostConfig, _ := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] || len(config.Volumes) != 0 { t.Fatalf("Error parsing %s. Should have a single bind mount and no volumes", arr[0]) } } } // setupPlatformVolume takes two arrays of volume specs - a Unix style // spec and a Windows style spec. Depending on the platform being unit tested, // it returns one of them, along with a volume string that would be passed // on the docker CLI (e.g. -v /bar -v /foo). func setupPlatformVolume(u []string, w []string) ([]string, string) { var a []string if runtime.GOOS == "windows" { a = w } else { a = u } s := "" for _, v := range a { s = s + "-v " + v + " " } return a, s } // check if (a == c && b == d) || (a == d && b == c) // because maps are randomized func compareRandomizedStrings(a, b, c, d string) error { if a == c && b == d { return nil } if a == d && b == c { return nil } return errors.Errorf("strings don't match") } // Simple parse with MacAddress validation func TestParseWithMacAddress(t *testing.T) { invalidMacAddress := "--mac-address=invalidMacAddress" validMacAddress := "--mac-address=92:d0:c6:0a:29:33" if _, _, _, err := parseRun([]string{invalidMacAddress, "img", "cmd"}); err != nil && err.Error() != "invalidMacAddress is not a valid mac address" { t.Fatalf("Expected an error with %v mac-address, got %v", invalidMacAddress, err) } config, hostConfig, _ := mustParse(t, validMacAddress) fmt.Printf("MacAddress: %+v\n", hostConfig) assert.Equal(t, "92:d0:c6:0a:29:33", config.MacAddress) //nolint:staticcheck } func TestRunFlagsParseWithMemory(t *testing.T) { flags, _ := setupRunFlags() args := []string{"--memory=invalid", "img", "cmd"} err := flags.Parse(args) 
assert.ErrorContains(t, err, `invalid argument "invalid" for "-m, --memory" flag`) _, hostconfig, _ := mustParse(t, "--memory=1G") assert.Check(t, is.Equal(int64(1073741824), hostconfig.Memory)) } func TestParseWithMemorySwap(t *testing.T) { flags, _ := setupRunFlags() args := []string{"--memory-swap=invalid", "img", "cmd"} err := flags.Parse(args) assert.ErrorContains(t, err, `invalid argument "invalid" for "--memory-swap" flag`) _, hostconfig, _ := mustParse(t, "--memory-swap=1G") assert.Check(t, is.Equal(int64(1073741824), hostconfig.MemorySwap)) _, hostconfig, _ = mustParse(t, "--memory-swap=-1") assert.Check(t, is.Equal(int64(-1), hostconfig.MemorySwap)) } func TestParseHostname(t *testing.T) { validHostnames := map[string]string{ "hostname": "hostname", "host-name": "host-name", "hostname123": "hostname123", "123hostname": "123hostname", "hostname-of-63-bytes-long-should-be-valid-and-without-any-error": "hostname-of-63-bytes-long-should-be-valid-and-without-any-error", } hostnameWithDomain := "--hostname=hostname.domainname" hostnameWithDomainTld := "--hostname=hostname.domainname.tld" for hostname, expectedHostname := range validHostnames { if config, _, _ := mustParse(t, fmt.Sprintf("--hostname=%s", hostname)); config.Hostname != expectedHostname { t.Fatalf("Expected the config to have 'hostname' as %q, got %q", expectedHostname, config.Hostname) } } if config, _, _ := mustParse(t, hostnameWithDomain); config.Hostname != "hostname.domainname" || config.Domainname != "" { t.Fatalf("Expected the config to have 'hostname' as hostname.domainname, got %q", config.Hostname) } if config, _, _ := mustParse(t, hostnameWithDomainTld); config.Hostname != "hostname.domainname.tld" || config.Domainname != "" { t.Fatalf("Expected the config to have 'hostname' as hostname.domainname.tld, got %q", config.Hostname) } } func TestParseHostnameDomainname(t *testing.T) { validDomainnames := map[string]string{ "domainname": "domainname", "domain-name": "domain-name", 
"domainname123": "domainname123", "123domainname": "123domainname", "domainname-63-bytes-long-should-be-valid-and-without-any-errors": "domainname-63-bytes-long-should-be-valid-and-without-any-errors", } for domainname, expectedDomainname := range validDomainnames { if config, _, _ := mustParse(t, "--domainname="+domainname); config.Domainname != expectedDomainname { t.Fatalf("Expected the config to have 'domainname' as %q, got %q", expectedDomainname, config.Domainname) } } if config, _, _ := mustParse(t, "--hostname=some.prefix --domainname=domainname"); config.Hostname != "some.prefix" || config.Domainname != "domainname" { t.Fatalf("Expected the config to have 'hostname' as 'some.prefix' and 'domainname' as 'domainname', got %q and %q", config.Hostname, config.Domainname) } if config, _, _ := mustParse(t, "--hostname=another-prefix --domainname=domainname.tld"); config.Hostname != "another-prefix" || config.Domainname != "domainname.tld" { t.Fatalf("Expected the config to have 'hostname' as 'another-prefix' and 'domainname' as 'domainname.tld', got %q and %q", config.Hostname, config.Domainname) } } func TestParseWithExpose(t *testing.T) { invalids := map[string]string{ ":": "invalid port format for --expose: :", "8080:9090": "invalid port format for --expose: 8080:9090", "/tcp": "invalid range format for --expose: /tcp, error: empty string specified for ports", "/udp": "invalid range format for --expose: /udp, error: empty string specified for ports", "NaN/tcp": `invalid range format for --expose: NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, "NaN-NaN/tcp": `invalid range format for --expose: NaN-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, "8080-NaN/tcp": `invalid range format for --expose: 8080-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, "1234567890-8080/tcp": `invalid range format for --expose: 1234567890-8080/tcp, error: strconv.ParseUint: parsing "1234567890": value out of range`, } valids 
:= map[string][]nat.Port{ "8080/tcp": {"8080/tcp"}, "8080/udp": {"8080/udp"}, "8080/ncp": {"8080/ncp"}, "8080-8080/udp": {"8080/udp"}, "8080-8082/tcp": {"8080/tcp", "8081/tcp", "8082/tcp"}, } for expose, expectedError := range invalids { if _, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}); err == nil || err.Error() != expectedError { t.Fatalf("Expected error '%v' with '--expose=%v', got '%v'", expectedError, expose, err) } } for expose, exposedPorts := range valids { config, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}) if err != nil { t.Fatal(err) } if len(config.ExposedPorts) != len(exposedPorts) { t.Fatalf("Expected %v exposed port, got %v", len(exposedPorts), len(config.ExposedPorts)) } for _, port := range exposedPorts { if _, ok := config.ExposedPorts[port]; !ok { t.Fatalf("Expected %v, got %v", exposedPorts, config.ExposedPorts) } } } // Merge with actual published port config, _, _, err := parseRun([]string{"--publish=80", "--expose=80-81/tcp", "img", "cmd"}) if err != nil { t.Fatal(err) } if len(config.ExposedPorts) != 2 { t.Fatalf("Expected 2 exposed ports, got %v", config.ExposedPorts) } ports := []nat.Port{"80/tcp", "81/tcp"} for _, port := range ports { if _, ok := config.ExposedPorts[port]; !ok { t.Fatalf("Expected %v, got %v", ports, config.ExposedPorts) } } } func TestParseDevice(t *testing.T) { skip.If(t, runtime.GOOS != "linux") // Windows and macOS validate server-side valids := map[string]container.DeviceMapping{ "/dev/snd": { PathOnHost: "/dev/snd", PathInContainer: "/dev/snd", CgroupPermissions: "rwm", }, "/dev/snd:rw": { PathOnHost: "/dev/snd", PathInContainer: "/dev/snd", CgroupPermissions: "rw", }, "/dev/snd:/something": { PathOnHost: "/dev/snd", PathInContainer: "/something", CgroupPermissions: "rwm", }, "/dev/snd:/something:rw": { PathOnHost: "/dev/snd", PathInContainer: "/something", CgroupPermissions: "rw", }, } for device, deviceMapping := range valids { _, 
hostconfig, _, err := parseRun([]string{fmt.Sprintf("--device=%v", device), "img", "cmd"}) if err != nil { t.Fatal(err) } if len(hostconfig.Devices) != 1 { t.Fatalf("Expected 1 devices, got %v", hostconfig.Devices) } if hostconfig.Devices[0] != deviceMapping { t.Fatalf("Expected %v, got %v", deviceMapping, hostconfig.Devices) } } } func TestParseNetworkConfig(t *testing.T) { tests := []struct { name string flags []string expected map[string]*networktypes.EndpointSettings expectedCfg container.HostConfig expectedErr string }{ { name: "single-network-legacy", flags: []string{"--network", "net1"}, expected: map[string]*networktypes.EndpointSettings{}, expectedCfg: container.HostConfig{NetworkMode: "net1"}, }, { name: "single-network-advanced", flags: []string{"--network", "name=net1"}, expected: map[string]*networktypes.EndpointSettings{}, expectedCfg: container.HostConfig{NetworkMode: "net1"}, }, { name: "single-network-legacy-with-options", flags: []string{ "--ip", "172.20.88.22", "--ip6", "2001:db8::8822", "--link", "foo:bar", "--link", "bar:baz", "--link-local-ip", "169.254.2.2", "--link-local-ip", "fe80::169:254:2:2", "--network", "name=net1", "--network-alias", "web1", "--network-alias", "web2", }, expected: map[string]*networktypes.EndpointSettings{ "net1": { IPAMConfig: &networktypes.EndpointIPAMConfig{ IPv4Address: "172.20.88.22", IPv6Address: "2001:db8::8822", LinkLocalIPs: []string{"169.254.2.2", "fe80::169:254:2:2"}, }, Links: []string{"foo:bar", "bar:baz"}, Aliases: []string{"web1", "web2"}, }, }, expectedCfg: container.HostConfig{NetworkMode: "net1"}, }, { name: "multiple-network-advanced-mixed", flags: []string{ "--ip", "172.20.88.22", "--ip6", "2001:db8::8822", "--link", "foo:bar", "--link", "bar:baz", "--link-local-ip", "169.254.2.2", "--link-local-ip", "fe80::169:254:2:2", "--network", "name=net1,driver-opt=field1=value1", "--network-alias", "web1", "--network-alias", "web2", "--network", "net2", "--network", 
"name=net3,alias=web3,driver-opt=field3=value3,ip=172.20.88.22,ip6=2001:db8::8822", }, expected: map[string]*networktypes.EndpointSettings{ "net1": { DriverOpts: map[string]string{"field1": "value1"}, IPAMConfig: &networktypes.EndpointIPAMConfig{ IPv4Address: "172.20.88.22", IPv6Address: "2001:db8::8822", LinkLocalIPs: []string{"169.254.2.2", "fe80::169:254:2:2"}, }, Links: []string{"foo:bar", "bar:baz"}, Aliases: []string{"web1", "web2"}, }, "net2": {}, "net3": { DriverOpts: map[string]string{"field3": "value3"}, IPAMConfig: &networktypes.EndpointIPAMConfig{ IPv4Address: "172.20.88.22", IPv6Address: "2001:db8::8822", }, Aliases: []string{"web3"}, }, }, expectedCfg: container.HostConfig{NetworkMode: "net1"}, }, { name: "single-network-advanced-with-options", flags: []string{"--network", "name=net1,alias=web1,alias=web2,driver-opt=field1=value1,driver-opt=field2=value2,ip=172.20.88.22,ip6=2001:db8::8822"}, expected: map[string]*networktypes.EndpointSettings{ "net1": { DriverOpts: map[string]string{ "field1": "value1", "field2": "value2", }, IPAMConfig: &networktypes.EndpointIPAMConfig{ IPv4Address: "172.20.88.22", IPv6Address: "2001:db8::8822", }, Aliases: []string{"web1", "web2"}, }, }, expectedCfg: container.HostConfig{NetworkMode: "net1"}, }, { name: "multiple-networks", flags: []string{"--network", "net1", "--network", "name=net2"}, expected: map[string]*networktypes.EndpointSettings{"net1": {}, "net2": {}}, expectedCfg: container.HostConfig{NetworkMode: "net1"}, }, { name: "conflict-network", flags: []string{"--network", "duplicate", "--network", "name=duplicate"}, expectedErr: `network "duplicate" is specified multiple times`, }, { name: "conflict-options-alias", flags: []string{"--network", "name=net1,alias=web1", "--network-alias", "web1"}, expectedErr: `conflicting options: cannot specify both --network-alias and per-network alias`, }, { name: "conflict-options-ip", flags: []string{"--network", "name=net1,ip=172.20.88.22,ip6=2001:db8::8822", "--ip", 
"172.20.88.22"}, expectedErr: `conflicting options: cannot specify both --ip and per-network IPv4 address`, }, { name: "conflict-options-ip6", flags: []string{"--network", "name=net1,ip=172.20.88.22,ip6=2001:db8::8822", "--ip6", "2001:db8::8822"}, expectedErr: `conflicting options: cannot specify both --ip6 and per-network IPv6 address`, }, // case is skipped as it fails w/o any change // //{ // name: "invalid-mixed-network-types", // flags: []string{"--network", "name=host", "--network", "net1"}, // expectedErr: `conflicting options: cannot attach both user-defined and non-user-defined network-modes`, //}, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { _, hConfig, nwConfig, err := parseRun(tc.flags) if tc.expectedErr != "" { assert.Error(t, err, tc.expectedErr) return } assert.NilError(t, err) assert.DeepEqual(t, hConfig.NetworkMode, tc.expectedCfg.NetworkMode) assert.DeepEqual(t, nwConfig.EndpointsConfig, tc.expected) }) } } func TestParseModes(t *testing.T) { // pid ko flags, copts := setupRunFlags() args := []string{"--pid=container:", "img", "cmd"} assert.NilError(t, flags.Parse(args)) _, err := parse(flags, copts, runtime.GOOS) assert.ErrorContains(t, err, "--pid: invalid PID mode") // pid ok _, hostconfig, _, err := parseRun([]string{"--pid=host", "img", "cmd"}) assert.NilError(t, err) if !hostconfig.PidMode.Valid() { t.Fatalf("Expected a valid PidMode, got %v", hostconfig.PidMode) } // uts ko _, _, _, err = parseRun([]string{"--uts=container:", "img", "cmd"}) //nolint:dogsled assert.ErrorContains(t, err, "--uts: invalid UTS mode") // uts ok _, hostconfig, _, err = parseRun([]string{"--uts=host", "img", "cmd"}) assert.NilError(t, err) if !hostconfig.UTSMode.Valid() { t.Fatalf("Expected a valid UTSMode, got %v", hostconfig.UTSMode) } } func TestRunFlagsParseShmSize(t *testing.T) { // shm-size ko flags, _ := setupRunFlags() args := []string{"--shm-size=a128m", "img", "cmd"} expectedErr := `invalid argument "a128m" for "--shm-size" flag:` err 
:= flags.Parse(args) assert.ErrorContains(t, err, expectedErr) // shm-size ok _, hostconfig, _, err := parseRun([]string{"--shm-size=128m", "img", "cmd"}) assert.NilError(t, err) if hostconfig.ShmSize != 134217728 { t.Fatalf("Expected a valid ShmSize, got %d", hostconfig.ShmSize) } } func TestParseRestartPolicy(t *testing.T) { invalids := map[string]string{ "always:2:3": "invalid restart policy format: maximum retry count must be an integer", "on-failure:invalid": "invalid restart policy format: maximum retry count must be an integer", } valids := map[string]container.RestartPolicy{ "": {}, "always": { Name: "always", MaximumRetryCount: 0, }, "on-failure:1": { Name: "on-failure", MaximumRetryCount: 1, }, } for restart, expectedError := range invalids { if _, _, _, err := parseRun([]string{fmt.Sprintf("--restart=%s", restart), "img", "cmd"}); err == nil || err.Error() != expectedError { t.Fatalf("Expected an error with message '%v' for %v, got %v", expectedError, restart, err) } } for restart, expected := range valids { _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--restart=%v", restart), "img", "cmd"}) if err != nil { t.Fatal(err) } if hostconfig.RestartPolicy != expected { t.Fatalf("Expected %v, got %v", expected, hostconfig.RestartPolicy) } } } func TestParseRestartPolicyAutoRemove(t *testing.T) { expected := "Conflicting options: --restart and --rm" _, _, _, err := parseRun([]string{"--rm", "--restart=always", "img", "cmd"}) //nolint:dogsled if err == nil || err.Error() != expected { t.Fatalf("Expected error %v, but got none", expected) } } func TestParseHealth(t *testing.T) { checkOk := func(args ...string) *container.HealthConfig { config, _, _, err := parseRun(args) if err != nil { t.Fatalf("%#v: %v", args, err) } return config.Healthcheck } checkError := func(expected string, args ...string) { config, _, _, err := parseRun(args) if err == nil { t.Fatalf("Expected error, but got %#v", config) } if err.Error() != expected { t.Fatalf("Expected %#v, 
got %#v", expected, err) } } health := checkOk("--no-healthcheck", "img", "cmd") if health == nil || len(health.Test) != 1 || health.Test[0] != "NONE" { t.Fatalf("--no-healthcheck failed: %#v", health) } health = checkOk("--health-cmd=/check.sh -q", "img", "cmd") if len(health.Test) != 2 || health.Test[0] != "CMD-SHELL" || health.Test[1] != "/check.sh -q" { t.Fatalf("--health-cmd: got %#v", health.Test) } if health.Timeout != 0 { t.Fatalf("--health-cmd: timeout = %s", health.Timeout) } checkError("--no-healthcheck conflicts with --health-* options", "--no-healthcheck", "--health-cmd=/check.sh -q", "img", "cmd") health = checkOk("--health-timeout=2s", "--health-retries=3", "--health-interval=4.5s", "--health-start-period=5s", "img", "cmd") if health.Timeout != 2*time.Second || health.Retries != 3 || health.Interval != 4500*time.Millisecond || health.StartPeriod != 5*time.Second { t.Fatalf("--health-*: got %#v", health) } } func TestParseLoggingOpts(t *testing.T) { // logging opts ko if _, _, _, err := parseRun([]string{"--log-driver=none", "--log-opt=anything", "img", "cmd"}); err == nil || err.Error() != "invalid logging opts for driver none" { t.Fatalf("Expected an error with message 'invalid logging opts for driver none', got %v", err) } // logging opts ok _, hostconfig, _, err := parseRun([]string{"--log-driver=syslog", "--log-opt=something", "img", "cmd"}) if err != nil { t.Fatal(err) } if hostconfig.LogConfig.Type != "syslog" || len(hostconfig.LogConfig.Config) != 1 { t.Fatalf("Expected a 'syslog' LogConfig with one config, got %v", hostconfig.RestartPolicy) } } func TestParseEnvfileVariables(t *testing.T) { e := "open nonexistent: no such file or directory" if runtime.GOOS == "windows" { e = "open nonexistent: The system cannot find the file specified." 
} // env ko if _, _, _, err := parseRun([]string{"--env-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { t.Fatalf("Expected an error with message '%s', got %v", e, err) } // env ok config, _, _, err := parseRun([]string{"--env-file=testdata/valid.env", "img", "cmd"}) if err != nil { t.Fatal(err) } if len(config.Env) != 1 || config.Env[0] != "ENV1=value1" { t.Fatalf("Expected a config with [ENV1=value1], got %v", config.Env) } config, _, _, err = parseRun([]string{"--env-file=testdata/valid.env", "--env=ENV2=value2", "img", "cmd"}) if err != nil { t.Fatal(err) } if len(config.Env) != 2 || config.Env[0] != "ENV1=value1" || config.Env[1] != "ENV2=value2" { t.Fatalf("Expected a config with [ENV1=value1 ENV2=value2], got %v", config.Env) } } func TestParseEnvfileVariablesWithBOMUnicode(t *testing.T) { // UTF8 with BOM config, _, _, err := parseRun([]string{"--env-file=testdata/utf8.env", "img", "cmd"}) if err != nil { t.Fatal(err) } env := []string{"FOO=BAR", "HELLO=" + string([]byte{0xe6, 0x82, 0xa8, 0xe5, 0xa5, 0xbd}), "BAR=FOO"} if len(config.Env) != len(env) { t.Fatalf("Expected a config with %d env variables, got %v: %v", len(env), len(config.Env), config.Env) } for i, v := range env { if config.Env[i] != v { t.Fatalf("Expected a config with [%s], got %v", v, []byte(config.Env[i])) } } // UTF16 with BOM e := "invalid env file" if _, _, _, err := parseRun([]string{"--env-file=testdata/utf16.env", "img", "cmd"}); err == nil || !strings.Contains(err.Error(), e) { t.Fatalf("Expected an error with message '%s', got %v", e, err) } // UTF16BE with BOM if _, _, _, err := parseRun([]string{"--env-file=testdata/utf16be.env", "img", "cmd"}); err == nil || !strings.Contains(err.Error(), e) { t.Fatalf("Expected an error with message '%s', got %v", e, err) } } func TestParseLabelfileVariables(t *testing.T) { e := "open nonexistent: no such file or directory" if runtime.GOOS == "windows" { e = "open nonexistent: The system cannot find the file specified." 
} // label ko if _, _, _, err := parseRun([]string{"--label-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { t.Fatalf("Expected an error with message '%s', got %v", e, err) } // label ok config, _, _, err := parseRun([]string{"--label-file=testdata/valid.label", "img", "cmd"}) if err != nil { t.Fatal(err) } if len(config.Labels) != 1 || config.Labels["LABEL1"] != "value1" { t.Fatalf("Expected a config with [LABEL1:value1], got %v", config.Labels) } config, _, _, err = parseRun([]string{"--label-file=testdata/valid.label", "--label=LABEL2=value2", "img", "cmd"}) if err != nil { t.Fatal(err) } if len(config.Labels) != 2 || config.Labels["LABEL1"] != "value1" || config.Labels["LABEL2"] != "value2" { t.Fatalf("Expected a config with [LABEL1:value1 LABEL2:value2], got %v", config.Labels) } } func TestParseEntryPoint(t *testing.T) { config, _, _, err := parseRun([]string{"--entrypoint=anything", "cmd", "img"}) if err != nil { t.Fatal(err) } if len(config.Entrypoint) != 1 && config.Entrypoint[0] != "anything" { t.Fatalf("Expected entrypoint 'anything', got %v", config.Entrypoint) } } func TestValidateDevice(t *testing.T) { skip.If(t, runtime.GOOS != "linux") // Windows and macOS validate server-side valid := []string{ "/home", "/home:/home", "/home:/something/else", "/with space", "/home:/with space", "relative:/absolute-path", "hostPath:/containerPath:r", "/hostPath:/containerPath:rw", "/hostPath:/containerPath:mrw", } invalid := map[string]string{ "": "bad format for path: ", "./": "./ is not an absolute path", "../": "../ is not an absolute path", "/:../": "../ is not an absolute path", "/:path": "path is not an absolute path", ":": "bad format for path: :", "/tmp:": " is not an absolute path", ":test": "bad format for path: :test", ":/test": "bad format for path: :/test",
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
true
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/container_types.go
pkg/container/container_types.go
package container import ( "context" "io" "github.com/docker/go-connections/nat" "github.com/nektos/act/pkg/common" ) // NewContainerInput the input for the New function type NewContainerInput struct { Image string Username string Password string Entrypoint []string Cmd []string WorkingDir string Env []string Binds []string Mounts map[string]string Name string Stdout io.Writer Stderr io.Writer NetworkMode string Privileged bool UsernsMode string Platform string Options string NetworkAliases []string ExposedPorts nat.PortSet PortBindings nat.PortMap } // FileEntry is a file to copy to a container type FileEntry struct { Name string Mode uint32 Body string } // Container for managing docker run containers type Container interface { Create(capAdd []string, capDrop []string) common.Executor Copy(destPath string, files ...*FileEntry) common.Executor CopyTarStream(ctx context.Context, destPath string, tarStream io.Reader) error CopyDir(destPath string, srcPath string, useGitIgnore bool) common.Executor GetContainerArchive(ctx context.Context, srcPath string) (io.ReadCloser, error) Pull(forcePull bool) common.Executor Start(attach bool) common.Executor Exec(command []string, env map[string]string, user, workdir string) common.Executor UpdateFromEnv(srcPath string, env *map[string]string) common.Executor UpdateFromImageEnv(env *map[string]string) common.Executor Remove() common.Executor Close() common.Executor ReplaceLogWriter(io.Writer, io.Writer) (io.Writer, io.Writer) GetHealth(ctx context.Context) Health } // NewDockerBuildExecutorInput the input for the NewDockerBuildExecutor function type NewDockerBuildExecutorInput struct { ContextDir string Dockerfile string BuildContext io.Reader ImageTag string Platform string } // NewDockerPullExecutorInput the input for the NewDockerPullExecutor function type NewDockerPullExecutorInput struct { Image string ForcePull bool Platform string Username string Password string } type Health int const ( HealthStarting Health = iota 
HealthHealthy HealthUnHealthy )
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/container/docker_network.go
pkg/container/docker_network.go
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd)) package container import ( "context" "github.com/docker/docker/api/types/network" "github.com/nektos/act/pkg/common" ) func NewDockerNetworkCreateExecutor(name string) common.Executor { return func(ctx context.Context) error { cli, err := GetDockerClient(ctx) if err != nil { return err } defer cli.Close() // Only create the network if it doesn't exist networks, err := cli.NetworkList(ctx, network.ListOptions{}) if err != nil { return err } common.Logger(ctx).Debugf("%v", networks) for _, network := range networks { if network.Name == name { common.Logger(ctx).Debugf("Network %v exists", name) return nil } } _, err = cli.NetworkCreate(ctx, name, network.CreateOptions{ Driver: "bridge", Scope: "local", }) if err != nil { return err } return nil } } func NewDockerNetworkRemoveExecutor(name string) common.Executor { return func(ctx context.Context) error { cli, err := GetDockerClient(ctx) if err != nil { return err } defer cli.Close() // Make sure that all network of the specified name are removed // cli.NetworkRemove refuses to remove a network if there are duplicates networks, err := cli.NetworkList(ctx, network.ListOptions{}) if err != nil { return err } common.Logger(ctx).Debugf("%v", networks) for _, net := range networks { if net.Name == name { result, err := cli.NetworkInspect(ctx, net.ID, network.InspectOptions{}) if err != nil { return err } if len(result.Containers) == 0 { if err = cli.NetworkRemove(ctx, net.ID); err != nil { common.Logger(ctx).Debugf("%v", err) } } else { common.Logger(ctx).Debugf("Refusing to remove network %v because it still has active endpoints", name) } } } return err } }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/filecollector/file_collector.go
pkg/filecollector/file_collector.go
package filecollector import ( "archive/tar" "context" "fmt" "io" "io/fs" "os" "path" "path/filepath" "strings" git "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/plumbing/filemode" "github.com/go-git/go-git/v5/plumbing/format/gitignore" "github.com/go-git/go-git/v5/plumbing/format/index" ) type Handler interface { WriteFile(path string, fi fs.FileInfo, linkName string, f io.Reader) error } type TarCollector struct { TarWriter *tar.Writer UID int GID int DstDir string } func (tc TarCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string, f io.Reader) error { // create a new dir/file header header, err := tar.FileInfoHeader(fi, linkName) if err != nil { return err } // update the name to correctly reflect the desired destination when untaring header.Name = path.Join(tc.DstDir, fpath) header.Mode = int64(fi.Mode()) header.ModTime = fi.ModTime() header.Uid = tc.UID header.Gid = tc.GID // write the header if err := tc.TarWriter.WriteHeader(header); err != nil { return err } // this is a symlink no reader provided if f == nil { return nil } // copy file data into tar writer if _, err := io.Copy(tc.TarWriter, f); err != nil { return err } return nil } type CopyCollector struct { DstDir string } func (cc *CopyCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string, f io.Reader) error { fdestpath := filepath.Join(cc.DstDir, fpath) if err := os.MkdirAll(filepath.Dir(fdestpath), 0o777); err != nil { return err } if linkName != "" { return os.Symlink(linkName, fdestpath) } df, err := os.OpenFile(fdestpath, os.O_CREATE|os.O_WRONLY, fi.Mode()) if err != nil { return err } defer df.Close() if _, err := io.Copy(df, f); err != nil { return err } return nil } type FileCollector struct { Ignorer gitignore.Matcher SrcPath string SrcPrefix string Fs Fs Handler Handler } type Fs interface { Walk(root string, fn filepath.WalkFunc) error OpenGitIndex(path string) (*index.Index, error) Open(path string) (io.ReadCloser, error) Readlink(path string) 
(string, error) } type DefaultFs struct { } func (*DefaultFs) Walk(root string, fn filepath.WalkFunc) error { return filepath.Walk(root, fn) } func (*DefaultFs) OpenGitIndex(path string) (*index.Index, error) { r, err := git.PlainOpen(path) if err != nil { return nil, err } i, err := r.Storer.Index() if err != nil { return nil, err } return i, nil } func (*DefaultFs) Open(path string) (io.ReadCloser, error) { return os.Open(path) } func (*DefaultFs) Readlink(path string) (string, error) { return os.Readlink(path) } //nolint:gocyclo func (fc *FileCollector) CollectFiles(ctx context.Context, submodulePath []string) filepath.WalkFunc { i, _ := fc.Fs.OpenGitIndex(path.Join(fc.SrcPath, path.Join(submodulePath...))) return func(file string, fi os.FileInfo, err error) error { if err != nil { return err } if ctx != nil { select { case <-ctx.Done(): return fmt.Errorf("copy cancelled") default: } } sansPrefix := strings.TrimPrefix(file, fc.SrcPrefix) split := strings.Split(sansPrefix, string(filepath.Separator)) // The root folders should be skipped, submodules only have the last path component set to "." by filepath.Walk if fi.IsDir() && len(split) > 0 && split[len(split)-1] == "." 
{ return nil } var entry *index.Entry if i != nil { entry, err = i.Entry(strings.Join(split[len(submodulePath):], "/")) } else { err = index.ErrEntryNotFound } if err != nil && fc.Ignorer != nil && fc.Ignorer.Match(split, fi.IsDir()) { if fi.IsDir() { if i != nil { ms, err := i.Glob(strings.Join(append(split[len(submodulePath):], "**"), "/")) if err != nil || len(ms) == 0 { return filepath.SkipDir } } else { return filepath.SkipDir } } else { return nil } } if err == nil && entry.Mode == filemode.Submodule { err = fc.Fs.Walk(file, fc.CollectFiles(ctx, split)) if err != nil { return err } return filepath.SkipDir } path := filepath.ToSlash(sansPrefix) // return on non-regular files (thanks to [kumo](https://medium.com/@komuw/just-like-you-did-fbdd7df829d3) for this suggested update) if fi.Mode()&os.ModeSymlink == os.ModeSymlink { linkName, err := fc.Fs.Readlink(file) if err != nil { return fmt.Errorf("unable to readlink '%s': %w", file, err) } return fc.Handler.WriteFile(path, fi, linkName, nil) } else if !fi.Mode().IsRegular() { return nil } // open file f, err := fc.Fs.Open(file) if err != nil { return err } defer f.Close() if ctx != nil { // make io.Copy cancellable by closing the file cpctx, cpfinish := context.WithCancel(ctx) defer cpfinish() go func() { select { case <-cpctx.Done(): case <-ctx.Done(): f.Close() } }() } return fc.Handler.WriteFile(path, fi, "", f) } }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/filecollector/file_collector_test.go
pkg/filecollector/file_collector_test.go
package filecollector import ( "archive/tar" "context" "io" "path/filepath" "strings" "testing" "github.com/go-git/go-billy/v5" "github.com/go-git/go-billy/v5/memfs" git "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/plumbing/cache" "github.com/go-git/go-git/v5/plumbing/format/gitignore" "github.com/go-git/go-git/v5/plumbing/format/index" "github.com/go-git/go-git/v5/storage/filesystem" "github.com/stretchr/testify/assert" ) type memoryFs struct { billy.Filesystem } func (mfs *memoryFs) walk(root string, fn filepath.WalkFunc) error { dir, err := mfs.ReadDir(root) if err != nil { return err } for i := 0; i < len(dir); i++ { filename := filepath.Join(root, dir[i].Name()) err = fn(filename, dir[i], nil) if dir[i].IsDir() { if err == filepath.SkipDir { err = nil } else if err := mfs.walk(filename, fn); err != nil { return err } } if err != nil { return err } } return nil } func (mfs *memoryFs) Walk(root string, fn filepath.WalkFunc) error { stat, err := mfs.Lstat(root) if err != nil { return err } err = fn(strings.Join([]string{root, "."}, string(filepath.Separator)), stat, nil) if err != nil { return err } return mfs.walk(root, fn) } func (mfs *memoryFs) OpenGitIndex(path string) (*index.Index, error) { f, _ := mfs.Filesystem.Chroot(filepath.Join(path, ".git")) storage := filesystem.NewStorage(f, cache.NewObjectLRUDefault()) i, err := storage.Index() if err != nil { return nil, err } return i, nil } func (mfs *memoryFs) Open(path string) (io.ReadCloser, error) { return mfs.Filesystem.Open(path) } func (mfs *memoryFs) Readlink(path string) (string, error) { return mfs.Filesystem.Readlink(path) } func TestIgnoredTrackedfile(t *testing.T) { fs := memfs.New() _ = fs.MkdirAll("mygitrepo/.git", 0o777) dotgit, _ := fs.Chroot("mygitrepo/.git") worktree, _ := fs.Chroot("mygitrepo") repo, _ := git.Init(filesystem.NewStorage(dotgit, cache.NewObjectLRUDefault()), worktree) f, _ := worktree.Create(".gitignore") _, _ = f.Write([]byte(".*\n")) f.Close() // This file 
shouldn't be in the tar f, _ = worktree.Create(".env") _, _ = f.Write([]byte("test=val1\n")) f.Close() w, _ := repo.Worktree() // .gitignore is in the tar after adding it to the index _, _ = w.Add(".gitignore") tmpTar, _ := fs.Create("temp.tar") tw := tar.NewWriter(tmpTar) ps, _ := gitignore.ReadPatterns(worktree, []string{}) ignorer := gitignore.NewMatcher(ps) fc := &FileCollector{ Fs: &memoryFs{Filesystem: fs}, Ignorer: ignorer, SrcPath: "mygitrepo", SrcPrefix: "mygitrepo" + string(filepath.Separator), Handler: &TarCollector{ TarWriter: tw, }, } err := fc.Fs.Walk("mygitrepo", fc.CollectFiles(context.Background(), []string{})) assert.NoError(t, err, "successfully collect files") tw.Close() _, _ = tmpTar.Seek(0, io.SeekStart) tr := tar.NewReader(tmpTar) h, err := tr.Next() assert.NoError(t, err, "tar must not be empty") assert.Equal(t, ".gitignore", h.Name) _, err = tr.Next() assert.ErrorIs(t, err, io.EOF, "tar must only contain one element") } func TestSymlinks(t *testing.T) { fs := memfs.New() _ = fs.MkdirAll("mygitrepo/.git", 0o777) dotgit, _ := fs.Chroot("mygitrepo/.git") worktree, _ := fs.Chroot("mygitrepo") repo, _ := git.Init(filesystem.NewStorage(dotgit, cache.NewObjectLRUDefault()), worktree) // This file shouldn't be in the tar f, err := worktree.Create(".env") assert.NoError(t, err) _, err = f.Write([]byte("test=val1\n")) assert.NoError(t, err) f.Close() err = worktree.Symlink(".env", "test.env") assert.NoError(t, err) w, err := repo.Worktree() assert.NoError(t, err) // .gitignore is in the tar after adding it to the index _, err = w.Add(".env") assert.NoError(t, err) _, err = w.Add("test.env") assert.NoError(t, err) tmpTar, _ := fs.Create("temp.tar") tw := tar.NewWriter(tmpTar) ps, _ := gitignore.ReadPatterns(worktree, []string{}) ignorer := gitignore.NewMatcher(ps) fc := &FileCollector{ Fs: &memoryFs{Filesystem: fs}, Ignorer: ignorer, SrcPath: "mygitrepo", SrcPrefix: "mygitrepo" + string(filepath.Separator), Handler: &TarCollector{ TarWriter: tw, }, } 
err = fc.Fs.Walk("mygitrepo", fc.CollectFiles(context.Background(), []string{})) assert.NoError(t, err, "successfully collect files") tw.Close() _, _ = tmpTar.Seek(0, io.SeekStart) tr := tar.NewReader(tmpTar) h, err := tr.Next() files := map[string]tar.Header{} for err == nil { files[h.Name] = *h h, err = tr.Next() } assert.Equal(t, ".env", files[".env"].Name) assert.Equal(t, "test.env", files["test.env"].Name) assert.Equal(t, ".env", files["test.env"].Linkname) assert.ErrorIs(t, err, io.EOF, "tar must be read cleanly to EOF") }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/artifactcache/storage.go
pkg/artifactcache/storage.go
package artifactcache import ( "fmt" "io" "net/http" "os" "path/filepath" ) type Storage struct { rootDir string } func NewStorage(rootDir string) (*Storage, error) { if err := os.MkdirAll(rootDir, 0o755); err != nil { return nil, err } return &Storage{ rootDir: rootDir, }, nil } func (s *Storage) Exist(id uint64) (bool, error) { name := s.filename(id) if _, err := os.Stat(name); os.IsNotExist(err) { return false, nil } else if err != nil { return false, err } return true, nil } func (s *Storage) Write(id uint64, offset int64, reader io.Reader) error { name := s.tempName(id, offset) if err := os.MkdirAll(filepath.Dir(name), 0o755); err != nil { return err } file, err := os.Create(name) if err != nil { return err } defer file.Close() _, err = io.Copy(file, reader) return err } func (s *Storage) Commit(id uint64, size int64) (int64, error) { defer func() { _ = os.RemoveAll(s.tempDir(id)) }() name := s.filename(id) tempNames, err := s.tempNames(id) if err != nil { return 0, err } if err := os.MkdirAll(filepath.Dir(name), 0o755); err != nil { return 0, err } file, err := os.Create(name) if err != nil { return 0, err } defer file.Close() var written int64 for _, v := range tempNames { f, err := os.Open(v) if err != nil { return 0, err } n, err := io.Copy(file, f) _ = f.Close() if err != nil { return 0, err } written += n } // If size is less than 0, it means the size is unknown. // We can't check the size of the file, just skip the check. // It happens when the request comes from old versions of actions, like `actions/cache@v2`. 
if size >= 0 && written != size { _ = file.Close() _ = os.Remove(name) return 0, fmt.Errorf("broken file: %v != %v", written, size) } return written, nil } func (s *Storage) Serve(w http.ResponseWriter, r *http.Request, id uint64) { name := s.filename(id) http.ServeFile(w, r, name) } func (s *Storage) Remove(id uint64) { _ = os.Remove(s.filename(id)) _ = os.RemoveAll(s.tempDir(id)) } func (s *Storage) filename(id uint64) string { return filepath.Join(s.rootDir, fmt.Sprintf("%02x", id%0xff), fmt.Sprint(id)) } func (s *Storage) tempDir(id uint64) string { return filepath.Join(s.rootDir, "tmp", fmt.Sprint(id)) } func (s *Storage) tempName(id uint64, offset int64) string { return filepath.Join(s.tempDir(id), fmt.Sprintf("%016x", offset)) } func (s *Storage) tempNames(id uint64) ([]string, error) { dir := s.tempDir(id) files, err := os.ReadDir(dir) if err != nil { return nil, err } var names []string for _, v := range files { if !v.IsDir() { names = append(names, filepath.Join(dir, v.Name())) } } return names, nil }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/artifactcache/handler_test.go
pkg/artifactcache/handler_test.go
package artifactcache import ( "bytes" "crypto/rand" "encoding/json" "fmt" "io" "net/http" "path/filepath" "strings" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/timshannon/bolthold" "go.etcd.io/bbolt" ) func TestHandler(t *testing.T) { dir := filepath.Join(t.TempDir(), "artifactcache") handler, err := StartHandler(dir, "", "", 0, nil) require.NoError(t, err) base := fmt.Sprintf("%s%s", handler.ExternalURL(), urlBase) defer func() { t.Run("inpect db", func(t *testing.T) { db, err := handler.openDB() require.NoError(t, err) defer db.Close() require.NoError(t, db.Bolt().View(func(tx *bbolt.Tx) error { return tx.Bucket([]byte("Cache")).ForEach(func(k, v []byte) error { t.Logf("%s: %s", k, v) return nil }) })) }) t.Run("close", func(t *testing.T) { require.NoError(t, handler.Close()) assert.Nil(t, handler.server) assert.Nil(t, handler.listener) _, err := http.Post(fmt.Sprintf("%s/caches/%d", base, 1), "", nil) assert.Error(t, err) }) }() t.Run("get not exist", func(t *testing.T) { key := strings.ToLower(t.Name()) version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20" resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, key, version)) require.NoError(t, err) require.Equal(t, 204, resp.StatusCode) }) t.Run("reserve and upload", func(t *testing.T) { key := strings.ToLower(t.Name()) version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20" content := make([]byte, 100) _, err := rand.Read(content) require.NoError(t, err) uploadCacheNormally(t, base, key, version, content) }) t.Run("clean", func(t *testing.T) { resp, err := http.Post(fmt.Sprintf("%s/clean", base), "", nil) require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) }) t.Run("reserve with bad request", func(t *testing.T) { body := []byte(`invalid json`) require.NoError(t, err) resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body)) 
require.NoError(t, err) assert.Equal(t, 400, resp.StatusCode) }) t.Run("duplicate reserve", func(t *testing.T) { key := strings.ToLower(t.Name()) version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20" var first, second struct { CacheID uint64 `json:"cacheId"` } { body, err := json.Marshal(&Request{ Key: key, Version: version, Size: 100, }) require.NoError(t, err) resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body)) require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) require.NoError(t, json.NewDecoder(resp.Body).Decode(&first)) assert.NotZero(t, first.CacheID) } { body, err := json.Marshal(&Request{ Key: key, Version: version, Size: 100, }) require.NoError(t, err) resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body)) require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) require.NoError(t, json.NewDecoder(resp.Body).Decode(&second)) assert.NotZero(t, second.CacheID) } assert.NotEqual(t, first.CacheID, second.CacheID) }) t.Run("upload with bad id", func(t *testing.T) { req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%s/caches/invalid_id", base), bytes.NewReader(nil)) require.NoError(t, err) req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Content-Range", "bytes 0-99/*") resp, err := http.DefaultClient.Do(req) require.NoError(t, err) assert.Equal(t, 400, resp.StatusCode) }) t.Run("upload without reserve", func(t *testing.T) { req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%s/caches/%d", base, 1000), bytes.NewReader(nil)) require.NoError(t, err) req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Content-Range", "bytes 0-99/*") resp, err := http.DefaultClient.Do(req) require.NoError(t, err) assert.Equal(t, 400, resp.StatusCode) }) t.Run("upload with complete", func(t *testing.T) { key := strings.ToLower(t.Name()) version := 
"c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20" var id uint64 content := make([]byte, 100) _, err := rand.Read(content) require.NoError(t, err) { body, err := json.Marshal(&Request{ Key: key, Version: version, Size: 100, }) require.NoError(t, err) resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body)) require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) got := struct { CacheID uint64 `json:"cacheId"` }{} require.NoError(t, json.NewDecoder(resp.Body).Decode(&got)) id = got.CacheID } { req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content)) require.NoError(t, err) req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Content-Range", "bytes 0-99/*") resp, err := http.DefaultClient.Do(req) require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) } { resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil) require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) } { req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content)) require.NoError(t, err) req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Content-Range", "bytes 0-99/*") resp, err := http.DefaultClient.Do(req) require.NoError(t, err) assert.Equal(t, 400, resp.StatusCode) } }) t.Run("upload with invalid range", func(t *testing.T) { key := strings.ToLower(t.Name()) version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20" var id uint64 content := make([]byte, 100) _, err := rand.Read(content) require.NoError(t, err) { body, err := json.Marshal(&Request{ Key: key, Version: version, Size: 100, }) require.NoError(t, err) resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body)) require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) got := struct { CacheID uint64 `json:"cacheId"` }{} require.NoError(t, 
json.NewDecoder(resp.Body).Decode(&got)) id = got.CacheID } { req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content)) require.NoError(t, err) req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Content-Range", "bytes xx-99/*") resp, err := http.DefaultClient.Do(req) require.NoError(t, err) assert.Equal(t, 400, resp.StatusCode) } }) t.Run("commit with bad id", func(t *testing.T) { { resp, err := http.Post(fmt.Sprintf("%s/caches/invalid_id", base), "", nil) require.NoError(t, err) assert.Equal(t, 400, resp.StatusCode) } }) t.Run("commit with not exist id", func(t *testing.T) { { resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, 100), "", nil) require.NoError(t, err) assert.Equal(t, 400, resp.StatusCode) } }) t.Run("duplicate commit", func(t *testing.T) { key := strings.ToLower(t.Name()) version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20" var id uint64 content := make([]byte, 100) _, err := rand.Read(content) require.NoError(t, err) { body, err := json.Marshal(&Request{ Key: key, Version: version, Size: 100, }) require.NoError(t, err) resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body)) require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) got := struct { CacheID uint64 `json:"cacheId"` }{} require.NoError(t, json.NewDecoder(resp.Body).Decode(&got)) id = got.CacheID } { req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content)) require.NoError(t, err) req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Content-Range", "bytes 0-99/*") resp, err := http.DefaultClient.Do(req) require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) } { resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil) require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) } { resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), 
"", nil) require.NoError(t, err) assert.Equal(t, 400, resp.StatusCode) } }) t.Run("commit early", func(t *testing.T) { key := strings.ToLower(t.Name()) version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20" var id uint64 content := make([]byte, 100) _, err := rand.Read(content) require.NoError(t, err) { body, err := json.Marshal(&Request{ Key: key, Version: version, Size: 100, }) require.NoError(t, err) resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body)) require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) got := struct { CacheID uint64 `json:"cacheId"` }{} require.NoError(t, json.NewDecoder(resp.Body).Decode(&got)) id = got.CacheID } { req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content[:50])) require.NoError(t, err) req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Content-Range", "bytes 0-59/*") resp, err := http.DefaultClient.Do(req) require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) } { resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil) require.NoError(t, err) assert.Equal(t, 500, resp.StatusCode) } }) t.Run("get with bad id", func(t *testing.T) { resp, err := http.Get(fmt.Sprintf("%s/artifacts/invalid_id", base)) require.NoError(t, err) require.Equal(t, 400, resp.StatusCode) }) t.Run("get with not exist id", func(t *testing.T) { resp, err := http.Get(fmt.Sprintf("%s/artifacts/%d", base, 100)) require.NoError(t, err) require.Equal(t, 404, resp.StatusCode) }) t.Run("get with not exist id", func(t *testing.T) { resp, err := http.Get(fmt.Sprintf("%s/artifacts/%d", base, 100)) require.NoError(t, err) require.Equal(t, 404, resp.StatusCode) }) t.Run("get with multiple keys", func(t *testing.T) { version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20" key := strings.ToLower(t.Name()) keys := [3]string{ key + "_a_b_c", key + "_a_b", key + "_a", } contents := 
[3][]byte{ make([]byte, 100), make([]byte, 200), make([]byte, 300), } for i := range contents { _, err := rand.Read(contents[i]) require.NoError(t, err) uploadCacheNormally(t, base, keys[i], version, contents[i]) time.Sleep(time.Second) // ensure CreatedAt of caches are different } reqKeys := strings.Join([]string{ key + "_a_b_x", key + "_a_b", key + "_a", }, ",") resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version)) require.NoError(t, err) require.Equal(t, 200, resp.StatusCode) /* Expect `key_a_b` because: - `key_a_b_x" doesn't match any caches. - `key_a_b" matches `key_a_b` and `key_a_b_c`, but `key_a_b` is newer. */ except := 1 got := struct { Result string `json:"result"` ArchiveLocation string `json:"archiveLocation"` CacheKey string `json:"cacheKey"` }{} require.NoError(t, json.NewDecoder(resp.Body).Decode(&got)) assert.Equal(t, "hit", got.Result) assert.Equal(t, keys[except], got.CacheKey) contentResp, err := http.Get(got.ArchiveLocation) require.NoError(t, err) require.Equal(t, 200, contentResp.StatusCode) content, err := io.ReadAll(contentResp.Body) require.NoError(t, err) assert.Equal(t, contents[except], content) }) t.Run("case insensitive", func(t *testing.T) { version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20" key := strings.ToLower(t.Name()) content := make([]byte, 100) _, err := rand.Read(content) require.NoError(t, err) uploadCacheNormally(t, base, key+"_ABC", version, content) { reqKey := key + "_aBc" resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKey, version)) require.NoError(t, err) require.Equal(t, 200, resp.StatusCode) got := struct { Result string `json:"result"` ArchiveLocation string `json:"archiveLocation"` CacheKey string `json:"cacheKey"` }{} require.NoError(t, json.NewDecoder(resp.Body).Decode(&got)) assert.Equal(t, "hit", got.Result) assert.Equal(t, key+"_abc", got.CacheKey) } }) t.Run("exact keys are preferred (key 0)", func(t *testing.T) { 
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20" key := strings.ToLower(t.Name()) keys := [3]string{ key + "_a", key + "_a_b_c", key + "_a_b", } contents := [3][]byte{ make([]byte, 100), make([]byte, 200), make([]byte, 300), } for i := range contents { _, err := rand.Read(contents[i]) require.NoError(t, err) uploadCacheNormally(t, base, keys[i], version, contents[i]) time.Sleep(time.Second) // ensure CreatedAt of caches are different } reqKeys := strings.Join([]string{ key + "_a", key + "_a_b", }, ",") resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version)) require.NoError(t, err) require.Equal(t, 200, resp.StatusCode) /* Expect `key_a` because: - `key_a` matches `key_a`, `key_a_b` and `key_a_b_c`, but `key_a` is an exact match. - `key_a_b` matches `key_a_b` and `key_a_b_c`, but previous key had a match */ expect := 0 got := struct { ArchiveLocation string `json:"archiveLocation"` CacheKey string `json:"cacheKey"` }{} require.NoError(t, json.NewDecoder(resp.Body).Decode(&got)) assert.Equal(t, keys[expect], got.CacheKey) contentResp, err := http.Get(got.ArchiveLocation) require.NoError(t, err) require.Equal(t, 200, contentResp.StatusCode) content, err := io.ReadAll(contentResp.Body) require.NoError(t, err) assert.Equal(t, contents[expect], content) }) t.Run("exact keys are preferred (key 1)", func(t *testing.T) { version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20" key := strings.ToLower(t.Name()) keys := [3]string{ key + "_a", key + "_a_b_c", key + "_a_b", } contents := [3][]byte{ make([]byte, 100), make([]byte, 200), make([]byte, 300), } for i := range contents { _, err := rand.Read(contents[i]) require.NoError(t, err) uploadCacheNormally(t, base, keys[i], version, contents[i]) time.Sleep(time.Second) // ensure CreatedAt of caches are different } reqKeys := strings.Join([]string{ "------------------------------------------------------", key + "_a", key + "_a_b", }, ",") resp, 
err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version)) require.NoError(t, err) require.Equal(t, 200, resp.StatusCode) /* Expect `key_a` because: - `------------------------------------------------------` doesn't match any caches. - `key_a` matches `key_a`, `key_a_b` and `key_a_b_c`, but `key_a` is an exact match. - `key_a_b` matches `key_a_b` and `key_a_b_c`, but previous key had a match */ expect := 0 got := struct { ArchiveLocation string `json:"archiveLocation"` CacheKey string `json:"cacheKey"` }{} require.NoError(t, json.NewDecoder(resp.Body).Decode(&got)) assert.Equal(t, keys[expect], got.CacheKey) contentResp, err := http.Get(got.ArchiveLocation) require.NoError(t, err) require.Equal(t, 200, contentResp.StatusCode) content, err := io.ReadAll(contentResp.Body) require.NoError(t, err) assert.Equal(t, contents[expect], content) }) } func uploadCacheNormally(t *testing.T, base, key, version string, content []byte) { var id uint64 { body, err := json.Marshal(&Request{ Key: key, Version: version, Size: int64(len(content)), }) require.NoError(t, err) resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body)) require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) got := struct { CacheID uint64 `json:"cacheId"` }{} require.NoError(t, json.NewDecoder(resp.Body).Decode(&got)) id = got.CacheID } { req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content)) require.NoError(t, err) req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Content-Range", "bytes 0-99/*") resp, err := http.DefaultClient.Do(req) require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) } { resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil) require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) } var archiveLocation string { resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, key, version)) 
require.NoError(t, err) require.Equal(t, 200, resp.StatusCode) got := struct { Result string `json:"result"` ArchiveLocation string `json:"archiveLocation"` CacheKey string `json:"cacheKey"` }{} require.NoError(t, json.NewDecoder(resp.Body).Decode(&got)) assert.Equal(t, "hit", got.Result) assert.Equal(t, strings.ToLower(key), got.CacheKey) archiveLocation = got.ArchiveLocation } { resp, err := http.Get(archiveLocation) //nolint:gosec require.NoError(t, err) require.Equal(t, 200, resp.StatusCode) got, err := io.ReadAll(resp.Body) require.NoError(t, err) assert.Equal(t, content, got) } } func TestHandler_CustomExternalURL(t *testing.T) { dir := filepath.Join(t.TempDir(), "artifactcache") handler, err := StartHandler(dir, "", "", 0, nil) require.NoError(t, err) defer func() { require.NoError(t, handler.Close()) }() handler.customExternalURL = fmt.Sprintf("http://%s:%d", "127.0.0.1", handler.GetActualPort()) assert.Equal(t, fmt.Sprintf("http://%s:%d", "127.0.0.1", handler.GetActualPort()), handler.ExternalURL()) base := fmt.Sprintf("%s%s", handler.ExternalURL(), urlBase) t.Run("advertise url set wrong", func(t *testing.T) { original := handler.customExternalURL defer func() { handler.customExternalURL = original }() handler.customExternalURL = "http://127.0.0.999:1234" assert.Equal(t, "http://127.0.0.999:1234", handler.ExternalURL()) }) t.Run("reserve and upload", func(t *testing.T) { key := strings.ToLower(t.Name()) version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20" content := make([]byte, 100) _, err := rand.Read(content) require.NoError(t, err) uploadCacheNormally(t, base, key, version, content) }) } func TestHandler_gcCache(t *testing.T) { dir := filepath.Join(t.TempDir(), "artifactcache") handler, err := StartHandler(dir, "", "", 0, nil) require.NoError(t, err) defer func() { require.NoError(t, handler.Close()) }() now := time.Now() cases := []struct { Cache *Cache Kept bool }{ { // should be kept, since it's used recently and not too 
old. Cache: &Cache{ Key: "test_key_1", Version: "test_version", Complete: true, UsedAt: now.Unix(), CreatedAt: now.Add(-time.Hour).Unix(), }, Kept: true, }, { // should be removed, since it's not complete and not used for a while. Cache: &Cache{ Key: "test_key_2", Version: "test_version", Complete: false, UsedAt: now.Add(-(keepTemp + time.Second)).Unix(), CreatedAt: now.Add(-(keepTemp + time.Hour)).Unix(), }, Kept: false, }, { // should be removed, since it's not used for a while. Cache: &Cache{ Key: "test_key_3", Version: "test_version", Complete: true, UsedAt: now.Add(-(keepUnused + time.Second)).Unix(), CreatedAt: now.Add(-(keepUnused + time.Hour)).Unix(), }, Kept: false, }, { // should be removed, since it's used but too old. Cache: &Cache{ Key: "test_key_3", Version: "test_version", Complete: true, UsedAt: now.Unix(), CreatedAt: now.Add(-(keepUsed + time.Second)).Unix(), }, Kept: false, }, { // should be kept, since it has a newer edition but be used recently. Cache: &Cache{ Key: "test_key_1", Version: "test_version", Complete: true, UsedAt: now.Add(-(keepOld - time.Minute)).Unix(), CreatedAt: now.Add(-(time.Hour + time.Second)).Unix(), }, Kept: true, }, { // should be removed, since it has a newer edition and not be used recently. 
Cache: &Cache{ Key: "test_key_1", Version: "test_version", Complete: true, UsedAt: now.Add(-(keepOld + time.Second)).Unix(), CreatedAt: now.Add(-(time.Hour + time.Second)).Unix(), }, Kept: false, }, } db, err := handler.openDB() require.NoError(t, err) for _, c := range cases { require.NoError(t, insertCache(db, c.Cache)) } require.NoError(t, db.Close()) handler.gcAt = time.Time{} // ensure gcCache will not skip handler.gcCache() db, err = handler.openDB() require.NoError(t, err) for i, v := range cases { t.Run(fmt.Sprintf("%d_%s", i, v.Cache.Key), func(t *testing.T) { cache := &Cache{} err = db.Get(v.Cache.ID, cache) if v.Kept { assert.NoError(t, err) } else { assert.ErrorIs(t, err, bolthold.ErrNotFound) } }) } require.NoError(t, db.Close()) }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/artifactcache/model.go
pkg/artifactcache/model.go
package artifactcache type Request struct { Key string `json:"key" ` Version string `json:"version"` Size int64 `json:"cacheSize"` } func (c *Request) ToCache() *Cache { if c == nil { return nil } ret := &Cache{ Key: c.Key, Version: c.Version, Size: c.Size, } if c.Size == 0 { // So the request comes from old versions of actions, like `actions/cache@v2`. // It doesn't send cache size. Set it to -1 to indicate that. ret.Size = -1 } return ret } type Cache struct { ID uint64 `json:"id" boltholdKey:"ID"` Key string `json:"key" boltholdIndex:"Key"` Version string `json:"version" boltholdIndex:"Version"` Size int64 `json:"cacheSize"` Complete bool `json:"complete" boltholdIndex:"Complete"` UsedAt int64 `json:"usedAt" boltholdIndex:"UsedAt"` CreatedAt int64 `json:"createdAt" boltholdIndex:"CreatedAt"` }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/artifactcache/handler.go
pkg/artifactcache/handler.go
package artifactcache import ( "encoding/json" "errors" "fmt" "io" "net" "net/http" "os" "path/filepath" "regexp" "strconv" "strings" "sync/atomic" "time" "github.com/julienschmidt/httprouter" "github.com/sirupsen/logrus" "github.com/timshannon/bolthold" "go.etcd.io/bbolt" "github.com/nektos/act/pkg/common" ) const ( urlBase = "/_apis/artifactcache" ) type Handler struct { dir string storage *Storage router *httprouter.Router listener net.Listener server *http.Server logger logrus.FieldLogger gcing atomic.Bool gcAt time.Time outboundIP string customExternalURL string } func StartHandler(dir, customExternalURL string, outboundIP string, port uint16, logger logrus.FieldLogger) (*Handler, error) { h := &Handler{} if logger == nil { discard := logrus.New() discard.Out = io.Discard logger = discard } logger = logger.WithField("module", "artifactcache") h.logger = logger if dir == "" { home, err := os.UserHomeDir() if err != nil { return nil, err } dir = filepath.Join(home, ".cache", "actcache") } if err := os.MkdirAll(dir, 0o755); err != nil { return nil, err } h.dir = dir storage, err := NewStorage(filepath.Join(dir, "cache")) if err != nil { return nil, err } h.storage = storage if customExternalURL != "" { h.customExternalURL = customExternalURL } if outboundIP != "" { h.outboundIP = outboundIP } else if ip := common.GetOutboundIP(); ip == nil { return nil, fmt.Errorf("unable to determine outbound IP address") } else { h.outboundIP = ip.String() } router := httprouter.New() router.GET(urlBase+"/cache", h.middleware(h.find)) router.POST(urlBase+"/caches", h.middleware(h.reserve)) router.PATCH(urlBase+"/caches/:id", h.middleware(h.upload)) router.POST(urlBase+"/caches/:id", h.middleware(h.commit)) router.GET(urlBase+"/artifacts/:id", h.middleware(h.get)) router.POST(urlBase+"/clean", h.middleware(h.clean)) h.router = router h.gcCache() listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) // listen on all interfaces if err != nil { return nil, err } server := 
&http.Server{ ReadHeaderTimeout: 2 * time.Second, Handler: router, } go func() { if err := server.Serve(listener); err != nil && errors.Is(err, net.ErrClosed) { logger.Errorf("http serve: %v", err) } }() h.listener = listener h.server = server return h, nil } func (h *Handler) GetActualPort() int { return h.listener.Addr().(*net.TCPAddr).Port } func (h *Handler) ExternalURL() string { if h.customExternalURL != "" { return h.customExternalURL } return fmt.Sprintf("http://%s:%d", h.outboundIP, h.GetActualPort()) } func (h *Handler) Close() error { if h == nil { return nil } var retErr error if h.server != nil { err := h.server.Close() if err != nil { retErr = err } h.server = nil } if h.listener != nil { err := h.listener.Close() if errors.Is(err, net.ErrClosed) { err = nil } if err != nil { retErr = err } h.listener = nil } return retErr } func (h *Handler) openDB() (*bolthold.Store, error) { return bolthold.Open(filepath.Join(h.dir, "bolt.db"), 0o644, &bolthold.Options{ Encoder: json.Marshal, Decoder: json.Unmarshal, Options: &bbolt.Options{ Timeout: 5 * time.Second, NoGrowSync: bbolt.DefaultOptions.NoGrowSync, FreelistType: bbolt.DefaultOptions.FreelistType, }, }) } // GET /_apis/artifactcache/cache func (h *Handler) find(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { keys := strings.Split(r.URL.Query().Get("keys"), ",") // cache keys are case insensitive for i, key := range keys { keys[i] = strings.ToLower(key) } version := r.URL.Query().Get("version") db, err := h.openDB() if err != nil { h.responseJSON(w, r, 500, err) return } defer db.Close() cache, err := findCache(db, keys, version) if err != nil { h.responseJSON(w, r, 500, err) return } if cache == nil { h.responseJSON(w, r, 204) return } if ok, err := h.storage.Exist(cache.ID); err != nil { h.responseJSON(w, r, 500, err) return } else if !ok { _ = db.Delete(cache.ID, cache) h.responseJSON(w, r, 204) return } h.responseJSON(w, r, 200, map[string]any{ "result": "hit", "archiveLocation": 
fmt.Sprintf("%s%s/artifacts/%d", h.ExternalURL(), urlBase, cache.ID), "cacheKey": cache.Key, }) } // POST /_apis/artifactcache/caches func (h *Handler) reserve(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { api := &Request{} if err := json.NewDecoder(r.Body).Decode(api); err != nil { h.responseJSON(w, r, 400, err) return } // cache keys are case insensitive api.Key = strings.ToLower(api.Key) cache := api.ToCache() db, err := h.openDB() if err != nil { h.responseJSON(w, r, 500, err) return } defer db.Close() now := time.Now().Unix() cache.CreatedAt = now cache.UsedAt = now if err := insertCache(db, cache); err != nil { h.responseJSON(w, r, 500, err) return } h.responseJSON(w, r, 200, map[string]any{ "cacheId": cache.ID, }) } // PATCH /_apis/artifactcache/caches/:id func (h *Handler) upload(w http.ResponseWriter, r *http.Request, params httprouter.Params) { id, err := strconv.ParseUint(params.ByName("id"), 10, 64) if err != nil { h.responseJSON(w, r, 400, err) return } cache := &Cache{} db, err := h.openDB() if err != nil { h.responseJSON(w, r, 500, err) return } defer db.Close() if err := db.Get(id, cache); err != nil { if errors.Is(err, bolthold.ErrNotFound) { h.responseJSON(w, r, 400, fmt.Errorf("cache %d: not reserved", id)) return } h.responseJSON(w, r, 500, err) return } if cache.Complete { h.responseJSON(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key)) return } db.Close() start, _, err := parseContentRange(r.Header.Get("Content-Range")) if err != nil { h.responseJSON(w, r, 400, err) return } if err := h.storage.Write(cache.ID, start, r.Body); err != nil { h.responseJSON(w, r, 500, err) } h.useCache(id) h.responseJSON(w, r, 200) } // POST /_apis/artifactcache/caches/:id func (h *Handler) commit(w http.ResponseWriter, r *http.Request, params httprouter.Params) { id, err := strconv.ParseInt(params.ByName("id"), 10, 64) if err != nil { h.responseJSON(w, r, 400, err) return } cache := &Cache{} db, err := h.openDB() if err 
!= nil { h.responseJSON(w, r, 500, err) return } defer db.Close() if err := db.Get(id, cache); err != nil { if errors.Is(err, bolthold.ErrNotFound) { h.responseJSON(w, r, 400, fmt.Errorf("cache %d: not reserved", id)) return } h.responseJSON(w, r, 500, err) return } if cache.Complete { h.responseJSON(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key)) return } db.Close() size, err := h.storage.Commit(cache.ID, cache.Size) if err != nil { h.responseJSON(w, r, 500, err) return } // write real size back to cache, it may be different from the current value when the request doesn't specify it. cache.Size = size db, err = h.openDB() if err != nil { h.responseJSON(w, r, 500, err) return } defer db.Close() cache.Complete = true if err := db.Update(cache.ID, cache); err != nil { h.responseJSON(w, r, 500, err) return } h.responseJSON(w, r, 200) } // GET /_apis/artifactcache/artifacts/:id func (h *Handler) get(w http.ResponseWriter, r *http.Request, params httprouter.Params) { id, err := strconv.ParseUint(params.ByName("id"), 10, 64) if err != nil { h.responseJSON(w, r, 400, err) return } h.useCache(id) h.storage.Serve(w, r, id) } // POST /_apis/artifactcache/clean func (h *Handler) clean(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { // TODO: don't support force deleting cache entries // see: https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#force-deleting-cache-entries h.responseJSON(w, r, 200) } func (h *Handler) middleware(handler httprouter.Handle) httprouter.Handle { return func(w http.ResponseWriter, r *http.Request, params httprouter.Params) { h.logger.Debugf("%s %s", r.Method, r.RequestURI) handler(w, r, params) go h.gcCache() } } // if not found, return (nil, nil) instead of an error. 
func findCache(db *bolthold.Store, keys []string, version string) (*Cache, error) { cache := &Cache{} for _, prefix := range keys { // if a key in the list matches exactly, don't return partial matches if err := db.FindOne(cache, bolthold.Where("Key").Eq(prefix). And("Version").Eq(version). And("Complete").Eq(true). SortBy("CreatedAt").Reverse()); err == nil || !errors.Is(err, bolthold.ErrNotFound) { if err != nil { return nil, fmt.Errorf("find cache: %w", err) } return cache, nil } prefixPattern := fmt.Sprintf("^%s", regexp.QuoteMeta(prefix)) re, err := regexp.Compile(prefixPattern) if err != nil { continue } if err := db.FindOne(cache, bolthold.Where("Key").RegExp(re). And("Version").Eq(version). And("Complete").Eq(true). SortBy("CreatedAt").Reverse()); err != nil { if errors.Is(err, bolthold.ErrNotFound) { continue } return nil, fmt.Errorf("find cache: %w", err) } return cache, nil } return nil, nil } func insertCache(db *bolthold.Store, cache *Cache) error { if err := db.Insert(bolthold.NextSequence(), cache); err != nil { return fmt.Errorf("insert cache: %w", err) } // write back id to db if err := db.Update(cache.ID, cache); err != nil { return fmt.Errorf("write back id to db: %w", err) } return nil } func (h *Handler) useCache(id uint64) { db, err := h.openDB() if err != nil { return } defer db.Close() cache := &Cache{} if err := db.Get(id, cache); err != nil { return } cache.UsedAt = time.Now().Unix() _ = db.Update(cache.ID, cache) } const ( keepUsed = 30 * 24 * time.Hour keepUnused = 7 * 24 * time.Hour keepTemp = 5 * time.Minute keepOld = 5 * time.Minute ) func (h *Handler) gcCache() { if h.gcing.Load() { return } if !h.gcing.CompareAndSwap(false, true) { return } defer h.gcing.Store(false) if time.Since(h.gcAt) < time.Hour { h.logger.Debugf("skip gc: %v", h.gcAt.String()) return } h.gcAt = time.Now() h.logger.Debugf("gc: %v", h.gcAt.String()) db, err := h.openDB() if err != nil { return } defer db.Close() // Remove the caches which are not completed for a 
while, they are most likely to be broken. var caches []*Cache if err := db.Find(&caches, bolthold. Where("UsedAt").Lt(time.Now().Add(-keepTemp).Unix()). And("Complete").Eq(false), ); err != nil { h.logger.Warnf("find caches: %v", err) } else { for _, cache := range caches { h.storage.Remove(cache.ID) if err := db.Delete(cache.ID, cache); err != nil { h.logger.Warnf("delete cache: %v", err) continue } h.logger.Infof("deleted cache: %+v", cache) } } // Remove the old caches which have not been used recently. caches = caches[:0] if err := db.Find(&caches, bolthold. Where("UsedAt").Lt(time.Now().Add(-keepUnused).Unix()), ); err != nil { h.logger.Warnf("find caches: %v", err) } else { for _, cache := range caches { h.storage.Remove(cache.ID) if err := db.Delete(cache.ID, cache); err != nil { h.logger.Warnf("delete cache: %v", err) continue } h.logger.Infof("deleted cache: %+v", cache) } } // Remove the old caches which are too old. caches = caches[:0] if err := db.Find(&caches, bolthold. Where("CreatedAt").Lt(time.Now().Add(-keepUsed).Unix()), ); err != nil { h.logger.Warnf("find caches: %v", err) } else { for _, cache := range caches { h.storage.Remove(cache.ID) if err := db.Delete(cache.ID, cache); err != nil { h.logger.Warnf("delete cache: %v", err) continue } h.logger.Infof("deleted cache: %+v", cache) } } // Remove the old caches with the same key and version, keep the latest one. // Also keep the olds which have been used recently for a while in case of the cache is still in use. if results, err := db.FindAggregate( &Cache{}, bolthold.Where("Complete").Eq(true), "Key", "Version", ); err != nil { h.logger.Warnf("find aggregate caches: %v", err) } else { for _, result := range results { if result.Count() <= 1 { continue } result.Sort("CreatedAt") caches = caches[:0] result.Reduction(&caches) for _, cache := range caches[:len(caches)-1] { if time.Since(time.Unix(cache.UsedAt, 0)) < keepOld { // Keep it since it has been used recently, even if it's old. 
// Or it could break downloading in process. continue } h.storage.Remove(cache.ID) if err := db.Delete(cache.ID, cache); err != nil { h.logger.Warnf("delete cache: %v", err) continue } h.logger.Infof("deleted cache: %+v", cache) } } } } func (h *Handler) responseJSON(w http.ResponseWriter, r *http.Request, code int, v ...any) { w.Header().Set("Content-Type", "application/json; charset=utf-8") var data []byte if len(v) == 0 || v[0] == nil { data, _ = json.Marshal(struct{}{}) } else if err, ok := v[0].(error); ok { h.logger.Errorf("%v %v: %v", r.Method, r.RequestURI, err) data, _ = json.Marshal(map[string]any{ "error": err.Error(), }) } else { data, _ = json.Marshal(v[0]) } w.WriteHeader(code) _, _ = w.Write(data) } func parseContentRange(s string) (int64, int64, error) { // support the format like "bytes 11-22/*" only s, _, _ = strings.Cut(strings.TrimPrefix(s, "bytes "), "/") s1, s2, _ := strings.Cut(s, "-") start, err := strconv.ParseInt(s1, 10, 64) if err != nil { return 0, 0, fmt.Errorf("parse %q: %w", s, err) } stop, err := strconv.ParseInt(s2, 10, 64) if err != nil { return 0, 0, fmt.Errorf("parse %q: %w", s, err) } return start, stop, nil }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/artifactcache/doc.go
pkg/artifactcache/doc.go
// Package artifactcache provides a cache handler for the runner. // // Inspired by https://github.com/sp-ricard-valverde/github-act-cache-server // // TODO: Authorization // TODO: Restrictions for accessing a cache, see https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache // TODO: Force deleting cache entries, see https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#force-deleting-cache-entries package artifactcache
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/artifacts/artifacts_v4.go
pkg/artifacts/artifacts_v4.go
// Copyright 2024 The Gitea Authors. All rights reserved. // SPDX-License-Identifier: MIT package artifacts // GitHub Actions Artifacts V4 API Simple Description // // 1. Upload artifact // 1.1. CreateArtifact // Post: /twirp/github.actions.results.api.v1.ArtifactService/CreateArtifact // Request: // { // "workflow_run_backend_id": "21", // "workflow_job_run_backend_id": "49", // "name": "test", // "version": 4 // } // Response: // { // "ok": true, // "signedUploadUrl": "http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75" // } // 1.2. Upload Zip Content to Blobstorage (unauthenticated request) // PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=block // 1.3. Continue Upload Zip Content to Blobstorage (unauthenticated request), repeat until everything is uploaded // PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=appendBlock // 1.4. Unknown xml payload to Blobstorage (unauthenticated request), ignored for now // PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=blockList // 1.5. 
FinalizeArtifact // Post: /twirp/github.actions.results.api.v1.ArtifactService/FinalizeArtifact // Request // { // "workflow_run_backend_id": "21", // "workflow_job_run_backend_id": "49", // "name": "test", // "size": "2097", // "hash": "sha256:b6325614d5649338b87215d9536b3c0477729b8638994c74cdefacb020a2cad4" // } // Response // { // "ok": true, // "artifactId": "4" // } // 2. Download artifact // 2.1. ListArtifacts and optionally filter by artifact exact name or id // Post: /twirp/github.actions.results.api.v1.ArtifactService/ListArtifacts // Request // { // "workflow_run_backend_id": "21", // "workflow_job_run_backend_id": "49", // "name_filter": "test" // } // Response // { // "artifacts": [ // { // "workflowRunBackendId": "21", // "workflowJobRunBackendId": "49", // "databaseId": "4", // "name": "test", // "size": "2093", // "createdAt": "2024-01-23T00:13:28Z" // } // ] // } // 2.2. GetSignedArtifactURL get the URL to download the artifact zip file of a specific artifact // Post: /twirp/github.actions.results.api.v1.ArtifactService/GetSignedArtifactURL // Request // { // "workflow_run_backend_id": "21", // "workflow_job_run_backend_id": "49", // "name": "test" // } // Response // { // "signedUrl": "http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/DownloadArtifact?sig=wHzFOwpF-6220-5CA0CIRmAX9VbiTC2Mji89UOqo1E8=&expires=2024-01-23+21%3A51%3A56.872846295+%2B0100+CET&artifactName=test&taskID=76" // } // 2.3. 
Download Zip from Blobstorage (unauthenticated request) // GET: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/DownloadArtifact?sig=wHzFOwpF-6220-5CA0CIRmAX9VbiTC2Mji89UOqo1E8=&expires=2024-01-23+21%3A51%3A56.872846295+%2B0100+CET&artifactName=test&taskID=76 import ( "crypto/hmac" "crypto/sha256" "encoding/base64" "errors" "fmt" "hash/fnv" "io" "io/fs" "net/http" "net/url" "os" "path" "strconv" "strings" "time" "github.com/julienschmidt/httprouter" log "github.com/sirupsen/logrus" "google.golang.org/protobuf/encoding/protojson" protoreflect "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/known/timestamppb" ) const ( ArtifactV4RouteBase = "/twirp/github.actions.results.api.v1.ArtifactService" ArtifactV4ContentEncoding = "application/zip" ) type artifactV4Routes struct { prefix string fs WriteFS rfs fs.FS AppURL string baseDir string } type ArtifactContext struct { Req *http.Request Resp http.ResponseWriter } func artifactNameToID(s string) int64 { h := fnv.New32a() h.Write([]byte(s)) return int64(h.Sum32()) } func (c ArtifactContext) Error(status int, _ ...interface{}) { c.Resp.WriteHeader(status) } func (c ArtifactContext) JSON(status int, _ ...interface{}) { c.Resp.WriteHeader(status) } func validateRunIDV4(ctx *ArtifactContext, rawRunID string) (interface{}, int64, bool) { runID, err := strconv.ParseInt(rawRunID, 10, 64) if err != nil /* || task.Job.RunID != runID*/ { log.Error("Error runID not match") ctx.Error(http.StatusBadRequest, "run-id does not match") return nil, 0, false } return nil, runID, true } func RoutesV4(router *httprouter.Router, baseDir string, fsys WriteFS, rfs fs.FS) { route := &artifactV4Routes{ fs: fsys, rfs: rfs, baseDir: baseDir, prefix: ArtifactV4RouteBase, } router.POST(path.Join(ArtifactV4RouteBase, "CreateArtifact"), func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { route.AppURL = r.Host route.createArtifact(&ArtifactContext{ Req: r, Resp: w, }) }) 
router.POST(path.Join(ArtifactV4RouteBase, "FinalizeArtifact"), func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { route.finalizeArtifact(&ArtifactContext{ Req: r, Resp: w, }) }) router.POST(path.Join(ArtifactV4RouteBase, "ListArtifacts"), func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { route.listArtifacts(&ArtifactContext{ Req: r, Resp: w, }) }) router.POST(path.Join(ArtifactV4RouteBase, "GetSignedArtifactURL"), func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { route.AppURL = r.Host route.getSignedArtifactURL(&ArtifactContext{ Req: r, Resp: w, }) }) router.POST(path.Join(ArtifactV4RouteBase, "DeleteArtifact"), func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { route.AppURL = r.Host route.deleteArtifact(&ArtifactContext{ Req: r, Resp: w, }) }) router.PUT(path.Join(ArtifactV4RouteBase, "UploadArtifact"), func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { route.uploadArtifact(&ArtifactContext{ Req: r, Resp: w, }) }) router.GET(path.Join(ArtifactV4RouteBase, "DownloadArtifact"), func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { route.downloadArtifact(&ArtifactContext{ Req: r, Resp: w, }) }) } func (r artifactV4Routes) buildSignature(endp, expires, artifactName string, taskID int64) []byte { mac := hmac.New(sha256.New, []byte{0xba, 0xdb, 0xee, 0xf0}) mac.Write([]byte(endp)) mac.Write([]byte(expires)) mac.Write([]byte(artifactName)) mac.Write([]byte(fmt.Sprint(taskID))) return mac.Sum(nil) } func (r artifactV4Routes) buildArtifactURL(endp, artifactName string, taskID int64) string { expires := time.Now().Add(60 * time.Minute).Format("2006-01-02 15:04:05.999999999 -0700 MST") uploadURL := "http://" + strings.TrimSuffix(r.AppURL, "/") + strings.TrimSuffix(r.prefix, "/") + "/" + endp + "?sig=" + base64.URLEncoding.EncodeToString(r.buildSignature(endp, expires, artifactName, taskID)) + "&expires=" + url.QueryEscape(expires) + "&artifactName=" + 
url.QueryEscape(artifactName) + "&taskID=" + fmt.Sprint(taskID) return uploadURL } func (r artifactV4Routes) verifySignature(ctx *ArtifactContext, endp string) (int64, string, bool) { rawTaskID := ctx.Req.URL.Query().Get("taskID") sig := ctx.Req.URL.Query().Get("sig") expires := ctx.Req.URL.Query().Get("expires") artifactName := ctx.Req.URL.Query().Get("artifactName") dsig, _ := base64.URLEncoding.DecodeString(sig) taskID, _ := strconv.ParseInt(rawTaskID, 10, 64) expecedsig := r.buildSignature(endp, expires, artifactName, taskID) if !hmac.Equal(dsig, expecedsig) { log.Error("Error unauthorized") ctx.Error(http.StatusUnauthorized, "Error unauthorized") return -1, "", false } t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", expires) if err != nil || t.Before(time.Now()) { log.Error("Error link expired") ctx.Error(http.StatusUnauthorized, "Error link expired") return -1, "", false } return taskID, artifactName, true } func (r *artifactV4Routes) parseProtbufBody(ctx *ArtifactContext, req protoreflect.ProtoMessage) bool { body, err := io.ReadAll(ctx.Req.Body) if err != nil { log.Errorf("Error decode request body: %v", err) ctx.Error(http.StatusInternalServerError, "Error decode request body") return false } err = protojson.Unmarshal(body, req) if err != nil { log.Errorf("Error decode request body: %v", err) ctx.Error(http.StatusInternalServerError, "Error decode request body") return false } return true } func (r *artifactV4Routes) sendProtbufBody(ctx *ArtifactContext, req protoreflect.ProtoMessage) { resp, err := protojson.Marshal(req) if err != nil { log.Errorf("Error encode response body: %v", err) ctx.Error(http.StatusInternalServerError, "Error encode response body") return } ctx.Resp.Header().Set("Content-Type", "application/json;charset=utf-8") ctx.Resp.WriteHeader(http.StatusOK) _, _ = ctx.Resp.Write(resp) } func (r *artifactV4Routes) createArtifact(ctx *ArtifactContext) { var req CreateArtifactRequest if ok := r.parseProtbufBody(ctx, &req); !ok { 
return } _, runID, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId) if !ok { return } artifactName := req.Name safeRunPath := safeResolve(r.baseDir, fmt.Sprint(runID)) safePath := safeResolve(safeRunPath, artifactName) safePath = safeResolve(safePath, artifactName+".zip") file, err := r.fs.OpenWritable(safePath) if err != nil { panic(err) } file.Close() respData := CreateArtifactResponse{ Ok: true, SignedUploadUrl: r.buildArtifactURL("UploadArtifact", artifactName, runID), } r.sendProtbufBody(ctx, &respData) } func (r *artifactV4Routes) uploadArtifact(ctx *ArtifactContext) { task, artifactName, ok := r.verifySignature(ctx, "UploadArtifact") if !ok { return } comp := ctx.Req.URL.Query().Get("comp") switch comp { case "block", "appendBlock": safeRunPath := safeResolve(r.baseDir, fmt.Sprint(task)) safePath := safeResolve(safeRunPath, artifactName) safePath = safeResolve(safePath, artifactName+".zip") file, err := r.fs.OpenAppendable(safePath) if err != nil { panic(err) } defer file.Close() writer, ok := file.(io.Writer) if !ok { panic(errors.New("File is not writable")) } if ctx.Req.Body == nil { panic(errors.New("No body given")) } _, err = io.Copy(writer, ctx.Req.Body) if err != nil { panic(err) } file.Close() ctx.JSON(http.StatusCreated, "appended") case "blocklist": ctx.JSON(http.StatusCreated, "created") } } func (r *artifactV4Routes) finalizeArtifact(ctx *ArtifactContext) { var req FinalizeArtifactRequest if ok := r.parseProtbufBody(ctx, &req); !ok { return } _, _, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId) if !ok { return } respData := FinalizeArtifactResponse{ Ok: true, ArtifactId: artifactNameToID(req.Name), } r.sendProtbufBody(ctx, &respData) } func (r *artifactV4Routes) listArtifacts(ctx *ArtifactContext) { var req ListArtifactsRequest if ok := r.parseProtbufBody(ctx, &req); !ok { return } _, runID, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId) if !ok { return } safePath := safeResolve(r.baseDir, fmt.Sprint(runID)) entries, err := 
fs.ReadDir(r.rfs, safePath) if err != nil { panic(err) } list := []*ListArtifactsResponse_MonolithArtifact{} for _, entry := range entries { id := artifactNameToID(entry.Name()) if (req.NameFilter == nil || req.NameFilter.Value == entry.Name()) && (req.IdFilter == nil || req.IdFilter.Value == id) { data := &ListArtifactsResponse_MonolithArtifact{ Name: entry.Name(), CreatedAt: timestamppb.Now(), DatabaseId: id, WorkflowRunBackendId: req.WorkflowRunBackendId, WorkflowJobRunBackendId: req.WorkflowJobRunBackendId, Size: 0, } if info, err := entry.Info(); err == nil { data.Size = info.Size() data.CreatedAt = timestamppb.New(info.ModTime()) } list = append(list, data) } } respData := ListArtifactsResponse{ Artifacts: list, } r.sendProtbufBody(ctx, &respData) } func (r *artifactV4Routes) getSignedArtifactURL(ctx *ArtifactContext) { var req GetSignedArtifactURLRequest if ok := r.parseProtbufBody(ctx, &req); !ok { return } _, runID, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId) if !ok { return } artifactName := req.Name respData := GetSignedArtifactURLResponse{} respData.SignedUrl = r.buildArtifactURL("DownloadArtifact", artifactName, runID) r.sendProtbufBody(ctx, &respData) } func (r *artifactV4Routes) downloadArtifact(ctx *ArtifactContext) { task, artifactName, ok := r.verifySignature(ctx, "DownloadArtifact") if !ok { return } safeRunPath := safeResolve(r.baseDir, fmt.Sprint(task)) safePath := safeResolve(safeRunPath, artifactName) safePath = safeResolve(safePath, artifactName+".zip") file, _ := r.rfs.Open(safePath) _, _ = io.Copy(ctx.Resp, file) } func (r *artifactV4Routes) deleteArtifact(ctx *ArtifactContext) { var req DeleteArtifactRequest if ok := r.parseProtbufBody(ctx, &req); !ok { return } _, runID, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId) if !ok { return } safeRunPath := safeResolve(r.baseDir, fmt.Sprint(runID)) safePath := safeResolve(safeRunPath, req.Name) _ = os.RemoveAll(safePath) respData := DeleteArtifactResponse{ Ok: true, ArtifactId: 
artifactNameToID(req.Name), } r.sendProtbufBody(ctx, &respData) }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/artifacts/server_test.go
pkg/artifacts/server_test.go
package artifacts import ( "context" "encoding/json" "fmt" "net/http" "net/http/httptest" "os" "path" "path/filepath" "strings" "testing" "testing/fstest" "github.com/julienschmidt/httprouter" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/nektos/act/pkg/model" "github.com/nektos/act/pkg/runner" ) type writableMapFile struct { fstest.MapFile } func (f *writableMapFile) Write(data []byte) (int, error) { f.Data = data return len(data), nil } func (f *writableMapFile) Close() error { return nil } type writeMapFS struct { fstest.MapFS } func (fsys writeMapFS) OpenWritable(name string) (WritableFile, error) { var file = &writableMapFile{ MapFile: fstest.MapFile{ Data: []byte("content2"), }, } fsys.MapFS[name] = &file.MapFile return file, nil } func (fsys writeMapFS) OpenAppendable(name string) (WritableFile, error) { var file = &writableMapFile{ MapFile: fstest.MapFile{ Data: []byte("content2"), }, } fsys.MapFS[name] = &file.MapFile return file, nil } func TestNewArtifactUploadPrepare(t *testing.T) { assert := assert.New(t) var memfs = fstest.MapFS(map[string]*fstest.MapFile{}) router := httprouter.New() uploads(router, "artifact/server/path", writeMapFS{memfs}) req, _ := http.NewRequest("POST", "http://localhost/_apis/pipelines/workflows/1/artifacts", nil) rr := httptest.NewRecorder() router.ServeHTTP(rr, req) if status := rr.Code; status != http.StatusOK { assert.Fail("Wrong status") } response := FileContainerResourceURL{} err := json.Unmarshal(rr.Body.Bytes(), &response) if err != nil { panic(err) } assert.Equal("http://localhost/upload/1", response.FileContainerResourceURL) } func TestArtifactUploadBlob(t *testing.T) { assert := assert.New(t) var memfs = fstest.MapFS(map[string]*fstest.MapFile{}) router := httprouter.New() uploads(router, "artifact/server/path", writeMapFS{memfs}) req, _ := http.NewRequest("PUT", "http://localhost/upload/1?itemPath=some/file", strings.NewReader("content")) rr := httptest.NewRecorder() 
router.ServeHTTP(rr, req) if status := rr.Code; status != http.StatusOK { assert.Fail("Wrong status") } response := ResponseMessage{} err := json.Unmarshal(rr.Body.Bytes(), &response) if err != nil { panic(err) } assert.Equal("success", response.Message) assert.Equal("content", string(memfs["artifact/server/path/1/some/file"].Data)) } func TestFinalizeArtifactUpload(t *testing.T) { assert := assert.New(t) var memfs = fstest.MapFS(map[string]*fstest.MapFile{}) router := httprouter.New() uploads(router, "artifact/server/path", writeMapFS{memfs}) req, _ := http.NewRequest("PATCH", "http://localhost/_apis/pipelines/workflows/1/artifacts", nil) rr := httptest.NewRecorder() router.ServeHTTP(rr, req) if status := rr.Code; status != http.StatusOK { assert.Fail("Wrong status") } response := ResponseMessage{} err := json.Unmarshal(rr.Body.Bytes(), &response) if err != nil { panic(err) } assert.Equal("success", response.Message) } func TestListArtifacts(t *testing.T) { assert := assert.New(t) var memfs = fstest.MapFS(map[string]*fstest.MapFile{ "artifact/server/path/1/file.txt": { Data: []byte(""), }, }) router := httprouter.New() downloads(router, "artifact/server/path", memfs) req, _ := http.NewRequest("GET", "http://localhost/_apis/pipelines/workflows/1/artifacts", nil) rr := httptest.NewRecorder() router.ServeHTTP(rr, req) if status := rr.Code; status != http.StatusOK { assert.FailNow(fmt.Sprintf("Wrong status: %d", status)) } response := NamedFileContainerResourceURLResponse{} err := json.Unmarshal(rr.Body.Bytes(), &response) if err != nil { panic(err) } assert.Equal(1, response.Count) assert.Equal("file.txt", response.Value[0].Name) assert.Equal("http://localhost/download/1", response.Value[0].FileContainerResourceURL) } func TestListArtifactContainer(t *testing.T) { assert := assert.New(t) var memfs = fstest.MapFS(map[string]*fstest.MapFile{ "artifact/server/path/1/some/file": { Data: []byte(""), }, }) router := httprouter.New() downloads(router, 
"artifact/server/path", memfs) req, _ := http.NewRequest("GET", "http://localhost/download/1?itemPath=some/file", nil) rr := httptest.NewRecorder() router.ServeHTTP(rr, req) if status := rr.Code; status != http.StatusOK { assert.FailNow(fmt.Sprintf("Wrong status: %d", status)) } response := ContainerItemResponse{} err := json.Unmarshal(rr.Body.Bytes(), &response) if err != nil { panic(err) } assert.Equal(1, len(response.Value)) assert.Equal("some/file", response.Value[0].Path) assert.Equal("file", response.Value[0].ItemType) assert.Equal("http://localhost/artifact/1/some/file/.", response.Value[0].ContentLocation) } func TestDownloadArtifactFile(t *testing.T) { assert := assert.New(t) var memfs = fstest.MapFS(map[string]*fstest.MapFile{ "artifact/server/path/1/some/file": { Data: []byte("content"), }, }) router := httprouter.New() downloads(router, "artifact/server/path", memfs) req, _ := http.NewRequest("GET", "http://localhost/artifact/1/some/file", nil) rr := httptest.NewRecorder() router.ServeHTTP(rr, req) if status := rr.Code; status != http.StatusOK { assert.FailNow(fmt.Sprintf("Wrong status: %d", status)) } data := rr.Body.Bytes() assert.Equal("content", string(data)) } type TestJobFileInfo struct { workdir string workflowPath string eventName string errorMessage string platforms map[string]string containerArchitecture string } var ( artifactsPath = path.Join(os.TempDir(), "test-artifacts") artifactsAddr = "127.0.0.1" artifactsPort = "12345" ) func TestArtifactFlow(t *testing.T) { if testing.Short() { t.Skip("skipping integration test") } ctx := context.Background() cancel := Serve(ctx, artifactsPath, artifactsAddr, artifactsPort) defer cancel() platforms := map[string]string{ "ubuntu-latest": "node:16-buster", // Don't use node:16-buster-slim because it doesn't have curl command, which is used in the tests } tables := []TestJobFileInfo{ {"testdata", "upload-and-download", "push", "", platforms, ""}, {"testdata", "GHSL-2023-004", "push", "", platforms, ""}, 
{"testdata", "v4", "push", "", platforms, ""}, } log.SetLevel(log.DebugLevel) for _, table := range tables { runTestJobFile(ctx, t, table) } } func runTestJobFile(ctx context.Context, t *testing.T, tjfi TestJobFileInfo) { t.Run(tjfi.workflowPath, func(t *testing.T) { fmt.Printf("::group::%s\n", tjfi.workflowPath) if err := os.RemoveAll(artifactsPath); err != nil { panic(err) } workdir, err := filepath.Abs(tjfi.workdir) assert.Nil(t, err, workdir) fullWorkflowPath := filepath.Join(workdir, tjfi.workflowPath) runnerConfig := &runner.Config{ Workdir: workdir, BindWorkdir: false, EventName: tjfi.eventName, Platforms: tjfi.platforms, ReuseContainers: false, ContainerArchitecture: tjfi.containerArchitecture, GitHubInstance: "github.com", ArtifactServerPath: artifactsPath, ArtifactServerAddr: artifactsAddr, ArtifactServerPort: artifactsPort, } runner, err := runner.New(runnerConfig) assert.Nil(t, err, tjfi.workflowPath) planner, err := model.NewWorkflowPlanner(fullWorkflowPath, true, false) assert.Nil(t, err, fullWorkflowPath) plan, err := planner.PlanEvent(tjfi.eventName) if err == nil { err = runner.NewPlanExecutor(plan)(ctx) if tjfi.errorMessage == "" { assert.Nil(t, err, fullWorkflowPath) } else { assert.Error(t, err, tjfi.errorMessage) } } else { assert.Nil(t, plan) } fmt.Println("::endgroup::") }) } func TestMkdirFsImplSafeResolve(t *testing.T) { baseDir := "/foo/bar" tests := map[string]struct { input string want string }{ "simple": {input: "baz", want: "/foo/bar/baz"}, "nested": {input: "baz/blue", want: "/foo/bar/baz/blue"}, "dots in middle": {input: "baz/../../blue", want: "/foo/bar/blue"}, "leading dots": {input: "../../parent", want: "/foo/bar/parent"}, "root path": {input: "/root", want: "/foo/bar/root"}, "root": {input: "/", want: "/foo/bar"}, "empty": {input: "", want: "/foo/bar"}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { assert := assert.New(t) assert.Equal(tc.want, safeResolve(baseDir, tc.input)) }) } } func 
TestDownloadArtifactFileUnsafePath(t *testing.T) { assert := assert.New(t) var memfs = fstest.MapFS(map[string]*fstest.MapFile{ "artifact/server/path/some/file": { Data: []byte("content"), }, }) router := httprouter.New() downloads(router, "artifact/server/path", memfs) req, _ := http.NewRequest("GET", "http://localhost/artifact/2/../../some/file", nil) rr := httptest.NewRecorder() router.ServeHTTP(rr, req) if status := rr.Code; status != http.StatusOK { assert.FailNow(fmt.Sprintf("Wrong status: %d", status)) } data := rr.Body.Bytes() assert.Equal("content", string(data)) } func TestArtifactUploadBlobUnsafePath(t *testing.T) { assert := assert.New(t) var memfs = fstest.MapFS(map[string]*fstest.MapFile{}) router := httprouter.New() uploads(router, "artifact/server/path", writeMapFS{memfs}) req, _ := http.NewRequest("PUT", "http://localhost/upload/1?itemPath=../../some/file", strings.NewReader("content")) rr := httptest.NewRecorder() router.ServeHTTP(rr, req) if status := rr.Code; status != http.StatusOK { assert.Fail("Wrong status") } response := ResponseMessage{} err := json.Unmarshal(rr.Body.Bytes(), &response) if err != nil { panic(err) } assert.Equal("success", response.Message) assert.Equal("content", string(memfs["artifact/server/path/1/some/file"].Data)) }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/artifacts/artifact.pb.go
pkg/artifacts/artifact.pb.go
// Copyright 2024 The Gitea Authors. All rights reserved. // SPDX-License-Identifier: MIT // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.32.0 // protoc v4.25.2 // source: artifact.proto package artifacts import ( reflect "reflect" sync "sync" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" timestamppb "google.golang.org/protobuf/types/known/timestamppb" wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" ) const ( // Verify that this generated code is sufficiently up-to-date. _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) // Verify that runtime/protoimpl is sufficiently up-to-date. _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) type CreateArtifactRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields WorkflowRunBackendId string `protobuf:"bytes,1,opt,name=workflow_run_backend_id,json=workflowRunBackendId,proto3" json:"workflow_run_backend_id,omitempty"` WorkflowJobRunBackendId string `protobuf:"bytes,2,opt,name=workflow_job_run_backend_id,json=workflowJobRunBackendId,proto3" json:"workflow_job_run_backend_id,omitempty"` Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` ExpiresAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` Version int32 `protobuf:"varint,5,opt,name=version,proto3" json:"version,omitempty"` } func (x *CreateArtifactRequest) Reset() { *x = CreateArtifactRequest{} if protoimpl.UnsafeEnabled { mi := &file_artifact_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *CreateArtifactRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*CreateArtifactRequest) ProtoMessage() {} func (x *CreateArtifactRequest) ProtoReflect() protoreflect.Message { mi := &file_artifact_proto_msgTypes[0] if 
protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use CreateArtifactRequest.ProtoReflect.Descriptor instead. func (*CreateArtifactRequest) Descriptor() ([]byte, []int) { return file_artifact_proto_rawDescGZIP(), []int{0} } func (x *CreateArtifactRequest) GetWorkflowRunBackendId() string { if x != nil { return x.WorkflowRunBackendId } return "" } func (x *CreateArtifactRequest) GetWorkflowJobRunBackendId() string { if x != nil { return x.WorkflowJobRunBackendId } return "" } func (x *CreateArtifactRequest) GetName() string { if x != nil { return x.Name } return "" } func (x *CreateArtifactRequest) GetExpiresAt() *timestamppb.Timestamp { if x != nil { return x.ExpiresAt } return nil } func (x *CreateArtifactRequest) GetVersion() int32 { if x != nil { return x.Version } return 0 } type CreateArtifactResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"` SignedUploadUrl string `protobuf:"bytes,2,opt,name=signed_upload_url,json=signedUploadUrl,proto3" json:"signed_upload_url,omitempty"` } func (x *CreateArtifactResponse) Reset() { *x = CreateArtifactResponse{} if protoimpl.UnsafeEnabled { mi := &file_artifact_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *CreateArtifactResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*CreateArtifactResponse) ProtoMessage() {} func (x *CreateArtifactResponse) ProtoReflect() protoreflect.Message { mi := &file_artifact_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use 
CreateArtifactResponse.ProtoReflect.Descriptor instead. func (*CreateArtifactResponse) Descriptor() ([]byte, []int) { return file_artifact_proto_rawDescGZIP(), []int{1} } func (x *CreateArtifactResponse) GetOk() bool { if x != nil { return x.Ok } return false } func (x *CreateArtifactResponse) GetSignedUploadUrl() string { if x != nil { return x.SignedUploadUrl } return "" } type FinalizeArtifactRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields WorkflowRunBackendId string `protobuf:"bytes,1,opt,name=workflow_run_backend_id,json=workflowRunBackendId,proto3" json:"workflow_run_backend_id,omitempty"` WorkflowJobRunBackendId string `protobuf:"bytes,2,opt,name=workflow_job_run_backend_id,json=workflowJobRunBackendId,proto3" json:"workflow_job_run_backend_id,omitempty"` Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` Size int64 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` Hash *wrapperspb.StringValue `protobuf:"bytes,5,opt,name=hash,proto3" json:"hash,omitempty"` } func (x *FinalizeArtifactRequest) Reset() { *x = FinalizeArtifactRequest{} if protoimpl.UnsafeEnabled { mi := &file_artifact_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *FinalizeArtifactRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*FinalizeArtifactRequest) ProtoMessage() {} func (x *FinalizeArtifactRequest) ProtoReflect() protoreflect.Message { mi := &file_artifact_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use FinalizeArtifactRequest.ProtoReflect.Descriptor instead. 
func (*FinalizeArtifactRequest) Descriptor() ([]byte, []int) { return file_artifact_proto_rawDescGZIP(), []int{2} } func (x *FinalizeArtifactRequest) GetWorkflowRunBackendId() string { if x != nil { return x.WorkflowRunBackendId } return "" } func (x *FinalizeArtifactRequest) GetWorkflowJobRunBackendId() string { if x != nil { return x.WorkflowJobRunBackendId } return "" } func (x *FinalizeArtifactRequest) GetName() string { if x != nil { return x.Name } return "" } func (x *FinalizeArtifactRequest) GetSize() int64 { if x != nil { return x.Size } return 0 } func (x *FinalizeArtifactRequest) GetHash() *wrapperspb.StringValue { if x != nil { return x.Hash } return nil } type FinalizeArtifactResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"` ArtifactId int64 `protobuf:"varint,2,opt,name=artifact_id,json=artifactId,proto3" json:"artifact_id,omitempty"` } func (x *FinalizeArtifactResponse) Reset() { *x = FinalizeArtifactResponse{} if protoimpl.UnsafeEnabled { mi := &file_artifact_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *FinalizeArtifactResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*FinalizeArtifactResponse) ProtoMessage() {} func (x *FinalizeArtifactResponse) ProtoReflect() protoreflect.Message { mi := &file_artifact_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use FinalizeArtifactResponse.ProtoReflect.Descriptor instead. 
func (*FinalizeArtifactResponse) Descriptor() ([]byte, []int) { return file_artifact_proto_rawDescGZIP(), []int{3} } func (x *FinalizeArtifactResponse) GetOk() bool { if x != nil { return x.Ok } return false } func (x *FinalizeArtifactResponse) GetArtifactId() int64 { if x != nil { return x.ArtifactId } return 0 } type ListArtifactsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields WorkflowRunBackendId string `protobuf:"bytes,1,opt,name=workflow_run_backend_id,json=workflowRunBackendId,proto3" json:"workflow_run_backend_id,omitempty"` WorkflowJobRunBackendId string `protobuf:"bytes,2,opt,name=workflow_job_run_backend_id,json=workflowJobRunBackendId,proto3" json:"workflow_job_run_backend_id,omitempty"` NameFilter *wrapperspb.StringValue `protobuf:"bytes,3,opt,name=name_filter,json=nameFilter,proto3" json:"name_filter,omitempty"` IdFilter *wrapperspb.Int64Value `protobuf:"bytes,4,opt,name=id_filter,json=idFilter,proto3" json:"id_filter,omitempty"` } func (x *ListArtifactsRequest) Reset() { *x = ListArtifactsRequest{} if protoimpl.UnsafeEnabled { mi := &file_artifact_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ListArtifactsRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*ListArtifactsRequest) ProtoMessage() {} func (x *ListArtifactsRequest) ProtoReflect() protoreflect.Message { mi := &file_artifact_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ListArtifactsRequest.ProtoReflect.Descriptor instead. 
func (*ListArtifactsRequest) Descriptor() ([]byte, []int) { return file_artifact_proto_rawDescGZIP(), []int{4} } func (x *ListArtifactsRequest) GetWorkflowRunBackendId() string { if x != nil { return x.WorkflowRunBackendId } return "" } func (x *ListArtifactsRequest) GetWorkflowJobRunBackendId() string { if x != nil { return x.WorkflowJobRunBackendId } return "" } func (x *ListArtifactsRequest) GetNameFilter() *wrapperspb.StringValue { if x != nil { return x.NameFilter } return nil } func (x *ListArtifactsRequest) GetIdFilter() *wrapperspb.Int64Value { if x != nil { return x.IdFilter } return nil } type ListArtifactsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Artifacts []*ListArtifactsResponse_MonolithArtifact `protobuf:"bytes,1,rep,name=artifacts,proto3" json:"artifacts,omitempty"` } func (x *ListArtifactsResponse) Reset() { *x = ListArtifactsResponse{} if protoimpl.UnsafeEnabled { mi := &file_artifact_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ListArtifactsResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*ListArtifactsResponse) ProtoMessage() {} func (x *ListArtifactsResponse) ProtoReflect() protoreflect.Message { mi := &file_artifact_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ListArtifactsResponse.ProtoReflect.Descriptor instead. 
func (*ListArtifactsResponse) Descriptor() ([]byte, []int) { return file_artifact_proto_rawDescGZIP(), []int{5} } func (x *ListArtifactsResponse) GetArtifacts() []*ListArtifactsResponse_MonolithArtifact { if x != nil { return x.Artifacts } return nil } type ListArtifactsResponse_MonolithArtifact struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields WorkflowRunBackendId string `protobuf:"bytes,1,opt,name=workflow_run_backend_id,json=workflowRunBackendId,proto3" json:"workflow_run_backend_id,omitempty"` WorkflowJobRunBackendId string `protobuf:"bytes,2,opt,name=workflow_job_run_backend_id,json=workflowJobRunBackendId,proto3" json:"workflow_job_run_backend_id,omitempty"` DatabaseId int64 `protobuf:"varint,3,opt,name=database_id,json=databaseId,proto3" json:"database_id,omitempty"` Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` Size int64 `protobuf:"varint,5,opt,name=size,proto3" json:"size,omitempty"` CreatedAt *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` } func (x *ListArtifactsResponse_MonolithArtifact) Reset() { *x = ListArtifactsResponse_MonolithArtifact{} if protoimpl.UnsafeEnabled { mi := &file_artifact_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ListArtifactsResponse_MonolithArtifact) String() string { return protoimpl.X.MessageStringOf(x) } func (*ListArtifactsResponse_MonolithArtifact) ProtoMessage() {} func (x *ListArtifactsResponse_MonolithArtifact) ProtoReflect() protoreflect.Message { mi := &file_artifact_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ListArtifactsResponse_MonolithArtifact.ProtoReflect.Descriptor instead. 
func (*ListArtifactsResponse_MonolithArtifact) Descriptor() ([]byte, []int) { return file_artifact_proto_rawDescGZIP(), []int{6} } func (x *ListArtifactsResponse_MonolithArtifact) GetWorkflowRunBackendId() string { if x != nil { return x.WorkflowRunBackendId } return "" } func (x *ListArtifactsResponse_MonolithArtifact) GetWorkflowJobRunBackendId() string { if x != nil { return x.WorkflowJobRunBackendId } return "" } func (x *ListArtifactsResponse_MonolithArtifact) GetDatabaseId() int64 { if x != nil { return x.DatabaseId } return 0 } func (x *ListArtifactsResponse_MonolithArtifact) GetName() string { if x != nil { return x.Name } return "" } func (x *ListArtifactsResponse_MonolithArtifact) GetSize() int64 { if x != nil { return x.Size } return 0 } func (x *ListArtifactsResponse_MonolithArtifact) GetCreatedAt() *timestamppb.Timestamp { if x != nil { return x.CreatedAt } return nil } type GetSignedArtifactURLRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields WorkflowRunBackendId string `protobuf:"bytes,1,opt,name=workflow_run_backend_id,json=workflowRunBackendId,proto3" json:"workflow_run_backend_id,omitempty"` WorkflowJobRunBackendId string `protobuf:"bytes,2,opt,name=workflow_job_run_backend_id,json=workflowJobRunBackendId,proto3" json:"workflow_job_run_backend_id,omitempty"` Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` } func (x *GetSignedArtifactURLRequest) Reset() { *x = GetSignedArtifactURLRequest{} if protoimpl.UnsafeEnabled { mi := &file_artifact_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *GetSignedArtifactURLRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*GetSignedArtifactURLRequest) ProtoMessage() {} func (x *GetSignedArtifactURLRequest) ProtoReflect() protoreflect.Message { mi := &file_artifact_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GetSignedArtifactURLRequest.ProtoReflect.Descriptor instead. func (*GetSignedArtifactURLRequest) Descriptor() ([]byte, []int) { return file_artifact_proto_rawDescGZIP(), []int{7} } func (x *GetSignedArtifactURLRequest) GetWorkflowRunBackendId() string { if x != nil { return x.WorkflowRunBackendId } return "" } func (x *GetSignedArtifactURLRequest) GetWorkflowJobRunBackendId() string { if x != nil { return x.WorkflowJobRunBackendId } return "" } func (x *GetSignedArtifactURLRequest) GetName() string { if x != nil { return x.Name } return "" } type GetSignedArtifactURLResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields SignedUrl string `protobuf:"bytes,1,opt,name=signed_url,json=signedUrl,proto3" json:"signed_url,omitempty"` } func (x *GetSignedArtifactURLResponse) Reset() { *x = GetSignedArtifactURLResponse{} if protoimpl.UnsafeEnabled { mi := &file_artifact_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *GetSignedArtifactURLResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*GetSignedArtifactURLResponse) ProtoMessage() {} func (x *GetSignedArtifactURLResponse) ProtoReflect() protoreflect.Message { mi := &file_artifact_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GetSignedArtifactURLResponse.ProtoReflect.Descriptor instead. 
func (*GetSignedArtifactURLResponse) Descriptor() ([]byte, []int) { return file_artifact_proto_rawDescGZIP(), []int{8} } func (x *GetSignedArtifactURLResponse) GetSignedUrl() string { if x != nil { return x.SignedUrl } return "" } type DeleteArtifactRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields WorkflowRunBackendId string `protobuf:"bytes,1,opt,name=workflow_run_backend_id,json=workflowRunBackendId,proto3" json:"workflow_run_backend_id,omitempty"` WorkflowJobRunBackendId string `protobuf:"bytes,2,opt,name=workflow_job_run_backend_id,json=workflowJobRunBackendId,proto3" json:"workflow_job_run_backend_id,omitempty"` Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` } func (x *DeleteArtifactRequest) Reset() { *x = DeleteArtifactRequest{} if protoimpl.UnsafeEnabled { mi := &file_artifact_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DeleteArtifactRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*DeleteArtifactRequest) ProtoMessage() {} func (x *DeleteArtifactRequest) ProtoReflect() protoreflect.Message { mi := &file_artifact_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DeleteArtifactRequest.ProtoReflect.Descriptor instead. 
func (*DeleteArtifactRequest) Descriptor() ([]byte, []int) { return file_artifact_proto_rawDescGZIP(), []int{9} } func (x *DeleteArtifactRequest) GetWorkflowRunBackendId() string { if x != nil { return x.WorkflowRunBackendId } return "" } func (x *DeleteArtifactRequest) GetWorkflowJobRunBackendId() string { if x != nil { return x.WorkflowJobRunBackendId } return "" } func (x *DeleteArtifactRequest) GetName() string { if x != nil { return x.Name } return "" } type DeleteArtifactResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"` ArtifactId int64 `protobuf:"varint,2,opt,name=artifact_id,json=artifactId,proto3" json:"artifact_id,omitempty"` } func (x *DeleteArtifactResponse) Reset() { *x = DeleteArtifactResponse{} if protoimpl.UnsafeEnabled { mi := &file_artifact_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DeleteArtifactResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*DeleteArtifactResponse) ProtoMessage() {} func (x *DeleteArtifactResponse) ProtoReflect() protoreflect.Message { mi := &file_artifact_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DeleteArtifactResponse.ProtoReflect.Descriptor instead. 
func (*DeleteArtifactResponse) Descriptor() ([]byte, []int) { return file_artifact_proto_rawDescGZIP(), []int{10} } func (x *DeleteArtifactResponse) GetOk() bool { if x != nil { return x.Ok } return false } func (x *DeleteArtifactResponse) GetArtifactId() int64 { if x != nil { return x.ArtifactId } return 0 } var File_artifact_proto protoreflect.FileDescriptor var file_artifact_proto_rawDesc = []byte{ 0x0a, 0x0e, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf5, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x1b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x54, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x55, 0x72, 0x6c, 0x22, 0xe8, 0x01, 0x0a, 0x17, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x1b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 
0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x30, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x22, 0x4b, 0x0a, 0x18, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, 0x64, 0x22, 0x84, 0x02, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x1b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x38, 0x0a, 0x09, 0x69, 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x69, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x7c, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x09, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x4d, 0x6f, 0x6e, 0x6f, 0x6c, 0x69, 0x74, 0x68, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x09, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x22, 0xa1, 0x02, 0x0a, 0x26, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x4d, 0x6f, 0x6e, 0x6f, 0x6c, 0x69, 0x74, 0x68, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x12, 0x35, 0x0a, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x1b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x72, 0x75, 
0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0xa6, 0x01, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x1b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x3d, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x55, 0x72, 0x6c, 0x22, 0xa0, 0x01, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x1b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x49, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
true
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/artifacts/server.go
pkg/artifacts/server.go
package artifacts import ( "context" "encoding/json" "errors" "fmt" "io" "io/fs" "net/http" "os" "path/filepath" "strings" "time" "github.com/julienschmidt/httprouter" "github.com/nektos/act/pkg/common" ) type FileContainerResourceURL struct { FileContainerResourceURL string `json:"fileContainerResourceUrl"` } type NamedFileContainerResourceURL struct { Name string `json:"name"` FileContainerResourceURL string `json:"fileContainerResourceUrl"` } type NamedFileContainerResourceURLResponse struct { Count int `json:"count"` Value []NamedFileContainerResourceURL `json:"value"` } type ContainerItem struct { Path string `json:"path"` ItemType string `json:"itemType"` ContentLocation string `json:"contentLocation"` } type ContainerItemResponse struct { Value []ContainerItem `json:"value"` } type ResponseMessage struct { Message string `json:"message"` } type WritableFile interface { io.WriteCloser } type WriteFS interface { OpenWritable(name string) (WritableFile, error) OpenAppendable(name string) (WritableFile, error) } type readWriteFSImpl struct { } func (fwfs readWriteFSImpl) Open(name string) (fs.File, error) { return os.Open(name) } func (fwfs readWriteFSImpl) OpenWritable(name string) (WritableFile, error) { if err := os.MkdirAll(filepath.Dir(name), os.ModePerm); err != nil { return nil, err } return os.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644) } func (fwfs readWriteFSImpl) OpenAppendable(name string) (WritableFile, error) { if err := os.MkdirAll(filepath.Dir(name), os.ModePerm); err != nil { return nil, err } file, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0o644) if err != nil { return nil, err } _, err = file.Seek(0, io.SeekEnd) if err != nil { return nil, err } return file, nil } var gzipExtension = ".gz__" func safeResolve(baseDir string, relPath string) string { return filepath.Join(baseDir, filepath.Clean(filepath.Join(string(os.PathSeparator), relPath))) } func uploads(router *httprouter.Router, baseDir string, fsys WriteFS) { 
router.POST("/_apis/pipelines/workflows/:runId/artifacts", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) { runID := params.ByName("runId") json, err := json.Marshal(FileContainerResourceURL{ FileContainerResourceURL: fmt.Sprintf("http://%s/upload/%s", req.Host, runID), }) if err != nil { panic(err) } _, err = w.Write(json) if err != nil { panic(err) } }) router.PUT("/upload/:runId", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) { itemPath := req.URL.Query().Get("itemPath") runID := params.ByName("runId") if req.Header.Get("Content-Encoding") == "gzip" { itemPath += gzipExtension } safeRunPath := safeResolve(baseDir, runID) safePath := safeResolve(safeRunPath, itemPath) file, err := func() (WritableFile, error) { contentRange := req.Header.Get("Content-Range") if contentRange != "" && !strings.HasPrefix(contentRange, "bytes 0-") { return fsys.OpenAppendable(safePath) } return fsys.OpenWritable(safePath) }() if err != nil { panic(err) } defer file.Close() writer, ok := file.(io.Writer) if !ok { panic(errors.New("File is not writable")) } if req.Body == nil { panic(errors.New("No body given")) } _, err = io.Copy(writer, req.Body) if err != nil { panic(err) } json, err := json.Marshal(ResponseMessage{ Message: "success", }) if err != nil { panic(err) } _, err = w.Write(json) if err != nil { panic(err) } }) router.PATCH("/_apis/pipelines/workflows/:runId/artifacts", func(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { json, err := json.Marshal(ResponseMessage{ Message: "success", }) if err != nil { panic(err) } _, err = w.Write(json) if err != nil { panic(err) } }) } func downloads(router *httprouter.Router, baseDir string, fsys fs.FS) { router.GET("/_apis/pipelines/workflows/:runId/artifacts", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) { runID := params.ByName("runId") safePath := safeResolve(baseDir, runID) entries, err := fs.ReadDir(fsys, safePath) if err != nil { 
panic(err) } var list []NamedFileContainerResourceURL for _, entry := range entries { list = append(list, NamedFileContainerResourceURL{ Name: entry.Name(), FileContainerResourceURL: fmt.Sprintf("http://%s/download/%s", req.Host, runID), }) } json, err := json.Marshal(NamedFileContainerResourceURLResponse{ Count: len(list), Value: list, }) if err != nil { panic(err) } _, err = w.Write(json) if err != nil { panic(err) } }) router.GET("/download/:container", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) { container := params.ByName("container") itemPath := req.URL.Query().Get("itemPath") safePath := safeResolve(baseDir, filepath.Join(container, itemPath)) var files []ContainerItem err := fs.WalkDir(fsys, safePath, func(path string, entry fs.DirEntry, _ error) error { if !entry.IsDir() { rel, err := filepath.Rel(safePath, path) if err != nil { panic(err) } // if it was upload as gzip rel = strings.TrimSuffix(rel, gzipExtension) path := filepath.Join(itemPath, rel) rel = filepath.ToSlash(rel) path = filepath.ToSlash(path) files = append(files, ContainerItem{ Path: path, ItemType: "file", ContentLocation: fmt.Sprintf("http://%s/artifact/%s/%s/%s", req.Host, container, itemPath, rel), }) } return nil }) if err != nil { panic(err) } json, err := json.Marshal(ContainerItemResponse{ Value: files, }) if err != nil { panic(err) } _, err = w.Write(json) if err != nil { panic(err) } }) router.GET("/artifact/*path", func(w http.ResponseWriter, _ *http.Request, params httprouter.Params) { path := params.ByName("path")[1:] safePath := safeResolve(baseDir, path) file, err := fsys.Open(safePath) if err != nil { // try gzip file file, err = fsys.Open(safePath + gzipExtension) if err != nil { panic(err) } w.Header().Add("Content-Encoding", "gzip") } _, err = io.Copy(w, file) if err != nil { panic(err) } }) } func Serve(ctx context.Context, artifactPath string, addr string, port string) context.CancelFunc { serverContext, cancel := context.WithCancel(ctx) 
logger := common.Logger(serverContext) if artifactPath == "" { return cancel } router := httprouter.New() logger.Debugf("Artifacts base path '%s'", artifactPath) fsys := readWriteFSImpl{} uploads(router, artifactPath, fsys) downloads(router, artifactPath, fsys) RoutesV4(router, artifactPath, fsys, fsys) server := &http.Server{ Addr: fmt.Sprintf("%s:%s", addr, port), ReadHeaderTimeout: 2 * time.Second, Handler: router, } // run server go func() { logger.Infof("Start server on http://%s:%s", addr, port) if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed { logger.Fatal(err) } }() // wait for cancel to gracefully shutdown server go func() { <-serverContext.Done() if err := server.Shutdown(ctx); err != nil { logger.Errorf("Failed shutdown gracefully - force shutdown: %v", err) server.Close() } }() return cancel }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/common/file.go
pkg/common/file.go
package common

import (
	"io"
	"os"
	"path/filepath"
)

// CopyFile copies the regular file at source to dest, truncating dest if
// it already exists. On success it also mirrors the source's permission
// bits onto dest (best effort). Any error from opening, creating, or
// copying is returned.
func CopyFile(source string, dest string) (err error) {
	sourcefile, err := os.Open(source)
	if err != nil {
		return err
	}
	defer sourcefile.Close()

	destfile, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer destfile.Close()

	if _, err = io.Copy(destfile, sourcefile); err != nil {
		return err
	}

	// Preserve the source's mode on the destination. The original code had
	// the condition inverted (chmod only ran when Stat FAILED, which would
	// also panic on the nil FileInfo). Best effort: if the source vanished
	// after the copy we still report success.
	if sourceinfo, statErr := os.Stat(source); statErr == nil {
		_ = os.Chmod(dest, sourceinfo.Mode())
	}
	return nil
}

// CopyDir recursively copies the directory tree rooted at source into
// dest, creating dest with the source directory's mode. The first error
// encountered aborts the copy and is returned (previously child errors
// were only printed and mostly lost, and the ReadDir error was ignored).
func CopyDir(source string, dest string) (err error) {
	// get properties of source dir
	sourceinfo, err := os.Stat(source)
	if err != nil {
		return err
	}

	// create dest dir
	if err = os.MkdirAll(dest, sourceinfo.Mode()); err != nil {
		return err
	}

	objects, err := os.ReadDir(source)
	if err != nil {
		return err
	}

	for _, obj := range objects {
		src := filepath.Join(source, obj.Name())
		dst := filepath.Join(dest, obj.Name())

		if obj.IsDir() {
			// create sub-directories - recursively
			err = CopyDir(src, dst)
		} else {
			// perform copy
			err = CopyFile(src, dst)
		}
		if err != nil {
			return err
		}
	}
	return nil
}
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/common/executor_test.go
pkg/common/executor_test.go
package common

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

// TestNewWorkflow covers NewPipelineExecutor: the zero-executor case,
// error propagation, and that chained executors all run in order.
func TestNewWorkflow(t *testing.T) {
	assert := assert.New(t)
	ctx := context.Background()

	// empty
	emptyWorkflow := NewPipelineExecutor()
	assert.Nil(emptyWorkflow(ctx))

	// error case
	errorWorkflow := NewErrorExecutor(fmt.Errorf("test error"))
	assert.NotNil(errorWorkflow(ctx))

	// multiple success case
	runcount := 0
	successWorkflow := NewPipelineExecutor(
		func(_ context.Context) error {
			runcount++
			return nil
		},
		func(_ context.Context) error {
			runcount++
			return nil
		})
	assert.Nil(successWorkflow(ctx))
	assert.Equal(2, runcount)
}

// TestNewConditionalExecutor verifies that exactly one branch executor
// runs, selected by the conditional's result.
func TestNewConditionalExecutor(t *testing.T) {
	assert := assert.New(t)
	ctx := context.Background()

	trueCount := 0
	falseCount := 0

	// conditional false -> only the false executor fires
	err := NewConditionalExecutor(func(_ context.Context) bool {
		return false
	}, func(_ context.Context) error {
		trueCount++
		return nil
	}, func(_ context.Context) error {
		falseCount++
		return nil
	})(ctx)
	assert.Nil(err)
	assert.Equal(0, trueCount)
	assert.Equal(1, falseCount)

	// conditional true -> only the true executor fires
	err = NewConditionalExecutor(func(_ context.Context) bool {
		return true
	}, func(_ context.Context) error {
		trueCount++
		return nil
	}, func(_ context.Context) error {
		falseCount++
		return nil
	})(ctx)
	assert.Nil(err)
	assert.Equal(1, trueCount)
	assert.Equal(1, falseCount)
}

// TestNewParallelExecutor checks the worker-pool bound: with parallelism 2
// at most 2 executors overlap, and a parallelism below 1 is clamped to 1.
// The 2s sleep is what makes the overlap observable via activeCount.
func TestNewParallelExecutor(t *testing.T) {
	assert := assert.New(t)
	ctx := context.Background()

	count := 0
	activeCount := 0
	maxCount := 0
	emptyWorkflow := NewPipelineExecutor(func(_ context.Context) error {
		count++

		activeCount++
		if activeCount > maxCount {
			maxCount = activeCount
		}
		time.Sleep(2 * time.Second)
		activeCount--

		return nil
	})

	err := NewParallelExecutor(2, emptyWorkflow, emptyWorkflow, emptyWorkflow)(ctx)

	assert.Equal(3, count, "should run all 3 executors")
	assert.Equal(2, maxCount, "should run at most 2 executors in parallel")
	assert.Nil(err)

	// Reset to test running the executor with 0 parallelism
	count = 0
	activeCount = 0
	maxCount = 0

	errSingle := NewParallelExecutor(0, emptyWorkflow, emptyWorkflow, emptyWorkflow)(ctx)

	assert.Equal(3, count, "should run all 3 executors")
	assert.Equal(1, maxCount, "should run at most 1 executors in parallel")
	assert.Nil(errSingle)
}

// TestNewParallelExecutorFailed: with an already-canceled context the
// executors still run, but the context error wins over the executor error.
func TestNewParallelExecutorFailed(t *testing.T) {
	assert := assert.New(t)
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	count := 0
	errorWorkflow := NewPipelineExecutor(func(_ context.Context) error {
		count++
		return fmt.Errorf("fake error")
	})
	err := NewParallelExecutor(1, errorWorkflow)(ctx)
	assert.Equal(1, count)
	assert.ErrorIs(context.Canceled, err)
}

// TestNewParallelExecutorCanceled: all executors run to completion even
// when one fails; the failure is reported after all have finished.
func TestNewParallelExecutorCanceled(t *testing.T) {
	assert := assert.New(t)
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	errExpected := fmt.Errorf("fake error")
	count := 0
	successWorkflow := NewPipelineExecutor(func(_ context.Context) error {
		count++
		return nil
	})
	errorWorkflow := NewPipelineExecutor(func(_ context.Context) error {
		count++
		return errExpected
	})
	err := NewParallelExecutor(3, errorWorkflow, successWorkflow, successWorkflow)(ctx)
	assert.Equal(3, count)
	assert.Error(errExpected, err)
}
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/common/context_test.go
pkg/common/context_test.go
package common

import (
	"context"
	"os"
	"syscall"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

// TestGracefulJobCancellationViaSigint: the first SIGINT cancels only the
// inner job-cancel context (graceful stop); a second SIGINT force-cancels
// the outer context. Signals are injected via the test channel rather
// than delivered by the OS.
func TestGracefulJobCancellationViaSigint(t *testing.T) {
	ctx, cancel, channel := createGracefulJobCancellationContext()
	defer cancel()
	assert.NotNil(t, ctx)
	assert.NotNil(t, cancel)
	assert.NotNil(t, channel)
	cancelCtx := JobCancelContext(ctx)
	assert.NotNil(t, cancelCtx)
	assert.NoError(t, ctx.Err())
	assert.NoError(t, cancelCtx.Err())
	// first interrupt: graceful — only the job-cancel context goes down
	channel <- os.Interrupt
	select {
	case <-time.After(1 * time.Second):
		t.Fatal("context not canceled")
	case <-cancelCtx.Done():
	case <-ctx.Done():
	}
	if assert.Error(t, cancelCtx.Err(), "context canceled") {
		assert.Equal(t, context.Canceled, cancelCtx.Err())
	}
	// outer context must still be alive after the first interrupt
	assert.NoError(t, ctx.Err())
	// second interrupt: force cancel of the outer context
	channel <- os.Interrupt
	select {
	case <-time.After(1 * time.Second):
		t.Fatal("context not canceled")
	case <-ctx.Done():
	}
	if assert.Error(t, ctx.Err(), "context canceled") {
		assert.Equal(t, context.Canceled, ctx.Err())
	}
}

// TestForceCancellationViaSigterm: a single SIGTERM force-cancels the
// outer context, which also takes the derived job-cancel context down.
func TestForceCancellationViaSigterm(t *testing.T) {
	ctx, cancel, channel := createGracefulJobCancellationContext()
	defer cancel()
	assert.NotNil(t, ctx)
	assert.NotNil(t, cancel)
	assert.NotNil(t, channel)
	cancelCtx := JobCancelContext(ctx)
	assert.NotNil(t, cancelCtx)
	assert.NoError(t, ctx.Err())
	assert.NoError(t, cancelCtx.Err())
	channel <- syscall.SIGTERM
	select {
	case <-time.After(1 * time.Second):
		t.Fatal("context not canceled")
	case <-cancelCtx.Done():
	}
	select {
	case <-time.After(1 * time.Second):
		t.Fatal("context not canceled")
	case <-ctx.Done():
	}
	if assert.Error(t, ctx.Err(), "context canceled") {
		assert.Equal(t, context.Canceled, ctx.Err())
	}
	if assert.Error(t, cancelCtx.Err(), "context canceled") {
		assert.Equal(t, context.Canceled, cancelCtx.Err())
	}
}

// TestCreateGracefulJobCancellationContext: the public constructor attaches
// a live job-cancel context to the returned context.
func TestCreateGracefulJobCancellationContext(t *testing.T) {
	ctx, cancel := CreateGracefulJobCancellationContext()
	defer cancel()
	assert.NotNil(t, ctx)
	assert.NotNil(t, cancel)
	cancelCtx := JobCancelContext(ctx)
	assert.NotNil(t, cancelCtx)
	assert.NoError(t, cancelCtx.Err())
}

// TestCreateGracefulJobCancellationContextCancelFunc: the returned cancel
// func tears down both the outer and the job-cancel context.
func TestCreateGracefulJobCancellationContextCancelFunc(t *testing.T) {
	ctx, cancel := CreateGracefulJobCancellationContext()
	assert.NotNil(t, ctx)
	assert.NotNil(t, cancel)
	cancelCtx := JobCancelContext(ctx)
	assert.NotNil(t, cancelCtx)
	assert.NoError(t, cancelCtx.Err())
	cancel()
	if assert.Error(t, ctx.Err(), "context canceled") {
		assert.Equal(t, context.Canceled, ctx.Err())
	}
	if assert.Error(t, cancelCtx.Err(), "context canceled") {
		assert.Equal(t, context.Canceled, cancelCtx.Err())
	}
}
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/common/outbound_ip.go
pkg/common/outbound_ip.go
package common

import (
	"net"
	"sort"
	"strings"
)

// GetOutboundIP returns an outbound IP address of this machine.
// It tries to access the internet and returns the local IP address of the connection.
// If the machine cannot access the internet, it returns a preferred IP address from network interfaces.
// It returns nil if no IP address is found.
func GetOutboundIP() net.IP {
	// See https://stackoverflow.com/a/37382208
	// UDP "dial" does not send packets; it only resolves the local address
	// the kernel would route through.
	conn, err := net.Dial("udp", "8.8.8.8:80")
	if err == nil {
		defer conn.Close()
		return conn.LocalAddr().(*net.UDPAddr).IP
	}

	// So the machine cannot access the internet. Pick an IP address from
	// network interfaces.
	ifs, err := net.Interfaces()
	if err != nil {
		return nil
	}

	type candidate struct {
		net.IP
		net.Interface
	}
	var ips []candidate
	for _, i := range ifs {
		addrs, err := i.Addrs()
		if err != nil {
			continue
		}
		for _, addr := range addrs {
			var ip net.IP
			switch v := addr.(type) {
			case *net.IPNet:
				ip = v.IP
			case *net.IPAddr:
				ip = v.IP
			}
			if ip.IsGlobalUnicast() {
				ips = append(ips, candidate{ip, i})
			}
		}
	}

	// BUGFIX: the previous `len(ips) > 1` guard returned nil for a machine
	// with exactly one candidate address; any non-empty list must yield one.
	if len(ips) == 0 {
		return nil
	}

	sort.Slice(ips, func(i, j int) bool {
		ifi := ips[i].Interface
		ifj := ips[j].Interface

		// ethernet is preferred
		if vi, vj := strings.HasPrefix(ifi.Name, "e"), strings.HasPrefix(ifj.Name, "e"); vi != vj {
			return vi
		}

		ipi := ips[i].IP
		ipj := ips[j].IP

		// IPv4 is preferred
		if vi, vj := ipi.To4() != nil, ipj.To4() != nil; vi != vj {
			return vi
		}

		// en0 is preferred to en1
		if ifi.Name != ifj.Name {
			return ifi.Name < ifj.Name
		}

		// fallback
		return ipi.String() < ipj.String()
	})
	return ips[0].IP
}
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/common/line_writer.go
pkg/common/line_writer.go
package common

import (
	"bytes"
	"io"
)

// LineHandler is a callback function for handling a line
type LineHandler func(line string) bool

type lineWriter struct {
	buffer   bytes.Buffer
	handlers []LineHandler
}

// NewLineWriter creates a new instance of a line writer
func NewLineWriter(handlers ...LineHandler) io.Writer {
	return &lineWriter{handlers: handlers}
}

// Write buffers p until a '\n' is seen; every completed line (including
// its trailing newline) is handed to the handlers. A trailing partial
// line stays buffered for the next Write. It always reports len(p)
// bytes written.
func (lw *lineWriter) Write(p []byte) (int, error) {
	total := 0
	rest := p
	for {
		idx := bytes.IndexByte(rest, '\n')
		if idx < 0 {
			// No newline left: keep the remainder buffered.
			n, _ := lw.buffer.Write(rest)
			return total + n, nil
		}
		n, _ := lw.buffer.Write(rest[:idx+1])
		total += n
		lw.handleLine(lw.buffer.String())
		lw.buffer.Reset()
		rest = rest[idx+1:]
	}
}

// handleLine feeds one completed line to each handler in order, stopping
// early when a handler returns false.
func (lw *lineWriter) handleLine(line string) {
	for _, handler := range lw.handlers {
		if !handler(line) {
			return
		}
	}
}
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/common/cartesian_test.go
pkg/common/cartesian_test.go
package common

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestCartesianProduct checks the product size and tuple shape for a
// regular input, and that an empty axis (or an empty map) yields an
// empty product.
func TestCartesianProduct(t *testing.T) {
	assert := assert.New(t)
	input := map[string][]interface{}{
		"foo": {1, 2, 3, 4},
		"bar": {"a", "b", "c"},
		"baz": {false, true},
	}

	output := CartesianProduct(input)
	// 4 * 3 * 2 combinations, each tuple carrying all three keys.
	assert.Len(output, 24)

	for _, v := range output {
		assert.Len(v, 3)
		assert.Contains(v, "foo")
		assert.Contains(v, "bar")
		assert.Contains(v, "baz")
	}

	// One empty list empties the entire product.
	input = map[string][]interface{}{
		"foo": {1, 2, 3, 4},
		"bar": {},
		"baz": {false, true},
	}

	output = CartesianProduct(input)
	assert.Len(output, 0)

	// An empty input map also yields an empty product.
	input = map[string][]interface{}{}
	output = CartesianProduct(input)
	assert.Len(output, 0)
}
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/common/cartesian.go
pkg/common/cartesian.go
package common // CartesianProduct takes map of lists and returns list of unique tuples func CartesianProduct(mapOfLists map[string][]interface{}) []map[string]interface{} { listNames := make([]string, 0) lists := make([][]interface{}, 0) for k, v := range mapOfLists { listNames = append(listNames, k) lists = append(lists, v) } listCart := cartN(lists...) rtn := make([]map[string]interface{}, 0) for _, list := range listCart { vMap := make(map[string]interface{}) for i, v := range list { vMap[listNames[i]] = v } rtn = append(rtn, vMap) } return rtn } func cartN(a ...[]interface{}) [][]interface{} { c := 1 for _, a := range a { c *= len(a) } if c == 0 || len(a) == 0 { return nil } p := make([][]interface{}, c) b := make([]interface{}, c*len(a)) n := make([]int, len(a)) s := 0 for i := range p { e := s + len(a) pi := b[s:e] p[i] = pi s = e for j, n := range n { pi[j] = a[j][n] } for j := len(n) - 1; j >= 0; j-- { n[j]++ if n[j] < len(a[j]) { break } n[j] = 0 } } return p }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/common/line_writer_test.go
pkg/common/line_writer_test.go
package common

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestLineWriter verifies that writes are buffered until a newline is
// seen, that Write reports the full number of bytes consumed, and that
// handlers receive complete lines (trailing newline included). A trailing
// partial line must stay buffered and not reach the handler.
func TestLineWriter(t *testing.T) {
	lines := make([]string, 0)
	lineHandler := func(s string) bool {
		lines = append(lines, s)
		return true
	}

	lineWriter := NewLineWriter(lineHandler)

	assert := assert.New(t)
	write := func(s string) {
		// Write must never short-count: n equals len(s) on success.
		n, err := lineWriter.Write([]byte(s))
		assert.NoError(err)
		assert.Equal(len(s), n, s)
	}

	write("hello")
	write(" ")
	write("world!!\nextra")
	write(" line\n and another\nlast")
	write(" line\n")
	write("no newline here...")

	// "no newline here..." is still buffered, so only 4 lines emitted.
	assert.Len(lines, 4)
	assert.Equal("hello world!!\n", lines[0])
	assert.Equal("extra line\n", lines[1])
	assert.Equal(" and another\n", lines[2])
	assert.Equal("last line\n", lines[3])
}
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/common/job_error.go
pkg/common/job_error.go
package common

import (
	"context"
)

type jobErrorContextKey string

const jobErrorContextKeyVal = jobErrorContextKey("job.error")

type jobCancelCtx string

// JobCancelCtxVal is the context key under which the graceful job-cancel
// context is stored.
const JobCancelCtxVal = jobCancelCtx("job.cancel")

// JobError returns the job error for current context if any
func JobError(ctx context.Context) error {
	val := ctx.Value(jobErrorContextKeyVal)
	if val != nil {
		if container, ok := val.(map[string]error); ok {
			return container["error"]
		}
	}
	return nil
}

// SetJobError stores err in the error container attached to ctx by
// WithJobErrorContainer. It is a no-op when no container is present
// (previously an unchecked type assertion panicked in that case).
func SetJobError(ctx context.Context, err error) {
	if container, ok := ctx.Value(jobErrorContextKeyVal).(map[string]error); ok {
		container["error"] = err
	}
}

// WithJobErrorContainer adds a value to the context as a container for an error
func WithJobErrorContainer(ctx context.Context) context.Context {
	container := map[string]error{}
	return context.WithValue(ctx, jobErrorContextKeyVal, container)
}

// WithJobCancelContext attaches a secondary "graceful cancel" context to ctx.
func WithJobCancelContext(ctx context.Context, cancelContext context.Context) context.Context {
	return context.WithValue(ctx, JobCancelCtxVal, cancelContext)
}

// JobCancelContext returns the graceful job-cancel context attached by
// WithJobCancelContext, or nil when none is attached.
func JobCancelContext(ctx context.Context) context.Context {
	val := ctx.Value(JobCancelCtxVal)
	if val != nil {
		if container, ok := val.(context.Context); ok {
			return container
		}
	}
	return nil
}

// EarlyCancelContext returns a new context based on ctx that is canceled when the first of the provided contexts is canceled.
func EarlyCancelContext(ctx context.Context) (context.Context, context.CancelFunc) {
	jobCtx := JobCancelContext(ctx)
	if jobCtx == nil {
		// No job-cancel context attached: nothing extra to watch.
		return ctx, func() {}
	}
	// Renamed the local previously called "context" — it shadowed the
	// context package inside this function.
	earlyCtx, cancel := context.WithCancel(ctx)
	go func() {
		defer cancel()
		select {
		case <-earlyCtx.Done():
		case <-ctx.Done():
		case <-jobCtx.Done():
		}
	}()
	return earlyCtx, cancel
}
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/common/auth.go
pkg/common/auth.go
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package common

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/golang-jwt/jwt/v5"
	log "github.com/sirupsen/logrus"
)

// actionsClaims is the JWT payload used to identify a task to the local
// artifact/cache server. NOTE: tokens are signed with an EMPTY HMAC key
// (see SignedString([]byte{}) below), so they carry no real secrecy —
// presumably acceptable because the server only runs locally; confirm
// before exposing it beyond localhost.
type actionsClaims struct {
	jwt.RegisteredClaims
	Scp    string `json:"scp"`
	TaskID int64
	RunID  int64
	JobID  int64
	Ac     string `json:"ac"`
}

// actionsCacheScope is one entry of the "ac" claim consumed by buildx's
// GitHub Actions cache backend.
type actionsCacheScope struct {
	Scope      string
	Permission actionsCachePermission
}

type actionsCachePermission int

const (
	actionsCachePermissionRead = 1 << iota
	actionsCachePermissionWrite
)

// CreateAuthorizationToken mints a HS256 JWT valid for 24 hours carrying
// the task, run, and job IDs plus a write-all cache scope.
func CreateAuthorizationToken(taskID, runID, jobID int64) (string, error) {
	now := time.Now()

	// "ac" is a JSON list of cache scopes; a single empty scope with
	// write permission grants access to everything.
	ac, err := json.Marshal(&[]actionsCacheScope{
		{
			Scope:      "",
			Permission: actionsCachePermissionWrite,
		},
	})
	if err != nil {
		return "", err
	}

	claims := actionsClaims{
		RegisteredClaims: jwt.RegisteredClaims{
			ExpiresAt: jwt.NewNumericDate(now.Add(24 * time.Hour)),
			NotBefore: jwt.NewNumericDate(now),
		},
		Scp:    fmt.Sprintf("Actions.Results:%d:%d", runID, jobID),
		TaskID: taskID,
		RunID:  runID,
		JobID:  jobID,
		Ac:     string(ac),
	}
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)

	// Empty signing key — matches the empty key used during parsing.
	tokenString, err := token.SignedString([]byte{})
	if err != nil {
		return "", err
	}

	return tokenString, nil
}

// ParseAuthorizationToken extracts the TaskID from the request's Bearer
// token. A missing Authorization header is not an error and yields 0.
func ParseAuthorizationToken(req *http.Request) (int64, error) {
	h := req.Header.Get("Authorization")
	if h == "" {
		return 0, nil
	}

	parts := strings.SplitN(h, " ", 2)
	if len(parts) != 2 {
		log.Errorf("split token failed: %s", h)
		return 0, fmt.Errorf("split token failed")
	}

	token, err := jwt.ParseWithClaims(parts[1], &actionsClaims{}, func(t *jwt.Token) (any, error) {
		// Only HMAC is accepted, mirroring CreateAuthorizationToken.
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return []byte{}, nil
	})
	if err != nil {
		return 0, err
	}

	c, ok := token.Claims.(*actionsClaims)
	if !token.Valid || !ok {
		return 0, fmt.Errorf("invalid token claim")
	}

	return c.TaskID, nil
}
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/common/context.go
pkg/common/context.go
package common

import (
	"context"
	"os"
	"os/signal"
	"syscall"
)

// createGracefulJobCancellationContext builds a two-stage cancellation:
// the returned outer context is force-canceled by SIGTERM (or a second
// SIGINT), while the first SIGINT only cancels the inner job-cancel
// context attached via WithJobCancelContext, allowing a graceful stop.
// The signal channel is returned so tests can inject signals directly.
func createGracefulJobCancellationContext() (context.Context, func(), chan os.Signal) {
	ctx := context.Background()
	ctx, forceCancel := context.WithCancel(ctx)
	// cancelCtx is derived from ctx, so a force cancel also cancels it.
	cancelCtx, cancel := context.WithCancel(ctx)
	ctx = WithJobCancelContext(ctx, cancelCtx)
	// trap Ctrl+C and call cancel on the context
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	go func() {
		select {
		case sig := <-c:
			if sig == os.Interrupt {
				// first SIGINT: graceful — cancel only the job context,
				// then wait for a second signal to force cancel
				cancel()
				select {
				case <-c:
					forceCancel()
				case <-ctx.Done():
				}
			} else {
				// SIGTERM: force cancel immediately
				forceCancel()
			}
		case <-ctx.Done():
		}
	}()
	// The cleanup func detaches the signal handler and cancels both
	// contexts; it is safe to call multiple times.
	return ctx, func() {
		signal.Stop(c)
		forceCancel()
		cancel()
	}, c
}

// CreateGracefulJobCancellationContext is the public wrapper; it hides
// the signal channel used by tests.
func CreateGracefulJobCancellationContext() (context.Context, func()) {
	ctx, cancel, _ := createGracefulJobCancellationContext()
	return ctx, cancel
}
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/common/dryrun.go
pkg/common/dryrun.go
package common

import (
	"context"
)

type dryrunContextKey string

const dryrunContextKeyVal = dryrunContextKey("dryrun")

// Dryrun reports whether dryrun mode was enabled on ctx via WithDryrun.
// It returns false when no flag (or a non-bool value) is attached.
func Dryrun(ctx context.Context) bool {
	dryrun, ok := ctx.Value(dryrunContextKeyVal).(bool)
	return ok && dryrun
}

// WithDryrun returns a copy of ctx carrying the dryrun flag.
func WithDryrun(ctx context.Context, dryrun bool) context.Context {
	return context.WithValue(ctx, dryrunContextKeyVal, dryrun)
}
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/common/logger.go
pkg/common/logger.go
package common import ( "context" "github.com/sirupsen/logrus" ) type loggerContextKey string const loggerContextKeyVal = loggerContextKey("logrus.FieldLogger") // Logger returns the appropriate logger for current context func Logger(ctx context.Context) logrus.FieldLogger { val := ctx.Value(loggerContextKeyVal) if val != nil { if logger, ok := val.(logrus.FieldLogger); ok { return logger } } return logrus.StandardLogger() } // WithLogger adds a value to the context for the logger func WithLogger(ctx context.Context, logger logrus.FieldLogger) context.Context { return context.WithValue(ctx, loggerContextKeyVal, logger) }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/common/executor.go
pkg/common/executor.go
package common

import (
	"context"
	"errors"
	"fmt"

	log "github.com/sirupsen/logrus"
)

// Warning that implements `error` but safe to ignore; the combinators
// below log Warnings instead of aborting the chain.
type Warning struct {
	Message string
}

// Error the contract for error
func (w Warning) Error() string {
	return w.Message
}

// Warningf create a warning
func Warningf(format string, args ...interface{}) Warning {
	w := Warning{
		Message: fmt.Sprintf(format, args...),
	}
	return w
}

// Executor define contract for the steps of a workflow
type Executor func(ctx context.Context) error

// Conditional define contract for the conditional predicate
type Conditional func(ctx context.Context) bool

// NewInfoExecutor is an executor that logs messages at info level
func NewInfoExecutor(format string, args ...interface{}) Executor {
	return func(ctx context.Context) error {
		logger := Logger(ctx)
		logger.Infof(format, args...)
		return nil
	}
}

// NewDebugExecutor is an executor that logs messages at debug level
func NewDebugExecutor(format string, args ...interface{}) Executor {
	return func(ctx context.Context) error {
		logger := Logger(ctx)
		logger.Debugf(format, args...)
		return nil
	}
}

// NewPipelineExecutor creates a new executor from a series of other executors,
// chaining them with Then (so a non-Warning error stops the pipeline).
func NewPipelineExecutor(executors ...Executor) Executor {
	if len(executors) == 0 {
		return func(_ context.Context) error {
			return nil
		}
	}
	var rtn Executor
	for _, executor := range executors {
		if rtn == nil {
			rtn = executor
		} else {
			rtn = rtn.Then(executor)
		}
	}
	return rtn
}

// NewConditionalExecutor creates a new executor based on conditions; either
// branch may be nil, in which case nothing runs for that branch.
func NewConditionalExecutor(conditional Conditional, trueExecutor Executor, falseExecutor Executor) Executor {
	return func(ctx context.Context) error {
		if conditional(ctx) {
			if trueExecutor != nil {
				return trueExecutor(ctx)
			}
		} else {
			if falseExecutor != nil {
				return falseExecutor(ctx)
			}
		}
		return nil
	}
}

// NewErrorExecutor creates a new executor that always errors out
func NewErrorExecutor(err error) Executor {
	return func(_ context.Context) error {
		return err
	}
}

// NewParallelExecutor runs the executors on a bounded worker pool of size
// `parallel` (clamped to at least 1). All executors run to completion even
// if some fail; the context error (if any) takes precedence, otherwise the
// first executor error is returned.
func NewParallelExecutor(parallel int, executors ...Executor) Executor {
	return func(ctx context.Context) error {
		work := make(chan Executor, len(executors))
		errs := make(chan error, len(executors))

		if 1 > parallel {
			log.Debugf("Parallel tasks (%d) below minimum, setting to 1", parallel)
			parallel = 1
		}

		// Workers drain the work channel until it is closed.
		for i := 0; i < parallel; i++ {
			go func(work <-chan Executor, errs chan<- error) {
				for executor := range work {
					errs <- executor(ctx)
				}
			}(work, errs)
		}

		for i := 0; i < len(executors); i++ {
			work <- executors[i]
		}
		close(work)

		// Executor waits all executors to cleanup these resources.
		var firstErr error
		for i := 0; i < len(executors); i++ {
			err := <-errs
			if firstErr == nil {
				firstErr = err
			}
		}

		if err := ctx.Err(); err != nil {
			return err
		}

		return firstErr
	}
}

// NewFieldExecutor runs exec with a logger that carries the extra field.
func NewFieldExecutor(name string, value interface{}, exec Executor) Executor {
	return func(ctx context.Context) error {
		return exec(WithLogger(ctx, Logger(ctx).WithField(name, value)))
	}
}

// ThenError always runs `then`, passing it this executor's error (nil on
// success). Warnings are logged and then forwarded to `then` like a normal
// result; a canceled context short-circuits with ctx.Err().
func (e Executor) ThenError(then func(ctx context.Context, err error) error) Executor {
	return func(ctx context.Context) error {
		err := e(ctx)
		if err != nil {
			switch err.(type) {
			case Warning:
				Logger(ctx).Warning(err.Error())
			default:
				return then(ctx, err)
			}
		}
		if ctx.Err() != nil {
			return ctx.Err()
		}
		return then(ctx, err)
	}
}

// Then runs another executor if this executor succeeds (Warnings count as
// success and are only logged).
func (e Executor) Then(then Executor) Executor {
	return func(ctx context.Context) error {
		err := e(ctx)
		if err != nil {
			switch err.(type) {
			case Warning:
				Logger(ctx).Warning(err.Error())
			default:
				return err
			}
		}
		if ctx.Err() != nil {
			return ctx.Err()
		}
		return then(ctx)
	}
}

// OnError runs `then` only when this executor fails with a non-Warning
// error; both errors are joined in the result.
func (e Executor) OnError(then Executor) Executor {
	return func(ctx context.Context) error {
		err := e(ctx)
		if err != nil {
			switch err.(type) {
			case Warning:
				Logger(ctx).Warning(err.Error())
			default:
				return errors.Join(err, then(ctx))
			}
		}
		if ctx.Err() != nil {
			return ctx.Err()
		}
		return nil
	}
}

// If only runs this executor if conditional is true
func (e Executor) If(conditional Conditional) Executor {
	return func(ctx context.Context) error {
		if conditional(ctx) {
			return e(ctx)
		}
		return nil
	}
}

// IfNot only runs this executor if conditional is false
func (e Executor) IfNot(conditional Conditional) Executor {
	return func(ctx context.Context) error {
		if !conditional(ctx) {
			return e(ctx)
		}
		return nil
	}
}

// IfBool only runs this executor if the boolean is true
func (e Executor) IfBool(conditional bool) Executor {
	return e.If(func(_ context.Context) bool {
		return conditional
	})
}

// Finally adds an executor that always runs after this one; a failure in
// the finally executor wraps (and reports alongside) the original error.
func (e Executor) Finally(finally Executor) Executor {
	return func(ctx context.Context) error {
		err := e(ctx)
		err2 := finally(ctx)
		if err2 != nil {
			return fmt.Errorf("Error occurred running finally: %v (original error: %v)", err2, err)
		}
		return err
	}
}

// Not return an inverted conditional
func (c Conditional) Not() Conditional {
	return func(ctx context.Context) bool {
		return !c(ctx)
	}
}
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/common/auth_test.go
pkg/common/auth_test.go
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package common

import (
	"encoding/json"
	"net/http"
	"testing"

	"github.com/golang-jwt/jwt/v5"
	"github.com/stretchr/testify/assert"
)

// TestCreateAuthorizationToken decodes the minted JWT and checks the
// claims consumed downstream: "scp" (run/job scope), "TaskID", and "ac"
// (the JSON cache-scope list read by buildx's gha cache backend).
func TestCreateAuthorizationToken(t *testing.T) {
	var taskID int64 = 23
	token, err := CreateAuthorizationToken(taskID, 1, 2)
	assert.Nil(t, err)
	assert.NotEqual(t, "", token)
	claims := jwt.MapClaims{}
	// Same empty HMAC key the production code signs with.
	_, err = jwt.ParseWithClaims(token, claims, func(_ *jwt.Token) (interface{}, error) {
		return []byte{}, nil
	})
	assert.Nil(t, err)
	scp, ok := claims["scp"]
	assert.True(t, ok, "Has scp claim in jwt token")
	assert.Contains(t, scp, "Actions.Results:1:2")
	taskIDClaim, ok := claims["TaskID"]
	assert.True(t, ok, "Has TaskID claim in jwt token")
	// JSON numbers decode as float64 in MapClaims.
	assert.Equal(t, float64(taskID), taskIDClaim, "Supplied taskid must match stored one")
	acClaim, ok := claims["ac"]
	assert.True(t, ok, "Has ac claim in jwt token")
	ac, ok := acClaim.(string)
	assert.True(t, ok, "ac claim is a string for buildx gha cache")
	scopes := []actionsCacheScope{}
	err = json.Unmarshal([]byte(ac), &scopes)
	assert.NoError(t, err, "ac claim is a json list for buildx gha cache")
	assert.GreaterOrEqual(t, len(scopes), 1, "Expected at least one action cache scope for buildx gha cache")
}

// TestParseAuthorizationToken round-trips a token through the Bearer
// Authorization header and expects the original TaskID back.
func TestParseAuthorizationToken(t *testing.T) {
	var taskID int64 = 23
	token, err := CreateAuthorizationToken(taskID, 1, 2)
	assert.Nil(t, err)
	assert.NotEqual(t, "", token)
	headers := http.Header{}
	headers.Set("Authorization", "Bearer "+token)
	rTaskID, err := ParseAuthorizationToken(&http.Request{
		Header: headers,
	})
	assert.Nil(t, err)
	assert.Equal(t, taskID, rTaskID)
}

// TestParseAuthorizationTokenNoAuthHeader: a missing Authorization header
// is not an error and yields TaskID 0.
func TestParseAuthorizationTokenNoAuthHeader(t *testing.T) {
	headers := http.Header{}
	rTaskID, err := ParseAuthorizationToken(&http.Request{
		Header: headers,
	})
	assert.Nil(t, err)
	assert.Equal(t, int64(0), rTaskID)
}
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/common/draw.go
pkg/common/draw.go
package common

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// Style is a specific style
type Style int

// Styles: box-drawing character sets selected by index into styleDefs.
const (
	StyleDoubleLine = iota
	StyleSingleLine
	StyleDashedLine
	StyleNoLine
)

// NewPen creates a new pen. Setting CLICOLOR=0 disables colors by forcing
// the foreground and background SGR codes to 0.
func NewPen(style Style, color int) *Pen {
	bgcolor := 49 // ANSI "default background"
	if os.Getenv("CLICOLOR") == "0" {
		color = 0
		bgcolor = 0
	}
	return &Pen{
		style:   style,
		color:   color,
		bgcolor: bgcolor,
	}
}

// styleDef holds the box-drawing glyphs for one Style: the four corners
// plus horizontal and vertical line characters.
type styleDef struct {
	cornerTL string
	cornerTR string
	cornerBL string
	cornerBR string
	lineH    string
	lineV    string
}

var styleDefs = []styleDef{
	{"\u2554", "\u2557", "\u255a", "\u255d", "\u2550", "\u2551"},
	{"\u256d", "\u256e", "\u2570", "\u256f", "\u2500", "\u2502"},
	{"\u250c", "\u2510", "\u2514", "\u2518", "\u254c", "\u254e"},
	{" ", " ", " ", " ", " ", " "},
}

// Pen struct: drawing state (glyph style plus ANSI fg/bg color codes).
type Pen struct {
	style   Style
	color   int
	bgcolor int
}

// Drawing struct: a rendered multi-line fragment plus its display width.
type Drawing struct {
	buf   *strings.Builder
	width int
}

// drawTopBars writes the top edge of one box per label.
// NOTE(review): len(label) counts bytes, so non-ASCII labels would
// mis-size the box — confirm labels are ASCII-only.
func (p *Pen) drawTopBars(buf io.Writer, labels ...string) {
	style := styleDefs[p.style]
	for _, label := range labels {
		bar := strings.Repeat(style.lineH, len(label)+2)
		fmt.Fprintf(buf, " ")
		fmt.Fprintf(buf, "\x1b[%d;%dm", p.color, p.bgcolor)
		fmt.Fprintf(buf, "%s%s%s", style.cornerTL, bar, style.cornerTR)
		fmt.Fprintf(buf, "\x1b[%dm", 0)
	}
	fmt.Fprintf(buf, "\n")
}

// drawBottomBars writes the bottom edge of one box per label.
func (p *Pen) drawBottomBars(buf io.Writer, labels ...string) {
	style := styleDefs[p.style]
	for _, label := range labels {
		bar := strings.Repeat(style.lineH, len(label)+2)
		fmt.Fprintf(buf, " ")
		fmt.Fprintf(buf, "\x1b[%d;%dm", p.color, p.bgcolor)
		fmt.Fprintf(buf, "%s%s%s", style.cornerBL, bar, style.cornerBR)
		fmt.Fprintf(buf, "\x1b[%dm", 0)
	}
	fmt.Fprintf(buf, "\n")
}

// drawLabels writes the middle row of each box: the label between two
// vertical borders.
func (p *Pen) drawLabels(buf io.Writer, labels ...string) {
	style := styleDefs[p.style]
	for _, label := range labels {
		fmt.Fprintf(buf, " ")
		fmt.Fprintf(buf, "\x1b[%d;%dm", p.color, p.bgcolor)
		fmt.Fprintf(buf, "%s %s %s", style.lineV, label, style.lineV)
		fmt.Fprintf(buf, "\x1b[%dm", 0)
	}
	fmt.Fprintf(buf, "\n")
}

// DrawArrow between boxes (a single colored down-arrow glyph).
func (p *Pen) DrawArrow() *Drawing {
	drawing := &Drawing{
		buf:   new(strings.Builder),
		width: 1,
	}
	fmt.Fprintf(drawing.buf, "\x1b[%dm", p.color)
	fmt.Fprintf(drawing.buf, "\u2b07")
	fmt.Fprintf(drawing.buf, "\x1b[%dm", 0)
	return drawing
}

// DrawBoxes to draw boxes, one per label, side by side on three rows.
func (p *Pen) DrawBoxes(labels ...string) *Drawing {
	// Width per box: label + 2 padding spaces + 2 borders + 1 separator.
	width := 0
	for _, l := range labels {
		width += len(l) + 2 + 2 + 1
	}
	drawing := &Drawing{
		buf:   new(strings.Builder),
		width: width,
	}
	p.drawTopBars(drawing.buf, labels...)
	p.drawLabels(drawing.buf, labels...)
	p.drawBottomBars(drawing.buf, labels...)

	return drawing
}

// Draw to writer, left-padding each line so the drawing is centered
// within centerOnWidth columns.
func (d *Drawing) Draw(writer io.Writer, centerOnWidth int) {
	padSize := (centerOnWidth - d.GetWidth()) / 2
	if padSize < 0 {
		padSize = 0
	}
	for _, l := range strings.Split(d.buf.String(), "\n") {
		if len(l) > 0 {
			padding := strings.Repeat(" ", padSize)
			fmt.Fprintf(writer, "%s%s\n", padding, l)
		}
	}
}

// GetWidth of drawing in display columns (excludes ANSI escapes).
func (d *Drawing) GetWidth() int {
	return d.width
}
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/common/git/git.go
pkg/common/git/git.go
package git import ( "context" "errors" "fmt" "io" "os" "path" "regexp" "strings" "sync" "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/config" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/storer" "github.com/go-git/go-git/v5/plumbing/transport/http" "github.com/mattn/go-isatty" log "github.com/sirupsen/logrus" "github.com/nektos/act/pkg/common" ) var ( codeCommitHTTPRegex = regexp.MustCompile(`^https?://git-codecommit\.(.+)\.amazonaws.com/v1/repos/(.+)$`) codeCommitSSHRegex = regexp.MustCompile(`ssh://git-codecommit\.(.+)\.amazonaws.com/v1/repos/(.+)$`) githubHTTPRegex = regexp.MustCompile(`^https?://.*github.com.*/(.+)/(.+?)(?:.git)?$`) githubSSHRegex = regexp.MustCompile(`github.com[:/](.+)/(.+?)(?:.git)?$`) cloneLock sync.Mutex ErrShortRef = errors.New("short SHA references are not supported") ErrNoRepo = errors.New("unable to find git repo") ) type Error struct { err error commit string } func (e *Error) Error() string { return e.err.Error() } func (e *Error) Unwrap() error { return e.err } func (e *Error) Commit() string { return e.commit } // FindGitRevision get the current git revision func FindGitRevision(ctx context.Context, file string) (shortSha string, sha string, err error) { logger := common.Logger(ctx) gitDir, err := git.PlainOpenWithOptions( file, &git.PlainOpenOptions{ DetectDotGit: true, EnableDotGitCommonDir: true, }, ) if err != nil { logger.WithError(err).Error("path", file, "not located inside a git repository") return "", "", err } head, err := gitDir.Reference(plumbing.HEAD, true) if err != nil { return "", "", err } if head.Hash().IsZero() { return "", "", fmt.Errorf("HEAD sha1 could not be resolved") } hash := head.Hash().String() logger.Debugf("Found revision: %s", hash) return hash[:7], strings.TrimSpace(hash), nil } // FindGitRef get the current git ref func FindGitRef(ctx context.Context, file string) (string, error) { logger := common.Logger(ctx) logger.Debugf("Loading revision from git 
directory") _, ref, err := FindGitRevision(ctx, file) if err != nil { return "", err } logger.Debugf("HEAD points to '%s'", ref) // Prefer the git library to iterate over the references and find a matching tag or branch. var refTag = "" var refBranch = "" repo, err := git.PlainOpenWithOptions( file, &git.PlainOpenOptions{ DetectDotGit: true, EnableDotGitCommonDir: true, }, ) if err != nil { return "", err } iter, err := repo.References() if err != nil { return "", err } // find the reference that matches the revision's has err = iter.ForEach(func(r *plumbing.Reference) error { /* tags and branches will have the same hash * when a user checks out a tag, it is not mentioned explicitly * in the go-git package, we must identify the revision * then check if any tag matches that revision, * if so then we checked out a tag * else we look for branches and if matches, * it means we checked out a branch * * If a branches matches first we must continue and check all tags (all references) * in case we match with a tag later in the iteration */ if r.Hash().String() == ref { if r.Name().IsTag() { refTag = r.Name().String() } if r.Name().IsBranch() { refBranch = r.Name().String() } } // we found what we where looking for if refTag != "" && refBranch != "" { return storer.ErrStop } return nil }) if err != nil { return "", err } // order matters here see above comment. 
if refTag != "" { return refTag, nil } if refBranch != "" { return refBranch, nil } return "", fmt.Errorf("failed to identify reference (tag/branch) for the checked-out revision '%s'", ref) } // FindGithubRepo get the repo func FindGithubRepo(ctx context.Context, file, githubInstance, remoteName string) (string, error) { if remoteName == "" { remoteName = "origin" } url, err := findGitRemoteURL(ctx, file, remoteName) if err != nil { return "", err } _, slug, err := findGitSlug(url, githubInstance) return slug, err } func findGitRemoteURL(_ context.Context, file, remoteName string) (string, error) { repo, err := git.PlainOpenWithOptions( file, &git.PlainOpenOptions{ DetectDotGit: true, EnableDotGitCommonDir: true, }, ) if err != nil { return "", err } remote, err := repo.Remote(remoteName) if err != nil { return "", err } if len(remote.Config().URLs) < 1 { return "", fmt.Errorf("remote '%s' exists but has no URL", remoteName) } return remote.Config().URLs[0], nil } func findGitSlug(url string, githubInstance string) (string, string, error) { if matches := codeCommitHTTPRegex.FindStringSubmatch(url); matches != nil { return "CodeCommit", matches[2], nil } else if matches := codeCommitSSHRegex.FindStringSubmatch(url); matches != nil { return "CodeCommit", matches[2], nil } else if matches := githubHTTPRegex.FindStringSubmatch(url); matches != nil { return "GitHub", fmt.Sprintf("%s/%s", matches[1], matches[2]), nil } else if matches := githubSSHRegex.FindStringSubmatch(url); matches != nil { return "GitHub", fmt.Sprintf("%s/%s", matches[1], matches[2]), nil } else if githubInstance != "github.com" { gheHTTPRegex := regexp.MustCompile(fmt.Sprintf(`^https?://%s/(.+)/(.+?)(?:.git)?$`, githubInstance)) gheSSHRegex := regexp.MustCompile(fmt.Sprintf(`%s[:/](.+)/(.+?)(?:.git)?$`, githubInstance)) if matches := gheHTTPRegex.FindStringSubmatch(url); matches != nil { return "GitHubEnterprise", fmt.Sprintf("%s/%s", matches[1], matches[2]), nil } else if matches := 
gheSSHRegex.FindStringSubmatch(url); matches != nil { return "GitHubEnterprise", fmt.Sprintf("%s/%s", matches[1], matches[2]), nil } } return "", url, nil } // NewGitCloneExecutorInput the input for the NewGitCloneExecutor type NewGitCloneExecutorInput struct { URL string Ref string Dir string Token string OfflineMode bool } // CloneIfRequired ... func CloneIfRequired(ctx context.Context, refName plumbing.ReferenceName, input NewGitCloneExecutorInput, logger log.FieldLogger) (*git.Repository, error) { // If the remote URL has changed, remove the directory and clone again. if r, err := git.PlainOpen(input.Dir); err == nil { if remote, err := r.Remote("origin"); err == nil { if len(remote.Config().URLs) > 0 && remote.Config().URLs[0] != input.URL { _ = os.RemoveAll(input.Dir) } } } r, err := git.PlainOpen(input.Dir) if err != nil { var progressWriter io.Writer if isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd()) { if entry, ok := logger.(*log.Entry); ok { progressWriter = entry.WriterLevel(log.DebugLevel) } else if lgr, ok := logger.(*log.Logger); ok { progressWriter = lgr.WriterLevel(log.DebugLevel) } else { log.Errorf("Unable to get writer from logger (type=%T)", logger) progressWriter = os.Stdout } } cloneOptions := git.CloneOptions{ URL: input.URL, Progress: progressWriter, } if input.Token != "" { cloneOptions.Auth = &http.BasicAuth{ Username: "token", Password: input.Token, } } r, err = git.PlainCloneContext(ctx, input.Dir, false, &cloneOptions) if err != nil { logger.Errorf("Unable to clone %v %s: %v", input.URL, refName, err) return nil, err } if err = os.Chmod(input.Dir, 0o755); err != nil { return nil, err } } return r, nil } func gitOptions(token string) (fetchOptions git.FetchOptions, pullOptions git.PullOptions) { fetchOptions.RefSpecs = []config.RefSpec{"refs/*:refs/*", "HEAD:refs/heads/HEAD"} pullOptions.Force = true if token != "" { auth := &http.BasicAuth{ Username: "token", Password: token, } fetchOptions.Auth = auth 
pullOptions.Auth = auth } return fetchOptions, pullOptions } // NewGitCloneExecutor creates an executor to clone git repos // //nolint:gocyclo func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor { return func(ctx context.Context) error { logger := common.Logger(ctx) logger.Infof(" \u2601 git clone '%s' # ref=%s", input.URL, input.Ref) logger.Debugf(" cloning %s to %s", input.URL, input.Dir) cloneLock.Lock() defer cloneLock.Unlock() refName := plumbing.ReferenceName(fmt.Sprintf("refs/heads/%s", input.Ref)) r, err := CloneIfRequired(ctx, refName, input, logger) if err != nil { return err } isOfflineMode := input.OfflineMode // fetch latest changes fetchOptions, pullOptions := gitOptions(input.Token) if !isOfflineMode { err = r.Fetch(&fetchOptions) if err != nil && !errors.Is(err, git.NoErrAlreadyUpToDate) { return err } } var hash *plumbing.Hash rev := plumbing.Revision(input.Ref) if hash, err = r.ResolveRevision(rev); err != nil { logger.Errorf("Unable to resolve %s: %v", input.Ref, err) } if hash.String() != input.Ref && len(input.Ref) >= 4 && strings.HasPrefix(hash.String(), input.Ref) { return &Error{ err: ErrShortRef, commit: hash.String(), } } // At this point we need to know if it's a tag or a branch // And the easiest way to do it is duck typing // // If err is nil, it's a tag so let's proceed with that hash like we would if // it was a sha refType := "tag" rev = plumbing.Revision(path.Join("refs", "tags", input.Ref)) if _, err := r.Tag(input.Ref); errors.Is(err, git.ErrTagNotFound) { rName := plumbing.ReferenceName(path.Join("refs", "remotes", "origin", input.Ref)) if _, err := r.Reference(rName, false); errors.Is(err, plumbing.ErrReferenceNotFound) { refType = "sha" rev = plumbing.Revision(input.Ref) } else { refType = "branch" rev = plumbing.Revision(rName) } } if hash, err = r.ResolveRevision(rev); err != nil { logger.Errorf("Unable to resolve %s: %v", input.Ref, err) return err } var w *git.Worktree if w, err = r.Worktree(); err != 
nil { return err } // If the hash resolved doesn't match the ref provided in a workflow then we're // using a branch or tag ref, not a sha // // Repos on disk point to commit hashes, and need to checkout input.Ref before // we try and pull down any changes if hash.String() != input.Ref && refType == "branch" { logger.Debugf("Provided ref is not a sha. Checking out branch before pulling changes") sourceRef := plumbing.ReferenceName(path.Join("refs", "remotes", "origin", input.Ref)) if err = w.Checkout(&git.CheckoutOptions{ Branch: sourceRef, Force: true, }); err != nil { logger.Errorf("Unable to checkout %s: %v", sourceRef, err) return err } } if !isOfflineMode { if err = w.Pull(&pullOptions); err != nil && err != git.NoErrAlreadyUpToDate { logger.Debugf("Unable to pull %s: %v", refName, err) } } logger.Debugf("Cloned %s to %s", input.URL, input.Dir) if hash.String() != input.Ref && refType == "branch" { logger.Debugf("Provided ref is not a sha. Updating branch ref after pull") if hash, err = r.ResolveRevision(rev); err != nil { logger.Errorf("Unable to resolve %s: %v", input.Ref, err) return err } } if err = w.Checkout(&git.CheckoutOptions{ Hash: *hash, Force: true, }); err != nil { logger.Errorf("Unable to checkout %s: %v", *hash, err) return err } if err = w.Reset(&git.ResetOptions{ Mode: git.HardReset, Commit: *hash, }); err != nil { logger.Errorf("Unable to reset to %s: %v", hash.String(), err) return err } logger.Debugf("Checked out %s", input.Ref) return nil } }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/common/git/git_test.go
pkg/common/git/git_test.go
package git import ( "context" "fmt" "os" "os/exec" "path/filepath" "syscall" "testing" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/nektos/act/pkg/common" ) func TestFindGitSlug(t *testing.T) { assert := assert.New(t) var slugTests = []struct { url string // input provider string // expected result slug string // expected result }{ {"https://git-codecommit.us-east-1.amazonaws.com/v1/repos/my-repo-name", "CodeCommit", "my-repo-name"}, {"ssh://git-codecommit.us-west-2.amazonaws.com/v1/repos/my-repo", "CodeCommit", "my-repo"}, {"git@github.com:nektos/act.git", "GitHub", "nektos/act"}, {"git@github.com:nektos/act", "GitHub", "nektos/act"}, {"https://github.com/nektos/act.git", "GitHub", "nektos/act"}, {"http://github.com/nektos/act.git", "GitHub", "nektos/act"}, {"https://github.com/nektos/act", "GitHub", "nektos/act"}, {"http://github.com/nektos/act", "GitHub", "nektos/act"}, {"git+ssh://git@github.com/owner/repo.git", "GitHub", "owner/repo"}, {"http://myotherrepo.com/act.git", "", "http://myotherrepo.com/act.git"}, } for _, tt := range slugTests { provider, slug, err := findGitSlug(tt.url, "github.com") assert.NoError(err) assert.Equal(tt.provider, provider) assert.Equal(tt.slug, slug) } } func testDir(t *testing.T) string { basedir, err := os.MkdirTemp("", "act-test") require.NoError(t, err) t.Cleanup(func() { _ = os.RemoveAll(basedir) }) return basedir } func cleanGitHooks(dir string) error { hooksDir := filepath.Join(dir, ".git", "hooks") files, err := os.ReadDir(hooksDir) if err != nil { if os.IsNotExist(err) { return nil } return err } for _, f := range files { if f.IsDir() { continue } relName := filepath.Join(hooksDir, f.Name()) if err := os.Remove(relName); err != nil { return err } } return nil } func TestFindGitRemoteURL(t *testing.T) { assert := assert.New(t) basedir := testDir(t) gitConfig() err := gitCmd("init", basedir) assert.NoError(err) err = cleanGitHooks(basedir) 
assert.NoError(err) remoteURL := "https://git-codecommit.us-east-1.amazonaws.com/v1/repos/my-repo-name" err = gitCmd("-C", basedir, "remote", "add", "origin", remoteURL) assert.NoError(err) u, err := findGitRemoteURL(context.Background(), basedir, "origin") assert.NoError(err) assert.Equal(remoteURL, u) remoteURL = "git@github.com/AwesomeOwner/MyAwesomeRepo.git" err = gitCmd("-C", basedir, "remote", "add", "upstream", remoteURL) assert.NoError(err) u, err = findGitRemoteURL(context.Background(), basedir, "upstream") assert.NoError(err) assert.Equal(remoteURL, u) } func TestGitFindRef(t *testing.T) { basedir := testDir(t) gitConfig() for name, tt := range map[string]struct { Prepare func(t *testing.T, dir string) Assert func(t *testing.T, ref string, err error) }{ "new_repo": { Prepare: func(_ *testing.T, _ string) {}, Assert: func(t *testing.T, _ string, err error) { require.Error(t, err) }, }, "new_repo_with_commit": { Prepare: func(t *testing.T, dir string) { require.NoError(t, gitCmd("-C", dir, "commit", "--allow-empty", "-m", "msg")) }, Assert: func(t *testing.T, ref string, err error) { require.NoError(t, err) require.Equal(t, "refs/heads/master", ref) }, }, "current_head_is_tag": { Prepare: func(t *testing.T, dir string) { require.NoError(t, gitCmd("-C", dir, "commit", "--allow-empty", "-m", "commit msg")) require.NoError(t, gitCmd("-C", dir, "tag", "v1.2.3")) require.NoError(t, gitCmd("-C", dir, "checkout", "v1.2.3")) }, Assert: func(t *testing.T, ref string, err error) { require.NoError(t, err) require.Equal(t, "refs/tags/v1.2.3", ref) }, }, "current_head_is_same_as_tag": { Prepare: func(t *testing.T, dir string) { require.NoError(t, gitCmd("-C", dir, "commit", "--allow-empty", "-m", "1.4.2 release")) require.NoError(t, gitCmd("-C", dir, "tag", "v1.4.2")) }, Assert: func(t *testing.T, ref string, err error) { require.NoError(t, err) require.Equal(t, "refs/tags/v1.4.2", ref) }, }, "current_head_is_not_tag": { Prepare: func(t *testing.T, dir string) { 
require.NoError(t, gitCmd("-C", dir, "commit", "--allow-empty", "-m", "msg")) require.NoError(t, gitCmd("-C", dir, "tag", "v1.4.2")) require.NoError(t, gitCmd("-C", dir, "commit", "--allow-empty", "-m", "msg2")) }, Assert: func(t *testing.T, ref string, err error) { require.NoError(t, err) require.Equal(t, "refs/heads/master", ref) }, }, "current_head_is_another_branch": { Prepare: func(t *testing.T, dir string) { require.NoError(t, gitCmd("-C", dir, "checkout", "-b", "mybranch")) require.NoError(t, gitCmd("-C", dir, "commit", "--allow-empty", "-m", "msg")) }, Assert: func(t *testing.T, ref string, err error) { require.NoError(t, err) require.Equal(t, "refs/heads/mybranch", ref) }, }, } { t.Run(name, func(t *testing.T) { dir := filepath.Join(basedir, name) require.NoError(t, os.MkdirAll(dir, 0o755)) require.NoError(t, gitCmd("-C", dir, "init", "--initial-branch=master")) require.NoError(t, cleanGitHooks(dir)) tt.Prepare(t, dir) ref, err := FindGitRef(context.Background(), dir) tt.Assert(t, ref, err) }) } } func TestGitCloneExecutor(t *testing.T) { for name, tt := range map[string]struct { Err error URL, Ref string }{ "tag": { Err: nil, URL: "https://github.com/actions/checkout", Ref: "v2", }, "branch": { Err: nil, URL: "https://github.com/anchore/scan-action", Ref: "act-fails", }, "sha": { Err: nil, URL: "https://github.com/actions/checkout", Ref: "5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f", // v2 }, "short-sha": { Err: &Error{ErrShortRef, "5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f"}, URL: "https://github.com/actions/checkout", Ref: "5a4ac90", // v2 }, } { t.Run(name, func(t *testing.T) { clone := NewGitCloneExecutor(NewGitCloneExecutorInput{ URL: tt.URL, Ref: tt.Ref, Dir: testDir(t), }) err := clone(context.Background()) if tt.Err != nil { assert.Error(t, err) assert.Equal(t, tt.Err, err) } else { assert.Empty(t, err) } }) } } func gitConfig() { if os.Getenv("GITHUB_ACTIONS") == "true" { var err error if err = gitCmd("config", "--global", "user.email", 
"test@test.com"); err != nil { log.Error(err) } if err = gitCmd("config", "--global", "user.name", "Unit Test"); err != nil { log.Error(err) } } } func gitCmd(args ...string) error { cmd := exec.Command("git", args...) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr err := cmd.Run() if exitError, ok := err.(*exec.ExitError); ok { if waitStatus, ok := exitError.Sys().(syscall.WaitStatus); ok { return fmt.Errorf("Exit error %d", waitStatus.ExitStatus()) } return exitError } return nil } func TestCloneIfRequired(t *testing.T) { tempDir := t.TempDir() ctx := context.Background() t.Run("clone", func(t *testing.T) { repo, err := CloneIfRequired(ctx, "refs/heads/main", NewGitCloneExecutorInput{ URL: "https://github.com/actions/checkout", Dir: tempDir, }, common.Logger(ctx)) assert.NoError(t, err) assert.NotNil(t, repo) }) t.Run("clone different remote", func(t *testing.T) { repo, err := CloneIfRequired(ctx, "refs/heads/main", NewGitCloneExecutorInput{ URL: "https://github.com/actions/setup-go", Dir: tempDir, }, common.Logger(ctx)) require.NoError(t, err) require.NotNil(t, repo) remote, err := repo.Remote("origin") require.NoError(t, err) require.Len(t, remote.Config().URLs, 1) assert.Equal(t, "https://github.com/actions/setup-go", remote.Config().URLs[0]) }) }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/runner/command_test.go
pkg/runner/command_test.go
package runner import ( "bytes" "context" "io" "os" "testing" "github.com/sirupsen/logrus/hooks/test" "github.com/stretchr/testify/assert" "github.com/nektos/act/pkg/common" "github.com/nektos/act/pkg/model" ) func TestSetEnv(t *testing.T) { a := assert.New(t) ctx := context.Background() rc := new(RunContext) handler := rc.commandHandler(ctx) handler("::set-env name=x::valz\n") a.Equal("valz", rc.Env["x"]) } func TestSetOutput(t *testing.T) { a := assert.New(t) ctx := context.Background() rc := new(RunContext) rc.StepResults = make(map[string]*model.StepResult) handler := rc.commandHandler(ctx) rc.CurrentStep = "my-step" rc.StepResults[rc.CurrentStep] = &model.StepResult{ Outputs: make(map[string]string), } handler("::set-output name=x::valz\n") a.Equal("valz", rc.StepResults["my-step"].Outputs["x"]) handler("::set-output name=x::percent2%25\n") a.Equal("percent2%", rc.StepResults["my-step"].Outputs["x"]) handler("::set-output name=x::percent2%25%0Atest\n") a.Equal("percent2%\ntest", rc.StepResults["my-step"].Outputs["x"]) handler("::set-output name=x::percent2%25%0Atest another3%25test\n") a.Equal("percent2%\ntest another3%test", rc.StepResults["my-step"].Outputs["x"]) handler("::set-output name=x%3A::percent2%25%0Atest\n") a.Equal("percent2%\ntest", rc.StepResults["my-step"].Outputs["x:"]) handler("::set-output name=x%3A%2C%0A%25%0D%3A::percent2%25%0Atest\n") a.Equal("percent2%\ntest", rc.StepResults["my-step"].Outputs["x:,\n%\r:"]) } func TestAddpath(t *testing.T) { a := assert.New(t) ctx := context.Background() rc := new(RunContext) handler := rc.commandHandler(ctx) handler("::add-path::/zoo\n") a.Equal("/zoo", rc.ExtraPath[0]) handler("::add-path::/boo\n") a.Equal("/boo", rc.ExtraPath[0]) } func TestStopCommands(t *testing.T) { logger, hook := test.NewNullLogger() a := assert.New(t) ctx := common.WithLogger(context.Background(), logger) rc := new(RunContext) handler := rc.commandHandler(ctx) handler("::set-env name=x::valz\n") a.Equal("valz", rc.Env["x"]) 
handler("::stop-commands::my-end-token\n") handler("::set-env name=x::abcd\n") a.Equal("valz", rc.Env["x"]) handler("::my-end-token::\n") handler("::set-env name=x::abcd\n") a.Equal("abcd", rc.Env["x"]) messages := make([]string, 0) for _, entry := range hook.AllEntries() { messages = append(messages, entry.Message) } a.Contains(messages, " \U00002699 ::set-env name=x::abcd\n") } func TestAddpathADO(t *testing.T) { a := assert.New(t) ctx := context.Background() rc := new(RunContext) handler := rc.commandHandler(ctx) handler("##[add-path]/zoo\n") a.Equal("/zoo", rc.ExtraPath[0]) handler("##[add-path]/boo\n") a.Equal("/boo", rc.ExtraPath[0]) } func TestAddmask(t *testing.T) { logger, hook := test.NewNullLogger() a := assert.New(t) ctx := context.Background() loggerCtx := common.WithLogger(ctx, logger) rc := new(RunContext) handler := rc.commandHandler(loggerCtx) handler("::add-mask::my-secret-value\n") a.Equal(" \U00002699 ***", hook.LastEntry().Message) a.NotEqual(" \U00002699 *my-secret-value", hook.LastEntry().Message) } // based on https://stackoverflow.com/a/10476304 func captureOutput(t *testing.T, f func()) string { old := os.Stdout r, w, _ := os.Pipe() os.Stdout = w f() outC := make(chan string) go func() { var buf bytes.Buffer _, err := io.Copy(&buf, r) if err != nil { a := assert.New(t) a.Fail("io.Copy failed") } outC <- buf.String() }() w.Close() os.Stdout = old out := <-outC return out } func TestAddmaskUsemask(t *testing.T) { rc := new(RunContext) rc.StepResults = make(map[string]*model.StepResult) rc.CurrentStep = "my-step" rc.StepResults[rc.CurrentStep] = &model.StepResult{ Outputs: make(map[string]string), } a := assert.New(t) config := &Config{ Secrets: map[string]string{}, InsecureSecrets: false, } re := captureOutput(t, func() { ctx := context.Background() ctx = WithJobLogger(ctx, "0", "testjob", config, &rc.Masks, map[string]interface{}{}) handler := rc.commandHandler(ctx) handler("::add-mask::secret\n") handler("::set-output:: token=secret\n") }) 
a.Equal("[testjob] \U00002699 ***\n[testjob] \U00002699 ::set-output:: = token=***\n", re) } func TestSaveState(t *testing.T) { rc := &RunContext{ CurrentStep: "step", StepResults: map[string]*model.StepResult{}, } ctx := context.Background() handler := rc.commandHandler(ctx) handler("::save-state name=state-name::state-value\n") assert.Equal(t, "state-value", rc.IntraActionState["step"]["state-name"]) }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/runner/expression_test.go
pkg/runner/expression_test.go
package runner import ( "context" "fmt" "os" "regexp" "sort" "testing" "github.com/nektos/act/pkg/exprparser" "github.com/nektos/act/pkg/model" assert "github.com/stretchr/testify/assert" yaml "gopkg.in/yaml.v3" ) func createRunContext(t *testing.T) *RunContext { var yml yaml.Node err := yml.Encode(map[string][]interface{}{ "os": {"Linux", "Windows"}, "foo": {"bar", "baz"}, }) assert.NoError(t, err) return &RunContext{ Config: &Config{ Workdir: ".", Secrets: map[string]string{ "CASE_INSENSITIVE_SECRET": "value", }, Vars: map[string]string{ "CASE_INSENSITIVE_VAR": "value", }, }, Env: map[string]string{ "key": "value", }, Run: &model.Run{ JobID: "job1", Workflow: &model.Workflow{ Name: "test-workflow", Jobs: map[string]*model.Job{ "job1": { Strategy: &model.Strategy{ RawMatrix: yml, }, }, }, }, }, Matrix: map[string]interface{}{ "os": "Linux", "foo": "bar", }, StepResults: map[string]*model.StepResult{ "idwithnothing": { Conclusion: model.StepStatusSuccess, Outcome: model.StepStatusFailure, Outputs: map[string]string{ "foowithnothing": "barwithnothing", }, }, "id-with-hyphens": { Conclusion: model.StepStatusSuccess, Outcome: model.StepStatusFailure, Outputs: map[string]string{ "foo-with-hyphens": "bar-with-hyphens", }, }, "id_with_underscores": { Conclusion: model.StepStatusSuccess, Outcome: model.StepStatusFailure, Outputs: map[string]string{ "foo_with_underscores": "bar_with_underscores", }, }, }, } } func TestEvaluateRunContext(t *testing.T) { rc := createRunContext(t) ee := rc.NewExpressionEvaluator(context.Background()) tables := []struct { in string out interface{} errMesg string }{ {" 1 ", 1, ""}, // {"1 + 3", "4", ""}, // {"(1 + 3) * -2", "-8", ""}, {"'my text'", "my text", ""}, {"contains('my text', 'te')", true, ""}, {"contains('my TEXT', 'te')", true, ""}, {"contains(fromJSON('[\"my text\"]'), 'te')", false, ""}, {"contains(fromJSON('[\"foo\",\"bar\"]'), 'bar')", true, ""}, {"startsWith('hello world', 'He')", true, ""}, {"endsWith('hello world', 'ld')", 
true, ""}, {"format('0:{0} 2:{2} 1:{1}', 'zero', 'one', 'two')", "0:zero 2:two 1:one", ""}, {"join(fromJSON('[\"hello\"]'),'octocat')", "hello", ""}, {"join(fromJSON('[\"hello\",\"mona\",\"the\"]'),'octocat')", "hellooctocatmonaoctocatthe", ""}, {"join('hello','mona')", "hello", ""}, {"toJSON(env)", "{\n \"ACT\": \"true\",\n \"key\": \"value\"\n}", ""}, {"toJson(env)", "{\n \"ACT\": \"true\",\n \"key\": \"value\"\n}", ""}, {"(fromJSON('{\"foo\":\"bar\"}')).foo", "bar", ""}, {"(fromJson('{\"foo\":\"bar\"}')).foo", "bar", ""}, {"(fromJson('[\"foo\",\"bar\"]'))[1]", "bar", ""}, // github does return an empty string for non-existent files {"hashFiles('**/non-extant-files')", "", ""}, {"hashFiles('**/non-extant-files', '**/more-non-extant-files')", "", ""}, {"hashFiles('**/non.extant.files')", "", ""}, {"hashFiles('**/non''extant''files')", "", ""}, {"success()", true, ""}, {"failure()", false, ""}, {"always()", true, ""}, {"cancelled()", false, ""}, {"github.workflow", "test-workflow", ""}, {"github.actor", "nektos/act", ""}, {"github.run_id", "1", ""}, {"github.run_number", "1", ""}, {"job.status", "success", ""}, {"matrix.os", "Linux", ""}, {"matrix.foo", "bar", ""}, {"env.key", "value", ""}, {"secrets.CASE_INSENSITIVE_SECRET", "value", ""}, {"secrets.case_insensitive_secret", "value", ""}, {"vars.CASE_INSENSITIVE_VAR", "value", ""}, {"vars.case_insensitive_var", "value", ""}, {"format('{{0}}', 'test')", "{0}", ""}, {"format('{{{0}}}', 'test')", "{test}", ""}, {"format('}}')", "}", ""}, {"format('echo Hello {0} ${{Test}}', 'World')", "echo Hello World ${Test}", ""}, {"format('echo Hello {0} ${{Test}}', github.undefined_property)", "echo Hello ${Test}", ""}, {"format('echo Hello {0}{1} ${{Te{0}st}}', github.undefined_property, 'World')", "echo Hello World ${Test}", ""}, {"format('{0}', '{1}', 'World')", "{1}", ""}, {"format('{{{0}', '{1}', 'World')", "{{1}", ""}, } for _, table := range tables { t.Run(table.in, func(t *testing.T) { assertObject := assert.New(t) out, 
err := ee.evaluate(context.Background(), table.in, exprparser.DefaultStatusCheckNone) if table.errMesg == "" { assertObject.NoError(err, table.in) assertObject.Equal(table.out, out, table.in) } else { assertObject.Error(err, table.in) assertObject.Equal(table.errMesg, err.Error(), table.in) } }) } } func TestEvaluateStep(t *testing.T) { rc := createRunContext(t) step := &stepRun{ RunContext: rc, } ee := rc.NewStepExpressionEvaluator(context.Background(), step) tables := []struct { in string out interface{} errMesg string }{ {"steps.idwithnothing.conclusion", model.StepStatusSuccess.String(), ""}, {"steps.idwithnothing.outcome", model.StepStatusFailure.String(), ""}, {"steps.idwithnothing.outputs.foowithnothing", "barwithnothing", ""}, {"steps.id-with-hyphens.conclusion", model.StepStatusSuccess.String(), ""}, {"steps.id-with-hyphens.outcome", model.StepStatusFailure.String(), ""}, {"steps.id-with-hyphens.outputs.foo-with-hyphens", "bar-with-hyphens", ""}, {"steps.id_with_underscores.conclusion", model.StepStatusSuccess.String(), ""}, {"steps.id_with_underscores.outcome", model.StepStatusFailure.String(), ""}, {"steps.id_with_underscores.outputs.foo_with_underscores", "bar_with_underscores", ""}, } for _, table := range tables { t.Run(table.in, func(t *testing.T) { assertObject := assert.New(t) out, err := ee.evaluate(context.Background(), table.in, exprparser.DefaultStatusCheckNone) if table.errMesg == "" { assertObject.NoError(err, table.in) assertObject.Equal(table.out, out, table.in) } else { assertObject.Error(err, table.in) assertObject.Equal(table.errMesg, err.Error(), table.in) } }) } } func TestInterpolate(t *testing.T) { rc := &RunContext{ Config: &Config{ Workdir: ".", Secrets: map[string]string{ "CASE_INSENSITIVE_SECRET": "value", }, Vars: map[string]string{ "CASE_INSENSITIVE_VAR": "value", }, }, Env: map[string]string{ "KEYWITHNOTHING": "valuewithnothing", "KEY-WITH-HYPHENS": "value-with-hyphens", "KEY_WITH_UNDERSCORES": "value_with_underscores", 
"SOMETHING_TRUE": "true", "SOMETHING_FALSE": "false", }, Run: &model.Run{ JobID: "job1", Workflow: &model.Workflow{ Name: "test-workflow", Jobs: map[string]*model.Job{ "job1": {}, }, }, }, } ee := rc.NewExpressionEvaluator(context.Background()) tables := []struct { in string out string }{ {" text ", " text "}, {" $text ", " $text "}, {" ${text} ", " ${text} "}, {" ${{ 1 }} to ${{2}} ", " 1 to 2 "}, {" ${{ (true || false) }} to ${{2}} ", " true to 2 "}, {" ${{ (false || '}}' ) }} to ${{2}} ", " }} to 2 "}, {" ${{ env.KEYWITHNOTHING }} ", " valuewithnothing "}, {" ${{ env.KEY-WITH-HYPHENS }} ", " value-with-hyphens "}, {" ${{ env.KEY_WITH_UNDERSCORES }} ", " value_with_underscores "}, {"${{ secrets.CASE_INSENSITIVE_SECRET }}", "value"}, {"${{ secrets.case_insensitive_secret }}", "value"}, {"${{ vars.CASE_INSENSITIVE_VAR }}", "value"}, {"${{ vars.case_insensitive_var }}", "value"}, {"${{ env.UNKNOWN }}", ""}, {"${{ env.SOMETHING_TRUE }}", "true"}, {"${{ env.SOMETHING_FALSE }}", "false"}, {"${{ !env.SOMETHING_TRUE }}", "false"}, {"${{ !env.SOMETHING_FALSE }}", "false"}, {"${{ !env.SOMETHING_TRUE && true }}", "false"}, {"${{ !env.SOMETHING_FALSE && true }}", "false"}, {"${{ env.SOMETHING_TRUE && true }}", "true"}, {"${{ env.SOMETHING_FALSE && true }}", "true"}, {"${{ !env.SOMETHING_TRUE || true }}", "true"}, {"${{ !env.SOMETHING_FALSE || true }}", "true"}, {"${{ !env.SOMETHING_TRUE && false }}", "false"}, {"${{ !env.SOMETHING_FALSE && false }}", "false"}, {"${{ !env.SOMETHING_TRUE || false }}", "false"}, {"${{ !env.SOMETHING_FALSE || false }}", "false"}, {"${{ env.SOMETHING_TRUE || false }}", "true"}, {"${{ env.SOMETHING_FALSE || false }}", "false"}, {"${{ env.SOMETHING_FALSE }} && ${{ env.SOMETHING_TRUE }}", "false && true"}, {"${{ fromJSON('{}') < 2 }}", "false"}, } updateTestExpressionWorkflow(t, tables, rc) for _, table := range tables { t.Run("interpolate", func(t *testing.T) { assertObject := assert.New(t) out := ee.Interpolate(context.Background(), table.in) 
assertObject.Equal(table.out, out, table.in) }) } } func updateTestExpressionWorkflow(t *testing.T, tables []struct { in string out string }, rc *RunContext) { var envs string keys := make([]string, 0, len(rc.Env)) for k := range rc.Env { keys = append(keys, k) } sort.Strings(keys) for _, k := range keys { envs += fmt.Sprintf(" %s: %s\n", k, rc.Env[k]) } // editorconfig-checker-disable workflow := fmt.Sprintf(` name: "Test how expressions are handled on GitHub" on: push env: %s jobs: test-espressions: runs-on: ubuntu-latest steps: `, envs) // editorconfig-checker-enable for _, table := range tables { expressionPattern := regexp.MustCompile(`\${{\s*(.+?)\s*}}`) expr := expressionPattern.ReplaceAllStringFunc(table.in, func(match string) string { return fmt.Sprintf("€{{ %s }}", expressionPattern.ReplaceAllString(match, "$1")) }) name := fmt.Sprintf(`%s -> %s should be equal to %s`, expr, table.in, table.out) echo := `run: echo "Done "` workflow += fmt.Sprintf("\n - name: %s\n %s\n", name, echo) } file, err := os.Create("../../.github/workflows/test-expressions.yml") if err != nil { t.Fatal(err) } _, err = file.WriteString(workflow) if err != nil { t.Fatal(err) } } func TestRewriteSubExpression(t *testing.T) { table := []struct { in string out string }{ {in: "Hello World", out: "Hello World"}, {in: "${{ true }}", out: "${{ true }}"}, {in: "${{ true }} ${{ true }}", out: "format('{0} {1}', true, true)"}, {in: "${{ true || false }} ${{ true && true }}", out: "format('{0} {1}', true || false, true && true)"}, {in: "${{ '}}' }}", out: "${{ '}}' }}"}, {in: "${{ '''}}''' }}", out: "${{ '''}}''' }}"}, {in: "${{ '''' }}", out: "${{ '''' }}"}, {in: `${{ fromJSON('"}}"') }}`, out: `${{ fromJSON('"}}"') }}`}, {in: `${{ fromJSON('"\"}}\""') }}`, out: `${{ fromJSON('"\"}}\""') }}`}, {in: `${{ fromJSON('"''}}"') }}`, out: `${{ fromJSON('"''}}"') }}`}, {in: "Hello ${{ 'World' }}", out: "format('Hello {0}', 'World')"}, } for _, table := range table { t.Run("TestRewriteSubExpression", 
func(t *testing.T) { assertObject := assert.New(t) out, err := rewriteSubExpression(context.Background(), table.in, false) if err != nil { t.Fatal(err) } assertObject.Equal(table.out, out, table.in) }) } } func TestRewriteSubExpressionForceFormat(t *testing.T) { table := []struct { in string out string }{ {in: "Hello World", out: "Hello World"}, {in: "${{ true }}", out: "format('{0}', true)"}, {in: "${{ '}}' }}", out: "format('{0}', '}}')"}, {in: `${{ fromJSON('"}}"') }}`, out: `format('{0}', fromJSON('"}}"'))`}, {in: "Hello ${{ 'World' }}", out: "format('Hello {0}', 'World')"}, } for _, table := range table { t.Run("TestRewriteSubExpressionForceFormat", func(t *testing.T) { assertObject := assert.New(t) out, err := rewriteSubExpression(context.Background(), table.in, true) if err != nil { t.Fatal(err) } assertObject.Equal(table.out, out, table.in) }) } }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/runner/action_cache.go
pkg/runner/action_cache.go
package runner import ( "archive/tar" "context" "crypto/rand" "encoding/hex" "errors" "fmt" "io" "io/fs" "path" "strings" "time" git "github.com/go-git/go-git/v5" config "github.com/go-git/go-git/v5/config" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/object" "github.com/go-git/go-git/v5/plumbing/transport" "github.com/go-git/go-git/v5/plumbing/transport/http" "github.com/nektos/act/pkg/common" ) type ActionCache interface { Fetch(ctx context.Context, cacheDir, url, ref, token string) (string, error) GetTarArchive(ctx context.Context, cacheDir, sha, includePrefix string) (io.ReadCloser, error) } type GoGitActionCache struct { Path string } func (c GoGitActionCache) Fetch(ctx context.Context, cacheDir, url, ref, token string) (string, error) { logger := common.Logger(ctx) gitPath := path.Join(c.Path, safeFilename(cacheDir)+".git") logger.Infof("GoGitActionCache fetch %s with ref %s at %s", url, ref, gitPath) gogitrepo, err := git.PlainInit(gitPath, true) if errors.Is(err, git.ErrRepositoryAlreadyExists) { logger.Debugf("GoGitActionCache cache hit %s with ref %s at %s", url, ref, gitPath) gogitrepo, err = git.PlainOpen(gitPath) } if err != nil { return "", fmt.Errorf("GoGitActionCache failed to open bare git %s with ref %s at %s: %w", url, ref, gitPath, err) } tmpBranch := make([]byte, 12) if _, err := rand.Read(tmpBranch); err != nil { return "", fmt.Errorf("GoGitActionCache failed to generate random tmp branch %s with ref %s at %s: %w", url, ref, gitPath, err) } branchName := hex.EncodeToString(tmpBranch) var auth transport.AuthMethod if token != "" { auth = &http.BasicAuth{ Username: "token", Password: token, } } remote, err := gogitrepo.CreateRemoteAnonymous(&config.RemoteConfig{ Name: "anonymous", URLs: []string{ url, }, }) if err != nil { return "", fmt.Errorf("GoGitActionCache failed to create remote %s with ref %s at %s: %w", url, ref, gitPath, err) } defer func() { _ = gogitrepo.DeleteBranch(branchName) }() if err := 
remote.FetchContext(ctx, &git.FetchOptions{ RefSpecs: []config.RefSpec{ config.RefSpec(ref + ":" + branchName), }, Auth: auth, Force: true, Depth: 1, }); err != nil { return "", fmt.Errorf("GoGitActionCache failed to fetch %s with ref %s at %s: %w", url, ref, gitPath, err) } hash, err := gogitrepo.ResolveRevision(plumbing.Revision(branchName)) if err != nil { return "", fmt.Errorf("GoGitActionCache failed to resolve sha %s with ref %s at %s: %w", url, ref, gitPath, err) } logger.Infof("GoGitActionCache fetch %s with ref %s at %s resolved to %s", url, ref, gitPath, hash.String()) return hash.String(), nil } type GitFileInfo struct { name string size int64 modTime time.Time isDir bool mode fs.FileMode } // IsDir implements fs.FileInfo. func (g *GitFileInfo) IsDir() bool { return g.isDir } // ModTime implements fs.FileInfo. func (g *GitFileInfo) ModTime() time.Time { return g.modTime } // Mode implements fs.FileInfo. func (g *GitFileInfo) Mode() fs.FileMode { return g.mode } // Name implements fs.FileInfo. func (g *GitFileInfo) Name() string { return g.name } // Size implements fs.FileInfo. func (g *GitFileInfo) Size() int64 { return g.size } // Sys implements fs.FileInfo. 
func (g *GitFileInfo) Sys() any { return nil } func (c GoGitActionCache) GetTarArchive(ctx context.Context, cacheDir, sha, includePrefix string) (io.ReadCloser, error) { logger := common.Logger(ctx) gitPath := path.Join(c.Path, safeFilename(cacheDir)+".git") logger.Infof("GoGitActionCache get content %s with sha %s subpath '%s' at %s", cacheDir, sha, includePrefix, gitPath) gogitrepo, err := git.PlainOpen(gitPath) if err != nil { return nil, fmt.Errorf("GoGitActionCache failed to open bare git %s with sha %s subpath '%s' at %s: %w", cacheDir, sha, includePrefix, gitPath, err) } commit, err := gogitrepo.CommitObject(plumbing.NewHash(sha)) if err != nil { return nil, fmt.Errorf("GoGitActionCache failed to get commit %s with sha %s subpath '%s' at %s: %w", cacheDir, sha, includePrefix, gitPath, err) } t, err := commit.Tree() if err != nil { return nil, fmt.Errorf("GoGitActionCache failed to open git tree %s with sha %s subpath '%s' at %s: %w", cacheDir, sha, includePrefix, gitPath, err) } files, err := commit.Files() if err != nil { return nil, fmt.Errorf("GoGitActionCache failed to list files %s with sha %s subpath '%s' at %s: %w", cacheDir, sha, includePrefix, gitPath, err) } rpipe, wpipe := io.Pipe() // Interrupt io.Copy using ctx ch := make(chan int, 1) go func() { select { case <-ctx.Done(): wpipe.CloseWithError(ctx.Err()) case <-ch: } }() go func() { defer wpipe.Close() defer close(ch) tw := tar.NewWriter(wpipe) cleanIncludePrefix := path.Clean(includePrefix) wpipe.CloseWithError(files.ForEach(func(f *object.File) error { return actionCacheCopyFileOrDir(ctx, cleanIncludePrefix, t, tw, f.Name, f) })) }() return rpipe, err } func actionCacheCopyFileOrDir(ctx context.Context, cleanIncludePrefix string, t *object.Tree, tw *tar.Writer, origin string, f *object.File) error { if err := ctx.Err(); err != nil { return err } name := origin if strings.HasPrefix(name, cleanIncludePrefix+"/") { name = name[len(cleanIncludePrefix)+1:] } else if cleanIncludePrefix != "." 
&& name != cleanIncludePrefix { return nil } fmode, err := f.Mode.ToOSFileMode() if err != nil { return err } if fmode&fs.ModeSymlink == fs.ModeSymlink { content, err := f.Contents() if err != nil { return err } destPath := path.Join(path.Dir(f.Name), content) subtree, err := t.Tree(destPath) if err == nil { return subtree.Files().ForEach(func(ft *object.File) error { return actionCacheCopyFileOrDir(ctx, cleanIncludePrefix, t, tw, origin+strings.TrimPrefix(ft.Name, f.Name), f) }) } f, err := t.File(destPath) if err != nil { return fmt.Errorf("%s (%s): %w", destPath, origin, err) } return actionCacheCopyFileOrDir(ctx, cleanIncludePrefix, t, tw, origin, f) } header, err := tar.FileInfoHeader(&GitFileInfo{ name: name, mode: fmode, size: f.Size, }, "") if err != nil { return err } err = tw.WriteHeader(header) if err != nil { return err } reader, err := f.Reader() if err != nil { return err } _, err = io.Copy(tw, reader) return err }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/runner/action_cache_test.go
pkg/runner/action_cache_test.go
package runner import ( "archive/tar" "bytes" "context" "io" "os" "testing" "github.com/stretchr/testify/assert" ) //nolint:gosec func TestActionCache(t *testing.T) { a := assert.New(t) cache := &GoGitActionCache{ Path: os.TempDir(), } ctx := context.Background() cacheDir := "nektos/act-test-actions" repo := "https://github.com/nektos/act-test-actions" refs := []struct { Name string CacheDir string Repo string Ref string }{ { Name: "Fetch Branch Name", CacheDir: cacheDir, Repo: repo, Ref: "main", }, { Name: "Fetch Branch Name Absolutely", CacheDir: cacheDir, Repo: repo, Ref: "refs/heads/main", }, { Name: "Fetch HEAD", CacheDir: cacheDir, Repo: repo, Ref: "HEAD", }, { Name: "Fetch Sha", CacheDir: cacheDir, Repo: repo, Ref: "de984ca37e4df4cb9fd9256435a3b82c4a2662b1", }, } for _, c := range refs { t.Run(c.Name, func(_ *testing.T) { sha, err := cache.Fetch(ctx, c.CacheDir, c.Repo, c.Ref, "") if !a.NoError(err) || !a.NotEmpty(sha) { return } atar, err := cache.GetTarArchive(ctx, c.CacheDir, sha, "js") if !a.NoError(err) || !a.NotEmpty(atar) { return } mytar := tar.NewReader(atar) th, err := mytar.Next() if !a.NoError(err) || !a.NotEqual(0, th.Size) { return } buf := &bytes.Buffer{} // G110: Potential DoS vulnerability via decompression bomb (gosec) _, err = io.Copy(buf, mytar) a.NoError(err) str := buf.String() a.NotEmpty(str) }) } } func TestActionCacheFailures(t *testing.T) { a := assert.New(t) cache := &GoGitActionCache{ Path: os.TempDir(), } ctx := context.Background() cacheDir := "nektos/act-test-actions" repo := "https://github.com/nektos/act-test-actions-not-exist" repoExist := "https://github.com/nektos/act-test-actions" refs := []struct { Name string CacheDir string Repo string Ref string }{ { Name: "Fetch Branch Name", CacheDir: cacheDir, Repo: repo, Ref: "main", }, { Name: "Fetch Branch Name Absolutely", CacheDir: cacheDir, Repo: repo, Ref: "refs/heads/main", }, { Name: "Fetch HEAD", CacheDir: cacheDir, Repo: repo, Ref: "HEAD", }, { Name: "Fetch Sha", 
CacheDir: cacheDir, Repo: repo, Ref: "de984ca37e4df4cb9fd9256435a3b82c4a2662b1", }, { Name: "Fetch Branch Name no existing", CacheDir: cacheDir, Repo: repoExist, Ref: "main2", }, { Name: "Fetch Branch Name Absolutely no existing", CacheDir: cacheDir, Repo: repoExist, Ref: "refs/heads/main2", }, { Name: "Fetch Sha no existing", CacheDir: cacheDir, Repo: repoExist, Ref: "de984ca37e4df4cb9fd9256435a3b82c4a2662b2", }, } for _, c := range refs { t.Run(c.Name, func(t *testing.T) { _, err := cache.Fetch(ctx, c.CacheDir, c.Repo, c.Ref, "") t.Logf("%s\n", err) if !a.Error(err) { return } }) } }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/runner/run_context.go
pkg/runner/run_context.go
package runner import ( "archive/tar" "bufio" "bytes" "context" "crypto/rand" "crypto/sha256" "encoding/hex" "encoding/json" "errors" "fmt" "io" "os" "path/filepath" "regexp" "runtime" "strconv" "strings" "time" "github.com/docker/go-connections/nat" "github.com/nektos/act/pkg/common" "github.com/nektos/act/pkg/container" "github.com/nektos/act/pkg/exprparser" "github.com/nektos/act/pkg/model" "github.com/opencontainers/selinux/go-selinux" ) // RunContext contains info about current job type RunContext struct { Name string Config *Config Matrix map[string]interface{} Run *model.Run EventJSON string Env map[string]string GlobalEnv map[string]string // to pass env changes of GITHUB_ENV and set-env correctly, due to dirty Env field ExtraPath []string CurrentStep string StepResults map[string]*model.StepResult IntraActionState map[string]map[string]string ExprEval ExpressionEvaluator JobContainer container.ExecutionsEnvironment ServiceContainers []container.ExecutionsEnvironment OutputMappings map[MappableOutput]MappableOutput JobName string ActionPath string Parent *RunContext Masks []string cleanUpJobContainer common.Executor caller *caller // job calling this RunContext (reusable workflows) Cancelled bool nodeToolFullPath string } func (rc *RunContext) AddMask(mask string) { rc.Masks = append(rc.Masks, mask) } type MappableOutput struct { StepID string OutputName string } func (rc *RunContext) String() string { name := fmt.Sprintf("%s/%s", rc.Run.Workflow.Name, rc.Name) if rc.caller != nil { // prefix the reusable workflow with the caller job // this is required to create unique container names name = fmt.Sprintf("%s/%s", rc.caller.runContext.Name, name) } return name } // GetEnv returns the env for the context func (rc *RunContext) GetEnv() map[string]string { if rc.Env == nil { rc.Env = map[string]string{} if rc.Run != nil && rc.Run.Workflow != nil && rc.Config != nil { job := rc.Run.Job() if job != nil { rc.Env = mergeMaps(rc.Run.Workflow.Env, job.Environment(), 
rc.Config.Env) } } } rc.Env["ACT"] = "true" return rc.Env } func (rc *RunContext) jobContainerName() string { return createContainerName("act", rc.String()) } // networkName return the name of the network which will be created by `act` automatically for job, // only create network if using a service container func (rc *RunContext) networkName() (string, bool) { if len(rc.Run.Job().Services) > 0 { return fmt.Sprintf("%s-%s-network", rc.jobContainerName(), rc.Run.JobID), true } if rc.Config.ContainerNetworkMode == "" { return "host", false } return string(rc.Config.ContainerNetworkMode), false } func getDockerDaemonSocketMountPath(daemonPath string) string { if protoIndex := strings.Index(daemonPath, "://"); protoIndex != -1 { scheme := daemonPath[:protoIndex] if strings.EqualFold(scheme, "npipe") { // linux container mount on windows, use the default socket path of the VM / wsl2 return "/var/run/docker.sock" } else if strings.EqualFold(scheme, "unix") { return daemonPath[protoIndex+3:] } else if strings.IndexFunc(scheme, func(r rune) bool { return (r < 'a' || r > 'z') && (r < 'A' || r > 'Z') }) == -1 { // unknown protocol use default return "/var/run/docker.sock" } } return daemonPath } // Returns the binds and mounts for the container, resolving paths as appropriate func (rc *RunContext) GetBindsAndMounts() ([]string, map[string]string) { name := rc.jobContainerName() if rc.Config.ContainerDaemonSocket == "" { rc.Config.ContainerDaemonSocket = "/var/run/docker.sock" } binds := []string{} if rc.Config.ContainerDaemonSocket != "-" { daemonPath := getDockerDaemonSocketMountPath(rc.Config.ContainerDaemonSocket) binds = append(binds, fmt.Sprintf("%s:%s", daemonPath, "/var/run/docker.sock")) } ext := container.LinuxContainerEnvironmentExtensions{} if hostEnv, ok := rc.JobContainer.(*container.HostEnvironment); ok { mounts := map[string]string{} // Permission issues? 
// binds = append(binds, hostEnv.ToolCache+":/opt/hostedtoolcache") binds = append(binds, hostEnv.GetActPath()+":"+ext.GetActPath()) binds = append(binds, hostEnv.ToContainerPath(rc.Config.Workdir)+":"+ext.ToContainerPath(rc.Config.Workdir)) return binds, mounts } mounts := map[string]string{ "act-toolcache": "/opt/hostedtoolcache", name + "-env": ext.GetActPath(), } if job := rc.Run.Job(); job != nil { if container := job.Container(); container != nil { for _, v := range container.Volumes { if !strings.Contains(v, ":") || filepath.IsAbs(v) { // Bind anonymous volume or host file. binds = append(binds, v) } else { // Mount existing volume. paths := strings.SplitN(v, ":", 2) mounts[paths[0]] = paths[1] } } } } if rc.Config.BindWorkdir { bindModifiers := "" if runtime.GOOS == "darwin" { bindModifiers = ":delegated" } if selinux.GetEnabled() { bindModifiers = ":z" } binds = append(binds, fmt.Sprintf("%s:%s%s", rc.Config.Workdir, ext.ToContainerPath(rc.Config.Workdir), bindModifiers)) } else { mounts[name] = ext.ToContainerPath(rc.Config.Workdir) } return binds, mounts } func (rc *RunContext) startHostEnvironment() common.Executor { return func(ctx context.Context) error { logger := common.Logger(ctx) rawLogger := logger.WithField("raw_output", true) logWriter := common.NewLineWriter(rc.commandHandler(ctx), func(s string) bool { if rc.Config.LogOutput { rawLogger.Infof("%s", s) } else { rawLogger.Debugf("%s", s) } return true }) cacheDir := rc.ActionCacheDir() randBytes := make([]byte, 8) _, _ = rand.Read(randBytes) miscpath := filepath.Join(cacheDir, hex.EncodeToString(randBytes)) actPath := filepath.Join(miscpath, "act") if err := os.MkdirAll(actPath, 0o777); err != nil { return err } path := filepath.Join(miscpath, "hostexecutor") if err := os.MkdirAll(path, 0o777); err != nil { return err } runnerTmp := filepath.Join(miscpath, "tmp") if err := os.MkdirAll(runnerTmp, 0o777); err != nil { return err } toolCache := filepath.Join(cacheDir, "tool_cache") rc.JobContainer 
= &container.HostEnvironment{ Path: path, TmpDir: runnerTmp, ToolCache: toolCache, Workdir: rc.Config.Workdir, ActPath: actPath, CleanUp: func() { os.RemoveAll(miscpath) }, StdOut: logWriter, } rc.cleanUpJobContainer = rc.JobContainer.Remove() for k, v := range rc.JobContainer.GetRunnerContext(ctx) { if v, ok := v.(string); ok { rc.Env[fmt.Sprintf("RUNNER_%s", strings.ToUpper(k))] = v } } for _, env := range os.Environ() { if k, v, ok := strings.Cut(env, "="); ok { // don't override if _, ok := rc.Env[k]; !ok { rc.Env[k] = v } } } return common.NewPipelineExecutor( rc.JobContainer.Copy(rc.JobContainer.GetActPath()+"/", &container.FileEntry{ Name: "workflow/event.json", Mode: 0o644, Body: rc.EventJSON, }, &container.FileEntry{ Name: "workflow/envs.txt", Mode: 0o666, Body: "", }), )(ctx) } } func (rc *RunContext) startJobContainer() common.Executor { return func(ctx context.Context) error { logger := common.Logger(ctx) image := rc.platformImage(ctx) rawLogger := logger.WithField("raw_output", true) logWriter := common.NewLineWriter(rc.commandHandler(ctx), func(s string) bool { if rc.Config.LogOutput { rawLogger.Infof("%s", s) } else { rawLogger.Debugf("%s", s) } return true }) username, password, err := rc.handleCredentials(ctx) if err != nil { return fmt.Errorf("failed to handle credentials: %s", err) } logger.Infof("\U0001f680 Start image=%s", image) name := rc.jobContainerName() envList := make([]string, 0) envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_TOOL_CACHE", "/opt/hostedtoolcache")) envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_OS", "Linux")) envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_ARCH", container.RunnerArch(ctx))) envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_TEMP", "/tmp")) envList = append(envList, fmt.Sprintf("%s=%s", "LANG", "C.UTF-8")) // Use same locale as GitHub Actions ext := container.LinuxContainerEnvironmentExtensions{} binds, mounts := rc.GetBindsAndMounts() // specify the network to which the 
container will connect when `docker create` stage. (like execute command line: docker create --network <networkName> <image>) // if using service containers, will create a new network for the containers. // and it will be removed after at last. networkName, createAndDeleteNetwork := rc.networkName() // add service containers for serviceID, spec := range rc.Run.Job().Services { // interpolate env interpolatedEnvs := make(map[string]string, len(spec.Env)) for k, v := range spec.Env { interpolatedEnvs[k] = rc.ExprEval.Interpolate(ctx, v) } envs := make([]string, 0, len(interpolatedEnvs)) for k, v := range interpolatedEnvs { envs = append(envs, fmt.Sprintf("%s=%s", k, v)) } username, password, err = rc.handleServiceCredentials(ctx, spec.Credentials) if err != nil { return fmt.Errorf("failed to handle service %s credentials: %w", serviceID, err) } interpolatedVolumes := make([]string, 0, len(spec.Volumes)) for _, volume := range spec.Volumes { interpolatedVolumes = append(interpolatedVolumes, rc.ExprEval.Interpolate(ctx, volume)) } serviceBinds, serviceMounts := rc.GetServiceBindsAndMounts(interpolatedVolumes) interpolatedPorts := make([]string, 0, len(spec.Ports)) for _, port := range spec.Ports { interpolatedPorts = append(interpolatedPorts, rc.ExprEval.Interpolate(ctx, port)) } exposedPorts, portBindings, err := nat.ParsePortSpecs(interpolatedPorts) if err != nil { return fmt.Errorf("failed to parse service %s ports: %w", serviceID, err) } imageName := rc.ExprEval.Interpolate(ctx, spec.Image) if imageName == "" { logger.Infof("The service '%s' will not be started because the container definition has an empty image.", serviceID) continue } serviceContainerName := createContainerName(rc.jobContainerName(), serviceID) c := container.NewContainer(&container.NewContainerInput{ Name: serviceContainerName, WorkingDir: ext.ToContainerPath(rc.Config.Workdir), Image: imageName, Username: username, Password: password, Env: envs, Mounts: serviceMounts, Binds: serviceBinds, 
Stdout: logWriter, Stderr: logWriter, Privileged: rc.Config.Privileged, UsernsMode: rc.Config.UsernsMode, Platform: rc.Config.ContainerArchitecture, Options: rc.ExprEval.Interpolate(ctx, spec.Options), NetworkMode: networkName, NetworkAliases: []string{serviceID}, ExposedPorts: exposedPorts, PortBindings: portBindings, }) rc.ServiceContainers = append(rc.ServiceContainers, c) } rc.cleanUpJobContainer = func(ctx context.Context) error { reuseJobContainer := func(_ context.Context) bool { return rc.Config.ReuseContainers } if rc.JobContainer != nil { return rc.JobContainer.Remove().IfNot(reuseJobContainer). Then(container.NewDockerVolumeRemoveExecutor(rc.jobContainerName(), false)).IfNot(reuseJobContainer). Then(container.NewDockerVolumeRemoveExecutor(rc.jobContainerName()+"-env", false)).IfNot(reuseJobContainer). Then(func(ctx context.Context) error { if len(rc.ServiceContainers) > 0 { logger.Infof("Cleaning up services for job %s", rc.JobName) if err := rc.stopServiceContainers()(ctx); err != nil { logger.Errorf("Error while cleaning services: %v", err) } if createAndDeleteNetwork { // clean network if it has been created by act // if using service containers // it means that the network to which containers are connecting is created by `act_runner`, // so, we should remove the network at last. 
logger.Infof("Cleaning up network for job %s, and network name is: %s", rc.JobName, networkName) if err := container.NewDockerNetworkRemoveExecutor(networkName)(ctx); err != nil { logger.Errorf("Error while cleaning network: %v", err) } } } return nil })(ctx) } return nil } jobContainerNetwork := rc.Config.ContainerNetworkMode.NetworkName() if rc.containerImage(ctx) != "" { jobContainerNetwork = networkName } else if jobContainerNetwork == "" { jobContainerNetwork = "host" } rc.JobContainer = container.NewContainer(&container.NewContainerInput{ Cmd: nil, Entrypoint: []string{"tail", "-f", "/dev/null"}, WorkingDir: ext.ToContainerPath(rc.Config.Workdir), Image: image, Username: username, Password: password, Name: name, Env: envList, Mounts: mounts, NetworkMode: jobContainerNetwork, NetworkAliases: []string{rc.Name}, Binds: binds, Stdout: logWriter, Stderr: logWriter, Privileged: rc.Config.Privileged, UsernsMode: rc.Config.UsernsMode, Platform: rc.Config.ContainerArchitecture, Options: rc.options(ctx), }) if rc.JobContainer == nil { return errors.New("Failed to create job container") } return common.NewPipelineExecutor( rc.pullServicesImages(rc.Config.ForcePull), rc.JobContainer.Pull(rc.Config.ForcePull), rc.stopJobContainer(), container.NewDockerNetworkCreateExecutor(networkName).IfBool(createAndDeleteNetwork), rc.startServiceContainers(networkName), rc.JobContainer.Create(rc.Config.ContainerCapAdd, rc.Config.ContainerCapDrop), rc.JobContainer.Start(false), rc.JobContainer.Copy(rc.JobContainer.GetActPath()+"/", &container.FileEntry{ Name: "workflow/event.json", Mode: 0o644, Body: rc.EventJSON, }, &container.FileEntry{ Name: "workflow/envs.txt", Mode: 0o666, Body: "", }), rc.waitForServiceContainers(), )(ctx) } } func (rc *RunContext) execJobContainer(cmd []string, env map[string]string, user, workdir string) common.Executor { return func(ctx context.Context) error { return rc.JobContainer.Exec(cmd, env, user, workdir)(ctx) } } func (rc *RunContext) 
InitializeNodeTool() common.Executor { return func(ctx context.Context) error { ctx, cancel := common.EarlyCancelContext(ctx) defer cancel() rc.GetNodeToolFullPath(ctx) return nil } } func (rc *RunContext) GetNodeToolFullPath(ctx context.Context) string { if rc.nodeToolFullPath == "" { timeed, cancel := context.WithTimeout(ctx, time.Minute) defer cancel() path := rc.JobContainer.GetPathVariableName() cenv := map[string]string{} var cpath string if err := rc.JobContainer.UpdateFromImageEnv(&cenv)(ctx); err == nil { if p, ok := cenv[path]; ok { cpath = p } } if len(cpath) == 0 { cpath = rc.JobContainer.DefaultPathVariable() } cenv[path] = cpath hout := &bytes.Buffer{} herr := &bytes.Buffer{} stdout, stderr := rc.JobContainer.ReplaceLogWriter(hout, herr) err := rc.execJobContainer([]string{"node", "--no-warnings", "-e", "console.log(process.execPath)"}, cenv, "", ""). Finally(func(context.Context) error { rc.JobContainer.ReplaceLogWriter(stdout, stderr) return nil })(timeed) rawStr := strings.Trim(hout.String(), "\r\n") if err == nil && !strings.ContainsAny(rawStr, "\r\n") { rc.nodeToolFullPath = rawStr } else { rc.nodeToolFullPath = "node" } } return rc.nodeToolFullPath } func (rc *RunContext) ApplyExtraPath(ctx context.Context, env *map[string]string) { if len(rc.ExtraPath) > 0 { path := rc.JobContainer.GetPathVariableName() if rc.JobContainer.IsEnvironmentCaseInsensitive() { // On windows system Path and PATH could also be in the map for k := range *env { if strings.EqualFold(path, k) { path = k break } } } if (*env)[path] == "" { cenv := map[string]string{} var cpath string if err := rc.JobContainer.UpdateFromImageEnv(&cenv)(ctx); err == nil { if p, ok := cenv[path]; ok { cpath = p } } if len(cpath) == 0 { cpath = rc.JobContainer.DefaultPathVariable() } (*env)[path] = cpath } (*env)[path] = rc.JobContainer.JoinPathVariable(append(rc.ExtraPath, (*env)[path])...) 
} } func (rc *RunContext) UpdateExtraPath(ctx context.Context, githubEnvPath string) error { if common.Dryrun(ctx) { return nil } pathTar, err := rc.JobContainer.GetContainerArchive(ctx, githubEnvPath) if err != nil { return err } defer pathTar.Close() reader := tar.NewReader(pathTar) _, err = reader.Next() if err != nil && err != io.EOF { return err } s := bufio.NewScanner(reader) s.Buffer(nil, 1024*1024*1024) // increase buffer to 1GB to avoid scanner buffer overflow firstLine := true for s.Scan() { line := s.Text() if firstLine { firstLine = false // skip utf8 bom, powershell 5 legacy uses it for utf8 if len(line) >= 3 && line[0] == 239 && line[1] == 187 && line[2] == 191 { line = line[3:] } } if len(line) > 0 { rc.addPath(ctx, line) } } return s.Err() } // stopJobContainer removes the job container (if it exists) and its volume (if it exists) func (rc *RunContext) stopJobContainer() common.Executor { return func(ctx context.Context) error { if rc.cleanUpJobContainer != nil { return rc.cleanUpJobContainer(ctx) } return nil } } func (rc *RunContext) pullServicesImages(forcePull bool) common.Executor { return func(ctx context.Context) error { execs := []common.Executor{} for _, c := range rc.ServiceContainers { execs = append(execs, c.Pull(forcePull)) } return common.NewParallelExecutor(len(execs), execs...)(ctx) } } func (rc *RunContext) startServiceContainers(_ string) common.Executor { return func(ctx context.Context) error { execs := []common.Executor{} for _, c := range rc.ServiceContainers { execs = append(execs, common.NewPipelineExecutor( c.Pull(false), c.Create(rc.Config.ContainerCapAdd, rc.Config.ContainerCapDrop), c.Start(false), )) } return common.NewParallelExecutor(len(execs), execs...)(ctx) } } func (rc *RunContext) waitForServiceContainer(c container.ExecutionsEnvironment) common.Executor { return func(ctx context.Context) error { sctx, cancel := context.WithTimeout(ctx, time.Minute*5) defer cancel() health := container.HealthStarting delay := 
time.Second for i := 0; ; i++ { health = c.GetHealth(sctx) if health != container.HealthStarting || i > 30 { break } time.Sleep(delay) delay *= 2 if delay > 10*time.Second { delay = 10 * time.Second } } if health == container.HealthHealthy { return nil } return fmt.Errorf("service container failed to start") } } func (rc *RunContext) waitForServiceContainers() common.Executor { return func(ctx context.Context) error { execs := []common.Executor{} for _, c := range rc.ServiceContainers { execs = append(execs, rc.waitForServiceContainer(c)) } return common.NewParallelExecutor(len(execs), execs...)(ctx) } } func (rc *RunContext) stopServiceContainers() common.Executor { return func(ctx context.Context) error { execs := []common.Executor{} for _, c := range rc.ServiceContainers { execs = append(execs, c.Remove().Finally(c.Close())) } return common.NewParallelExecutor(len(execs), execs...)(ctx) } } // Prepare the mounts and binds for the worker // ActionCacheDir is for rc func (rc *RunContext) ActionCacheDir() string { if rc.Config.ActionCacheDir != "" { return rc.Config.ActionCacheDir } var xdgCache string var ok bool if xdgCache, ok = os.LookupEnv("XDG_CACHE_HOME"); !ok || xdgCache == "" { if home, err := os.UserHomeDir(); err == nil { xdgCache = filepath.Join(home, ".cache") } else if xdgCache, err = filepath.Abs("."); err != nil { // It's almost impossible to get here, so the temp dir is a good fallback xdgCache = os.TempDir() } } return filepath.Join(xdgCache, "act") } // Interpolate outputs after a job is done func (rc *RunContext) interpolateOutputs() common.Executor { return func(ctx context.Context) error { ee := rc.NewExpressionEvaluator(ctx) for k, v := range rc.Run.Job().Outputs { interpolated := ee.Interpolate(ctx, v) if v != interpolated { rc.Run.Job().Outputs[k] = interpolated } } return nil } } func (rc *RunContext) startContainer() common.Executor { return func(ctx context.Context) error { ctx, cancel := common.EarlyCancelContext(ctx) defer cancel() if 
rc.IsHostEnv(ctx) { return rc.startHostEnvironment()(ctx) } return rc.startJobContainer()(ctx) } } func (rc *RunContext) IsHostEnv(ctx context.Context) bool { platform := rc.runsOnImage(ctx) image := rc.containerImage(ctx) return image == "" && strings.EqualFold(platform, "-self-hosted") } func (rc *RunContext) stopContainer() common.Executor { return rc.stopJobContainer() } func (rc *RunContext) closeContainer() common.Executor { return func(ctx context.Context) error { if rc.JobContainer != nil { return rc.JobContainer.Close()(ctx) } return nil } } func (rc *RunContext) matrix() map[string]interface{} { return rc.Matrix } func (rc *RunContext) result(result string) { rc.Run.Job().Result = result } func (rc *RunContext) steps() []*model.Step { return rc.Run.Job().Steps } // Executor returns a pipeline executor for all the steps in the job func (rc *RunContext) Executor() (common.Executor, error) { var executor common.Executor var jobType, err = rc.Run.Job().Type() switch jobType { case model.JobTypeDefault: executor = newJobExecutor(rc, &stepFactoryImpl{}, rc) case model.JobTypeReusableWorkflowLocal: executor = newLocalReusableWorkflowExecutor(rc) case model.JobTypeReusableWorkflowRemote: executor = newRemoteReusableWorkflowExecutor(rc) case model.JobTypeInvalid: return nil, err } return func(ctx context.Context) error { res, err := rc.isEnabled(ctx) if err != nil { return err } if res { return executor(ctx) } return nil }, nil } func (rc *RunContext) containerImage(ctx context.Context) string { job := rc.Run.Job() c := job.Container() if c != nil { return rc.ExprEval.Interpolate(ctx, c.Image) } return "" } func (rc *RunContext) runsOnImage(ctx context.Context) string { if rc.Run.Job().RunsOn() == nil { common.Logger(ctx).Errorf("'runs-on' key not defined in %s", rc.String()) } for _, platformName := range rc.runsOnPlatformNames(ctx) { image := rc.Config.Platforms[strings.ToLower(platformName)] if image != "" { return image } } return "" } func (rc *RunContext) 
runsOnPlatformNames(ctx context.Context) []string { job := rc.Run.Job() if job.RunsOn() == nil { return []string{} } if err := rc.ExprEval.EvaluateYamlNode(ctx, &job.RawRunsOn); err != nil { common.Logger(ctx).Errorf("Error while evaluating runs-on: %v", err) return []string{} } return job.RunsOn() } func (rc *RunContext) platformImage(ctx context.Context) string { if containerImage := rc.containerImage(ctx); containerImage != "" { return containerImage } return rc.runsOnImage(ctx) } func (rc *RunContext) options(ctx context.Context) string { job := rc.Run.Job() c := job.Container() if c != nil { return rc.ExprEval.Interpolate(ctx, c.Options) } return rc.Config.ContainerOptions } func (rc *RunContext) isEnabled(ctx context.Context) (bool, error) { job := rc.Run.Job() l := common.Logger(ctx) runJob, runJobErr := EvalBool(ctx, rc.ExprEval, job.If.Value, exprparser.DefaultStatusCheckSuccess) jobType, jobTypeErr := job.Type() if runJobErr != nil { return false, fmt.Errorf(" \u274C Error in if-expression: \"if: %s\" (%s)", job.If.Value, runJobErr) } if jobType == model.JobTypeInvalid { return false, jobTypeErr } if !runJob { rc.result("skipped") l.WithField("jobResult", "skipped").Debugf("Skipping job '%s' due to '%s'", job.Name, job.If.Value) return false, nil } if jobType != model.JobTypeDefault { return true, nil } img := rc.platformImage(ctx) if img == "" { for _, platformName := range rc.runsOnPlatformNames(ctx) { l.Infof("\U0001F6A7 Skipping unsupported platform -- Try running with `-P %+v=...`", platformName) } return false, nil } return true, nil } func mergeMaps(maps ...map[string]string) map[string]string { rtnMap := make(map[string]string) for _, m := range maps { for k, v := range m { rtnMap[k] = v } } return rtnMap } func createContainerName(parts ...string) string { name := strings.Join(parts, "-") pattern := regexp.MustCompile("[^a-zA-Z0-9]") name = pattern.ReplaceAllString(name, "-") name = strings.ReplaceAll(name, "--", "-") hash := 
sha256.Sum256([]byte(name)) // SHA256 is 64 hex characters. So trim name to 63 characters to make room for the hash and separator trimmedName := strings.Trim(trimToLen(name, 63), "-") return fmt.Sprintf("%s-%x", trimmedName, hash) } func trimToLen(s string, l int) string { if l < 0 { l = 0 } if len(s) > l { return s[:l] } return s } func (rc *RunContext) getJobContext() *model.JobContext { jobStatus := "success" if rc.Cancelled { jobStatus = "cancelled" } else { for _, stepStatus := range rc.StepResults { if stepStatus.Conclusion == model.StepStatusFailure { jobStatus = "failure" break } } } return &model.JobContext{ Status: jobStatus, } } func (rc *RunContext) getStepsContext() map[string]*model.StepResult { return rc.StepResults } func (rc *RunContext) getGithubContext(ctx context.Context) *model.GithubContext { logger := common.Logger(ctx) ghc := &model.GithubContext{ Event: make(map[string]interface{}), Workflow: rc.Run.Workflow.Name, RunAttempt: rc.Config.Env["GITHUB_RUN_ATTEMPT"], RunID: rc.Config.Env["GITHUB_RUN_ID"], RunNumber: rc.Config.Env["GITHUB_RUN_NUMBER"], Actor: rc.Config.Actor, EventName: rc.Config.EventName, Action: rc.CurrentStep, Token: rc.Config.Token, Job: rc.Run.JobID, ActionPath: rc.ActionPath, ActionRepository: rc.Env["GITHUB_ACTION_REPOSITORY"], ActionRef: rc.Env["GITHUB_ACTION_REF"], RepositoryOwner: rc.Config.Env["GITHUB_REPOSITORY_OWNER"], RetentionDays: rc.Config.Env["GITHUB_RETENTION_DAYS"], RunnerPerflog: rc.Config.Env["RUNNER_PERFLOG"], RunnerTrackingID: rc.Config.Env["RUNNER_TRACKING_ID"], Repository: rc.Config.Env["GITHUB_REPOSITORY"], Ref: rc.Config.Env["GITHUB_REF"], Sha: rc.Config.Env["SHA_REF"], RefName: rc.Config.Env["GITHUB_REF_NAME"], RefType: rc.Config.Env["GITHUB_REF_TYPE"], BaseRef: rc.Config.Env["GITHUB_BASE_REF"], HeadRef: rc.Config.Env["GITHUB_HEAD_REF"], Workspace: rc.Config.Env["GITHUB_WORKSPACE"], } if rc.JobContainer != nil { ghc.EventPath = rc.JobContainer.GetActPath() + "/workflow/event.json" ghc.Workspace = 
rc.JobContainer.ToContainerPath(rc.Config.Workdir) } if ghc.RunAttempt == "" { ghc.RunAttempt = "1" } if ghc.RunID == "" { ghc.RunID = "1" } if ghc.RunNumber == "" { ghc.RunNumber = "1" } if ghc.RetentionDays == "" { ghc.RetentionDays = "0" } if ghc.RunnerPerflog == "" { ghc.RunnerPerflog = "/dev/null" } // Backwards compatibility for configs that require // a default rather than being run as a cmd if ghc.Actor == "" { ghc.Actor = "nektos/act" } if rc.EventJSON != "" { err := json.Unmarshal([]byte(rc.EventJSON), &ghc.Event) if err != nil { logger.Errorf("Unable to Unmarshal event '%s': %v", rc.EventJSON, err) } } ghc.SetBaseAndHeadRef() repoPath := rc.Config.Workdir ghc.SetRepositoryAndOwner(ctx, rc.Config.GitHubInstance, rc.Config.RemoteName, repoPath) if ghc.Ref == "" { ghc.SetRef(ctx, rc.Config.DefaultBranch, repoPath) } if ghc.Sha == "" { ghc.SetSha(ctx, repoPath) } ghc.SetRefTypeAndName() // defaults ghc.ServerURL = "https://github.com" ghc.APIURL = "https://api.github.com" ghc.GraphQLURL = "https://api.github.com/graphql" // per GHES if rc.Config.GitHubInstance != "github.com" { ghc.ServerURL = fmt.Sprintf("https://%s", rc.Config.GitHubInstance) ghc.APIURL = fmt.Sprintf("https://%s/api/v3", rc.Config.GitHubInstance) ghc.GraphQLURL = fmt.Sprintf("https://%s/api/graphql", rc.Config.GitHubInstance) } // allow to be overridden by user if rc.Config.Env["GITHUB_SERVER_URL"] != "" { ghc.ServerURL = rc.Config.Env["GITHUB_SERVER_URL"] } if rc.Config.Env["GITHUB_API_URL"] != "" { ghc.APIURL = rc.Config.Env["GITHUB_API_URL"] } if rc.Config.Env["GITHUB_GRAPHQL_URL"] != "" { ghc.GraphQLURL = rc.Config.Env["GITHUB_GRAPHQL_URL"] } return ghc } func isLocalCheckout(ghc *model.GithubContext, step *model.Step) bool { if step.Type() == model.StepTypeInvalid { // This will be errored out by the executor later, we need this here to avoid a null panic though return false } if step.Type() != model.StepTypeUsesActionRemote { return false } remoteAction := newRemoteAction(step.Uses) 
if remoteAction == nil { // IsCheckout() will nil panic if we dont bail out early return false } if !remoteAction.IsCheckout() { return false } if repository, ok := step.With["repository"]; ok && repository != ghc.Repository { return false } if repository, ok := step.With["ref"]; ok && repository != ghc.Ref { return false } return true } func nestedMapLookup(m map[string]interface{}, ks ...string) (rval interface{}) { var ok bool if len(ks) == 0 { // degenerate input return nil } if rval, ok = m[ks[0]]; !ok { return nil } else if len(ks) == 1 { // we've reached the final key return rval } else if m, ok = rval.(map[string]interface{}); !ok { return nil } // 1+ more keys return nestedMapLookup(m, ks[1:]...) } func (rc *RunContext) withGithubEnv(ctx context.Context, github *model.GithubContext, env map[string]string) map[string]string { env["CI"] = "true" env["GITHUB_WORKFLOW"] = github.Workflow env["GITHUB_RUN_ATTEMPT"] = github.RunAttempt env["GITHUB_RUN_ID"] = github.RunID env["GITHUB_RUN_NUMBER"] = github.RunNumber env["GITHUB_ACTION"] = github.Action env["GITHUB_ACTION_PATH"] = github.ActionPath env["GITHUB_ACTION_REPOSITORY"] = github.ActionRepository env["GITHUB_ACTION_REF"] = github.ActionRef env["GITHUB_ACTIONS"] = "true" env["GITHUB_ACTOR"] = github.Actor env["GITHUB_REPOSITORY"] = github.Repository env["GITHUB_EVENT_NAME"] = github.EventName env["GITHUB_EVENT_PATH"] = github.EventPath env["GITHUB_WORKSPACE"] = github.Workspace env["GITHUB_SHA"] = github.Sha env["GITHUB_REF"] = github.Ref env["GITHUB_REF_NAME"] = github.RefName env["GITHUB_REF_TYPE"] = github.RefType env["GITHUB_JOB"] = github.Job env["GITHUB_REPOSITORY_OWNER"] = github.RepositoryOwner env["GITHUB_RETENTION_DAYS"] = github.RetentionDays env["RUNNER_PERFLOG"] = github.RunnerPerflog env["RUNNER_TRACKING_ID"] = github.RunnerTrackingID env["GITHUB_BASE_REF"] = github.BaseRef env["GITHUB_HEAD_REF"] = github.HeadRef env["GITHUB_SERVER_URL"] = github.ServerURL env["GITHUB_API_URL"] = github.APIURL 
env["GITHUB_GRAPHQL_URL"] = github.GraphQLURL if rc.Config.ArtifactServerPath != "" { setActionRuntimeVars(rc, env) } for _, platformName := range rc.runsOnPlatformNames(ctx) { if platformName != "" { if platformName == "ubuntu-latest" { // hardcode current ubuntu-latest since we have no way to check that 'on the fly' env["ImageOS"] = "ubuntu20" } else { platformName = strings.SplitN(strings.Replace(platformName, `-`, ``, 1), `.`, 2)[0] env["ImageOS"] = platformName } } } return env } func setActionRuntimeVars(rc *RunContext, env map[string]string) { actionsRuntimeURL := os.Getenv("ACTIONS_RUNTIME_URL") if actionsRuntimeURL == "" { actionsRuntimeURL = fmt.Sprintf("http://%s:%s/", rc.Config.ArtifactServerAddr, rc.Config.ArtifactServerPort) } env["ACTIONS_RUNTIME_URL"] = actionsRuntimeURL env["ACTIONS_RESULTS_URL"] = actionsRuntimeURL actionsRuntimeToken := os.Getenv("ACTIONS_RUNTIME_TOKEN") if actionsRuntimeToken == "" { runID := int64(1) if rid, ok := rc.Config.Env["GITHUB_RUN_ID"]; ok { runID, _ = strconv.ParseInt(rid, 10, 64) }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
true
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/runner/step_factory_test.go
pkg/runner/step_factory_test.go
package runner import ( "testing" "github.com/nektos/act/pkg/model" "github.com/stretchr/testify/assert" ) func TestStepFactoryNewStep(t *testing.T) { table := []struct { name string model *model.Step check func(s step) bool }{ { name: "StepRemoteAction", model: &model.Step{ Uses: "remote/action@v1", }, check: func(s step) bool { _, ok := s.(*stepActionRemote) return ok }, }, { name: "StepLocalAction", model: &model.Step{ Uses: "./action@v1", }, check: func(s step) bool { _, ok := s.(*stepActionLocal) return ok }, }, { name: "StepDocker", model: &model.Step{ Uses: "docker://image:tag", }, check: func(s step) bool { _, ok := s.(*stepDocker) return ok }, }, { name: "StepRun", model: &model.Step{ Run: "cmd", }, check: func(s step) bool { _, ok := s.(*stepRun) return ok }, }, } for _, tt := range table { t.Run(tt.name, func(t *testing.T) { sf := &stepFactoryImpl{} step, err := sf.newStep(tt.model, &RunContext{}) assert.True(t, tt.check((step))) assert.Nil(t, err) }) } } func TestStepFactoryInvalidStep(t *testing.T) { model := &model.Step{ Uses: "remote/action@v1", Run: "cmd", } sf := &stepFactoryImpl{} _, err := sf.newStep(model, &RunContext{}) assert.Error(t, err) }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/runner/step_action_remote.go
pkg/runner/step_action_remote.go
package runner import ( "archive/tar" "context" "errors" "fmt" "io" "os" "path" "path/filepath" "regexp" "strings" gogit "github.com/go-git/go-git/v5" "github.com/nektos/act/pkg/common" "github.com/nektos/act/pkg/common/git" "github.com/nektos/act/pkg/model" ) type stepActionRemote struct { Step *model.Step RunContext *RunContext compositeRunContext *RunContext compositeSteps *compositeSteps readAction readAction runAction runAction action *model.Action env map[string]string remoteAction *remoteAction cacheDir string resolvedSha string } var ( stepActionRemoteNewCloneExecutor = git.NewGitCloneExecutor ) func (sar *stepActionRemote) prepareActionExecutor() common.Executor { return func(ctx context.Context) error { if sar.remoteAction != nil && sar.action != nil { // we are already good to run return nil } sar.remoteAction = newRemoteAction(sar.Step.Uses) if sar.remoteAction == nil { return fmt.Errorf("Expected format {org}/{repo}[/path]@ref. Actual '%s' Input string was not in a correct format", sar.Step.Uses) } github := sar.getGithubContext(ctx) sar.remoteAction.URL = github.ServerURL if sar.remoteAction.IsCheckout() && isLocalCheckout(github, sar.Step) && !sar.RunContext.Config.NoSkipCheckout { common.Logger(ctx).Debugf("Skipping local actions/checkout because workdir was already copied") return nil } for _, action := range sar.RunContext.Config.ReplaceGheActionWithGithubCom { if strings.EqualFold(fmt.Sprintf("%s/%s", sar.remoteAction.Org, sar.remoteAction.Repo), action) { sar.remoteAction.URL = "https://github.com" github.Token = sar.RunContext.Config.ReplaceGheActionTokenWithGithubCom } } if sar.RunContext.Config.ActionCache != nil { cache := sar.RunContext.Config.ActionCache var err error sar.cacheDir = fmt.Sprintf("%s/%s", sar.remoteAction.Org, sar.remoteAction.Repo) repoURL := sar.remoteAction.URL + "/" + sar.cacheDir repoRef := sar.remoteAction.Ref sar.resolvedSha, err = cache.Fetch(ctx, sar.cacheDir, repoURL, repoRef, github.Token) if err != nil { return 
fmt.Errorf("failed to fetch \"%s\" version \"%s\": %w", repoURL, repoRef, err) } remoteReader := func(ctx context.Context) actionYamlReader { return func(filename string) (io.Reader, io.Closer, error) { spath := path.Join(sar.remoteAction.Path, filename) for i := 0; i < maxSymlinkDepth; i++ { tars, err := cache.GetTarArchive(ctx, sar.cacheDir, sar.resolvedSha, spath) if err != nil { return nil, nil, os.ErrNotExist } treader := tar.NewReader(tars) header, err := treader.Next() if err != nil { return nil, nil, os.ErrNotExist } if header.FileInfo().Mode()&os.ModeSymlink == os.ModeSymlink { spath, err = symlinkJoin(spath, header.Linkname, ".") if err != nil { return nil, nil, err } } else { return treader, tars, nil } } return nil, nil, fmt.Errorf("max depth %d of symlinks exceeded while reading %s", maxSymlinkDepth, spath) } } actionModel, err := sar.readAction(ctx, sar.Step, sar.resolvedSha, sar.remoteAction.Path, remoteReader(ctx), os.WriteFile) sar.action = actionModel return err } actionDir := fmt.Sprintf("%s/%s", sar.RunContext.ActionCacheDir(), safeFilename(sar.Step.Uses)) gitClone := stepActionRemoteNewCloneExecutor(git.NewGitCloneExecutorInput{ URL: sar.remoteAction.CloneURL(), Ref: sar.remoteAction.Ref, Dir: actionDir, Token: github.Token, OfflineMode: sar.RunContext.Config.ActionOfflineMode, }) var ntErr common.Executor if err := gitClone(ctx); err != nil { if errors.Is(err, git.ErrShortRef) { return fmt.Errorf("Unable to resolve action `%s`, the provided ref `%s` is the shortened version of a commit SHA, which is not supported. 
Please use the full commit SHA `%s` instead", sar.Step.Uses, sar.remoteAction.Ref, err.(*git.Error).Commit()) } else if errors.Is(err, gogit.ErrForceNeeded) { // TODO: figure out if it will be easy to shadow/alias go-git err's ntErr = common.NewInfoExecutor("Non-terminating error while running 'git clone': %v", err) } else { return err } } remoteReader := func(_ context.Context) actionYamlReader { return func(filename string) (io.Reader, io.Closer, error) { f, err := os.Open(filepath.Join(actionDir, sar.remoteAction.Path, filename)) return f, f, err } } return common.NewPipelineExecutor( ntErr, func(ctx context.Context) error { actionModel, err := sar.readAction(ctx, sar.Step, actionDir, sar.remoteAction.Path, remoteReader(ctx), os.WriteFile) sar.action = actionModel return err }, )(ctx) } } func (sar *stepActionRemote) pre() common.Executor { sar.env = map[string]string{} return common.NewPipelineExecutor( sar.prepareActionExecutor(), runStepExecutor(sar, stepStagePre, runPreStep(sar)).If(hasPreStep(sar)).If(shouldRunPreStep(sar))) } func (sar *stepActionRemote) main() common.Executor { return common.NewPipelineExecutor( sar.prepareActionExecutor(), runStepExecutor(sar, stepStageMain, func(ctx context.Context) error { github := sar.getGithubContext(ctx) if sar.remoteAction.IsCheckout() && isLocalCheckout(github, sar.Step) && !sar.RunContext.Config.NoSkipCheckout { if sar.RunContext.Config.BindWorkdir { common.Logger(ctx).Debugf("Skipping local actions/checkout because you bound your workspace") return nil } eval := sar.RunContext.NewExpressionEvaluator(ctx) copyToPath := path.Join(sar.RunContext.JobContainer.ToContainerPath(sar.RunContext.Config.Workdir), eval.Interpolate(ctx, sar.Step.With["path"])) return sar.RunContext.JobContainer.CopyDir(copyToPath, sar.RunContext.Config.Workdir+string(filepath.Separator)+".", sar.RunContext.Config.UseGitIgnore)(ctx) } actionDir := fmt.Sprintf("%s/%s", sar.RunContext.ActionCacheDir(), safeFilename(sar.Step.Uses)) return 
sar.runAction(sar, actionDir, sar.remoteAction)(ctx) }), ) } func (sar *stepActionRemote) post() common.Executor { return runStepExecutor(sar, stepStagePost, runPostStep(sar)).If(hasPostStep(sar)).If(shouldRunPostStep(sar)) } func (sar *stepActionRemote) getRunContext() *RunContext { return sar.RunContext } func (sar *stepActionRemote) getGithubContext(ctx context.Context) *model.GithubContext { ghc := sar.getRunContext().getGithubContext(ctx) // extend github context if we already have an initialized remoteAction remoteAction := sar.remoteAction if remoteAction != nil { ghc.ActionRepository = fmt.Sprintf("%s/%s", remoteAction.Org, remoteAction.Repo) ghc.ActionRef = remoteAction.Ref } return ghc } func (sar *stepActionRemote) getStepModel() *model.Step { return sar.Step } func (sar *stepActionRemote) getEnv() *map[string]string { return &sar.env } func (sar *stepActionRemote) getIfExpression(ctx context.Context, stage stepStage) string { switch stage { case stepStagePre: github := sar.getGithubContext(ctx) if sar.remoteAction.IsCheckout() && isLocalCheckout(github, sar.Step) && !sar.RunContext.Config.NoSkipCheckout { // skip local checkout pre step return "false" } return sar.action.Runs.PreIf case stepStageMain: return sar.Step.If.Value case stepStagePost: return sar.action.Runs.PostIf } return "" } func (sar *stepActionRemote) getActionModel() *model.Action { return sar.action } func (sar *stepActionRemote) getCompositeRunContext(ctx context.Context) *RunContext { if sar.compositeRunContext == nil { actionDir := fmt.Sprintf("%s/%s", sar.RunContext.ActionCacheDir(), safeFilename(sar.Step.Uses)) actionLocation := path.Join(actionDir, sar.remoteAction.Path) _, containerActionDir := getContainerActionPaths(sar.getStepModel(), actionLocation, sar.RunContext) sar.compositeRunContext = newCompositeRunContext(ctx, sar.RunContext, sar, containerActionDir) sar.compositeSteps = sar.compositeRunContext.compositeExecutor(sar.action) } else { // Re-evaluate environment here. 
For remote actions the environment // need to be re-created for every stage (pre, main, post) as there // might be required context changes (inputs/outputs) while the action // stages are executed. (e.g. the output of another action is the // input for this action during the main stage, but the env // was already created during the pre stage) env := evaluateCompositeInputAndEnv(ctx, sar.RunContext, sar) sar.compositeRunContext.Env = env sar.compositeRunContext.ExtraPath = sar.RunContext.ExtraPath } return sar.compositeRunContext } func (sar *stepActionRemote) getCompositeSteps() *compositeSteps { return sar.compositeSteps } type remoteAction struct { URL string Org string Repo string Path string Ref string } func (ra *remoteAction) CloneURL() string { return fmt.Sprintf("%s/%s/%s", ra.URL, ra.Org, ra.Repo) } func (ra *remoteAction) IsCheckout() bool { if ra.Org == "actions" && ra.Repo == "checkout" { return true } return false } func newRemoteAction(action string) *remoteAction { // GitHub's document[^] describes: // > We strongly recommend that you include the version of // > the action you are using by specifying a Git ref, SHA, or Docker tag number. // Actually, the workflow stops if there is the uses directive that hasn't @ref. // [^]: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions r := regexp.MustCompile(`^([^/@]+)/([^/@]+)(/([^@]*))?(@(.*))?$`) matches := r.FindStringSubmatch(action) if len(matches) < 7 || matches[6] == "" { return nil } return &remoteAction{ Org: matches[1], Repo: matches[2], Path: matches[4], Ref: matches[6], URL: "https://github.com", } } func safeFilename(s string) string { return strings.NewReplacer( `<`, "-", `>`, "-", `:`, "-", `"`, "-", `/`, "-", `\`, "-", `|`, "-", `?`, "-", `*`, "-", ).Replace(s) }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/runner/action_test.go
pkg/runner/action_test.go
package runner import ( "context" "io" "io/fs" "strings" "testing" "github.com/nektos/act/pkg/model" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" ) type closerMock struct { mock.Mock } func (m *closerMock) Close() error { m.Called() return nil } func TestActionReader(t *testing.T) { yaml := strings.ReplaceAll(` name: 'name' runs: using: 'node16' main: 'main.js' `, "\t", " ") table := []struct { name string step *model.Step filename string fileContent string expected *model.Action }{ { name: "readActionYml", step: &model.Step{}, filename: "action.yml", fileContent: yaml, expected: &model.Action{ Name: "name", Runs: model.ActionRuns{ Using: "node16", Main: "main.js", PreIf: "always()", PostIf: "always()", }, }, }, { name: "readActionYaml", step: &model.Step{}, filename: "action.yaml", fileContent: yaml, expected: &model.Action{ Name: "name", Runs: model.ActionRuns{ Using: "node16", Main: "main.js", PreIf: "always()", PostIf: "always()", }, }, }, { name: "readDockerfile", step: &model.Step{}, filename: "Dockerfile", fileContent: "FROM ubuntu:20.04", expected: &model.Action{ Name: "(Synthetic)", Runs: model.ActionRuns{ Using: "docker", Image: "Dockerfile", }, }, }, { name: "readWithArgs", step: &model.Step{ With: map[string]string{ "args": "cmd", }, }, expected: &model.Action{ Name: "(Synthetic)", Inputs: map[string]model.Input{ "cwd": { Description: "(Actual working directory)", Required: false, Default: "actionDir/actionPath", }, "command": { Description: "(Actual program)", Required: false, Default: "cmd", }, }, Runs: model.ActionRuns{ Using: "node12", Main: "trampoline.js", }, }, }, } for _, tt := range table { t.Run(tt.name, func(t *testing.T) { closerMock := &closerMock{} readFile := func(filename string) (io.Reader, io.Closer, error) { if tt.filename != filename { return nil, nil, fs.ErrNotExist } return strings.NewReader(tt.fileContent), closerMock, nil } writeFile := func(filename string, _ []byte, perm fs.FileMode) error { 
assert.Equal(t, "actionDir/actionPath/trampoline.js", filename) assert.Equal(t, fs.FileMode(0400), perm) return nil } if tt.filename != "" { closerMock.On("Close") } action, err := readActionImpl(context.Background(), tt.step, "actionDir", "actionPath", readFile, writeFile) assert.Nil(t, err) assert.Equal(t, tt.expected, action) closerMock.AssertExpectations(t) }) } } func TestActionRunner(t *testing.T) { table := []struct { name string step actionStep expectedEnv map[string]string }{ { name: "with-input", step: &stepActionRemote{ Step: &model.Step{ Uses: "org/repo/path@ref", }, RunContext: &RunContext{ Config: &Config{}, Run: &model.Run{ JobID: "job", Workflow: &model.Workflow{ Jobs: map[string]*model.Job{ "job": { Name: "job", }, }, }, }, nodeToolFullPath: "node", }, action: &model.Action{ Inputs: map[string]model.Input{ "key": { Default: "default value", }, }, Runs: model.ActionRuns{ Using: "node16", }, }, env: map[string]string{}, }, expectedEnv: map[string]string{"INPUT_KEY": "default value"}, }, { name: "restore-saved-state", step: &stepActionRemote{ Step: &model.Step{ ID: "step", Uses: "org/repo/path@ref", }, RunContext: &RunContext{ ActionPath: "path", Config: &Config{}, Run: &model.Run{ JobID: "job", Workflow: &model.Workflow{ Jobs: map[string]*model.Job{ "job": { Name: "job", }, }, }, }, CurrentStep: "post-step", StepResults: map[string]*model.StepResult{ "step": {}, }, IntraActionState: map[string]map[string]string{ "step": { "name": "state value", }, }, nodeToolFullPath: "node", }, action: &model.Action{ Runs: model.ActionRuns{ Using: "node16", }, }, env: map[string]string{}, }, expectedEnv: map[string]string{"STATE_name": "state value"}, }, } for _, tt := range table { t.Run(tt.name, func(t *testing.T) { ctx := context.Background() cm := &containerMock{} cm.On("CopyDir", "/var/run/act/actions/dir/", "dir/", false).Return(func(_ context.Context) error { return nil }) envMatcher := mock.MatchedBy(func(env map[string]string) bool { for k, v := range 
tt.expectedEnv { if env[k] != v { return false } } return true }) cm.On("Exec", []string{"node", "/var/run/act/actions/dir/path"}, envMatcher, "", "").Return(func(_ context.Context) error { return nil }) tt.step.getRunContext().JobContainer = cm err := runActionImpl(tt.step, "dir", newRemoteAction("org/repo/path@ref"))(ctx) assert.Nil(t, err) cm.AssertExpectations(t) }) } }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/runner/runner_test.go
pkg/runner/runner_test.go
package runner import ( "bufio" "bytes" "context" "encoding/json" "fmt" "io" "os" "path" "path/filepath" "runtime" "strings" "testing" "github.com/joho/godotenv" "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus" assert "github.com/stretchr/testify/assert" "gopkg.in/yaml.v3" "github.com/nektos/act/pkg/common" "github.com/nektos/act/pkg/model" ) var ( baseImage = "node:24-bookworm-slim" platforms map[string]string logLevel = log.DebugLevel workdir = "testdata" secrets map[string]string ) func init() { if p := os.Getenv("ACT_TEST_IMAGE"); p != "" { baseImage = p } platforms = map[string]string{ "ubuntu-latest": baseImage, "self-hosted": "-self-hosted", } if l := os.Getenv("ACT_TEST_LOG_LEVEL"); l != "" { if lvl, err := log.ParseLevel(l); err == nil { logLevel = lvl } } if wd, err := filepath.Abs(workdir); err == nil { workdir = wd } secrets = map[string]string{} } func TestNoWorkflowsFoundByPlanner(t *testing.T) { planner, err := model.NewWorkflowPlanner("res", true, false) assert.NoError(t, err) out := log.StandardLogger().Out var buf bytes.Buffer log.SetOutput(&buf) log.SetLevel(log.DebugLevel) plan, err := planner.PlanEvent("pull_request") assert.NotNil(t, plan) assert.NoError(t, err) assert.Contains(t, buf.String(), "no workflows found by planner") buf.Reset() plan, err = planner.PlanAll() assert.NotNil(t, plan) assert.NoError(t, err) assert.Contains(t, buf.String(), "no workflows found by planner") log.SetOutput(out) } func TestGraphMissingEvent(t *testing.T) { planner, err := model.NewWorkflowPlanner("testdata/issue-1595/no-event.yml", true, false) assert.NoError(t, err) out := log.StandardLogger().Out var buf bytes.Buffer log.SetOutput(&buf) log.SetLevel(log.DebugLevel) plan, err := planner.PlanEvent("push") assert.NoError(t, err) assert.NotNil(t, plan) assert.Equal(t, 0, len(plan.Stages)) assert.Contains(t, buf.String(), "no events found for workflow: no-event.yml") log.SetOutput(out) } func TestGraphMissingFirst(t *testing.T) { planner, err := 
model.NewWorkflowPlanner("testdata/issue-1595/no-first.yml", true, false) assert.NoError(t, err) plan, err := planner.PlanEvent("push") assert.EqualError(t, err, "unable to build dependency graph for no first (no-first.yml)") assert.NotNil(t, plan) assert.Equal(t, 0, len(plan.Stages)) } func TestGraphWithMissing(t *testing.T) { planner, err := model.NewWorkflowPlanner("testdata/issue-1595/missing.yml", true, false) assert.NoError(t, err) out := log.StandardLogger().Out var buf bytes.Buffer log.SetOutput(&buf) log.SetLevel(log.DebugLevel) plan, err := planner.PlanEvent("push") assert.NotNil(t, plan) assert.Equal(t, 0, len(plan.Stages)) assert.EqualError(t, err, "unable to build dependency graph for missing (missing.yml)") assert.Contains(t, buf.String(), "unable to build dependency graph for missing (missing.yml)") log.SetOutput(out) } func TestGraphWithSomeMissing(t *testing.T) { log.SetLevel(log.DebugLevel) planner, err := model.NewWorkflowPlanner("testdata/issue-1595/", true, false) assert.NoError(t, err) out := log.StandardLogger().Out var buf bytes.Buffer log.SetOutput(&buf) log.SetLevel(log.DebugLevel) plan, err := planner.PlanAll() assert.Error(t, err, "unable to build dependency graph for no first (no-first.yml)") assert.NotNil(t, plan) assert.Equal(t, 1, len(plan.Stages)) assert.Contains(t, buf.String(), "unable to build dependency graph for missing (missing.yml)") assert.Contains(t, buf.String(), "unable to build dependency graph for no first (no-first.yml)") log.SetOutput(out) } func TestGraphEvent(t *testing.T) { planner, err := model.NewWorkflowPlanner("testdata/basic", true, false) assert.NoError(t, err) plan, err := planner.PlanEvent("push") assert.NoError(t, err) assert.NotNil(t, plan) assert.NotNil(t, plan.Stages) assert.Equal(t, len(plan.Stages), 3, "stages") assert.Equal(t, len(plan.Stages[0].Runs), 1, "stage0.runs") assert.Equal(t, len(plan.Stages[1].Runs), 1, "stage1.runs") assert.Equal(t, len(plan.Stages[2].Runs), 1, "stage2.runs") 
assert.Equal(t, plan.Stages[0].Runs[0].JobID, "check", "jobid") assert.Equal(t, plan.Stages[1].Runs[0].JobID, "build", "jobid") assert.Equal(t, plan.Stages[2].Runs[0].JobID, "test", "jobid") plan, err = planner.PlanEvent("release") assert.NoError(t, err) assert.NotNil(t, plan) assert.Equal(t, 0, len(plan.Stages)) } type TestJobFileInfo struct { workdir string workflowPath string eventName string errorMessage string platforms map[string]string secrets map[string]string } func (j *TestJobFileInfo) runTest(ctx context.Context, t *testing.T, cfg *Config) { fmt.Printf("::group::%s\n", j.workflowPath) log.SetLevel(logLevel) workdir, err := filepath.Abs(j.workdir) assert.Nil(t, err, workdir) fullWorkflowPath := filepath.Join(workdir, j.workflowPath) runnerConfig := &Config{ Workdir: workdir, BindWorkdir: false, EventName: j.eventName, EventPath: cfg.EventPath, Platforms: j.platforms, ReuseContainers: false, Env: cfg.Env, Secrets: cfg.Secrets, Inputs: cfg.Inputs, GitHubInstance: "github.com", ContainerArchitecture: cfg.ContainerArchitecture, Matrix: cfg.Matrix, ActionCache: cfg.ActionCache, } runner, err := New(runnerConfig) assert.Nil(t, err, j.workflowPath) planner, err := model.NewWorkflowPlanner(fullWorkflowPath, true, false) if j.errorMessage != "" && err != nil { assert.Error(t, err, j.errorMessage) } else if assert.Nil(t, err, fullWorkflowPath) { plan, err := planner.PlanEvent(j.eventName) assert.True(t, (err == nil) != (plan == nil), "PlanEvent should return either a plan or an error") if err == nil && plan != nil { err = runner.NewPlanExecutor(plan)(ctx) if j.errorMessage == "" { assert.Nil(t, err, fullWorkflowPath) } else { assert.Error(t, err, j.errorMessage) } } } fmt.Println("::endgroup::") } type TestConfig struct { LocalRepositories map[string]string `yaml:"local-repositories"` } func TestRunEvent(t *testing.T) { if testing.Short() { t.Skip("skipping integration test") } ctx := context.Background() tables := []TestJobFileInfo{ // Shells {workdir, 
"shells/defaults", "push", "", platforms, secrets}, // TODO: figure out why it fails // {workdir, "shells/custom", "push", "", map[string]string{"ubuntu-latest": "catthehacker/ubuntu:pwsh-latest"}, }, // custom image with pwsh {workdir, "shells/pwsh", "push", "", map[string]string{"ubuntu-latest": "catthehacker/ubuntu:pwsh-latest"}, secrets}, // custom image with pwsh {workdir, "shells/bash", "push", "", platforms, secrets}, {workdir, "shells/python", "push", "", map[string]string{"ubuntu-latest": "node:16-buster"}, secrets}, // slim doesn't have python {workdir, "shells/sh", "push", "", platforms, secrets}, // Local action {workdir, "local-action-docker-url", "push", "", platforms, secrets}, {workdir, "local-action-dockerfile", "push", "", platforms, secrets}, {workdir, "local-action-via-composite-dockerfile", "push", "", platforms, secrets}, {workdir, "local-action-js", "push", "", platforms, secrets}, // Uses {workdir, "uses-composite", "push", "", platforms, secrets}, {workdir, "uses-composite-with-error", "push", "Job 'failing-composite-action' failed", platforms, secrets}, {workdir, "uses-composite-check-for-input-collision", "push", "", platforms, secrets}, {workdir, "uses-composite-check-for-input-shadowing", "push", "", platforms, secrets}, {workdir, "uses-nested-composite", "push", "", platforms, secrets}, {workdir, "remote-action-composite-js-pre-with-defaults", "push", "", platforms, secrets}, {workdir, "remote-action-composite-action-ref", "push", "", platforms, secrets}, {workdir, "uses-workflow", "push", "", platforms, map[string]string{"secret": "keep_it_private"}}, {workdir, "uses-workflow", "pull_request", "", platforms, map[string]string{"secret": "keep_it_private"}}, {workdir, "uses-docker-url", "push", "", platforms, secrets}, {workdir, "act-composite-env-test", "push", "", platforms, secrets}, // Eval {workdir, "evalmatrix", "push", "", platforms, secrets}, {workdir, "evalmatrixneeds", "push", "", platforms, secrets}, {workdir, 
"evalmatrixneeds2", "push", "", platforms, secrets}, {workdir, "evalmatrix-merge-map", "push", "", platforms, secrets}, {workdir, "evalmatrix-merge-array", "push", "", platforms, secrets}, {workdir, "issue-1195", "push", "", platforms, secrets}, {workdir, "basic", "push", "", platforms, secrets}, {workdir, "fail", "push", "exit with `FAILURE`: 1", platforms, secrets}, {workdir, "runs-on", "push", "", platforms, secrets}, {workdir, "checkout", "push", "", platforms, secrets}, {workdir, "job-container", "push", "", platforms, secrets}, {workdir, "job-container-non-root", "push", "", platforms, secrets}, {workdir, "job-container-invalid-credentials", "push", "failed to handle credentials: failed to interpolate container.credentials.password", platforms, secrets}, {workdir, "container-hostname", "push", "", platforms, secrets}, {workdir, "remote-action-docker", "push", "", platforms, secrets}, {workdir, "remote-action-docker-new-cache", "push", "", platforms, secrets}, {workdir, "remote-action-js", "push", "", platforms, secrets}, {workdir, "remote-action-js-node-user", "push", "", platforms, secrets}, // Test if this works with non root container {workdir, "matrix", "push", "", platforms, secrets}, {workdir, "matrix-include-exclude", "push", "", platforms, secrets}, {workdir, "matrix-exitcode", "push", "Job 'test' failed", platforms, secrets}, {workdir, "commands", "push", "", platforms, secrets}, {workdir, "workdir", "push", "", platforms, secrets}, {workdir, "defaults-run", "push", "", platforms, secrets}, {workdir, "composite-fail-with-output", "push", "", platforms, secrets}, {workdir, "issue-597", "push", "", platforms, secrets}, {workdir, "issue-598", "push", "", platforms, secrets}, {workdir, "if-env-act", "push", "", platforms, secrets}, {workdir, "env-and-path", "push", "", platforms, secrets}, {workdir, "environment-files", "push", "", platforms, secrets}, {workdir, "GITHUB_STATE", "push", "", platforms, secrets}, {workdir, "environment-files-parser-bug", 
"push", "", platforms, secrets}, {workdir, "non-existent-action", "push", "Job 'nopanic' failed", platforms, secrets}, {workdir, "outputs", "push", "", platforms, secrets}, {workdir, "networking", "push", "", platforms, secrets}, {workdir, "steps-context/conclusion", "push", "", platforms, secrets}, {workdir, "steps-context/outcome", "push", "", platforms, secrets}, {workdir, "job-status-check", "push", "job 'fail' failed", platforms, secrets}, {workdir, "if-expressions", "push", "Job 'mytest' failed", platforms, secrets}, {workdir, "actions-environment-and-context-tests", "push", "", platforms, secrets}, {workdir, "uses-action-with-pre-and-post-step", "push", "", platforms, secrets}, {workdir, "evalenv", "push", "", platforms, secrets}, {workdir, "docker-action-custom-path", "push", "", platforms, secrets}, {workdir, "GITHUB_ENV-use-in-env-ctx", "push", "", platforms, secrets}, {workdir, "ensure-post-steps", "push", "Job 'second-post-step-should-fail' failed", platforms, secrets}, {workdir, "workflow_call_inputs", "workflow_call", "", platforms, secrets}, {workdir, "workflow_dispatch", "workflow_dispatch", "", platforms, secrets}, {workdir, "workflow_dispatch_no_inputs_mapping", "workflow_dispatch", "", platforms, secrets}, {workdir, "workflow_dispatch-scalar", "workflow_dispatch", "", platforms, secrets}, {workdir, "workflow_dispatch-scalar-composite-action", "workflow_dispatch", "", platforms, secrets}, {workdir, "uses-workflow-defaults", "workflow_dispatch", "", platforms, secrets}, {workdir, "job-needs-context-contains-result", "push", "", platforms, secrets}, {"../model/testdata", "strategy", "push", "", platforms, secrets}, // TODO: move all testdata into pkg so we can validate it with planner and runner {"../model/testdata", "container-volumes", "push", "", platforms, secrets}, {workdir, "path-handling", "push", "", platforms, secrets}, {workdir, "do-not-leak-step-env-in-composite", "push", "", platforms, secrets}, {workdir, "set-env-step-env-override", 
"push", "", platforms, secrets}, {workdir, "set-env-new-env-file-per-step", "push", "", platforms, secrets}, {workdir, "no-panic-on-invalid-composite-action", "push", "jobs failed due to invalid action", platforms, secrets}, // GITHUB_STEP_SUMMARY {workdir, "stepsummary", "push", "", platforms, secrets}, // services {workdir, "services", "push", "", platforms, secrets}, {workdir, "services-empty-image", "push", "", platforms, secrets}, {workdir, "services-host-network", "push", "", platforms, secrets}, {workdir, "services-with-container", "push", "", platforms, secrets}, {workdir, "mysql-service-container-with-health-check", "push", "", platforms, secrets}, // local remote action overrides {workdir, "local-remote-action-overrides", "push", "", platforms, secrets}, // docker action on host executor {workdir, "docker-action-host-env", "push", "", platforms, secrets}, } for _, table := range tables { t.Run(table.workflowPath, func(t *testing.T) { config := &Config{ Secrets: table.secrets, } eventFile := filepath.Join(workdir, table.workflowPath, "event.json") if _, err := os.Stat(eventFile); err == nil { config.EventPath = eventFile } testConfigFile := filepath.Join(workdir, table.workflowPath, "config/config.yml") if file, err := os.ReadFile(testConfigFile); err == nil { testConfig := &TestConfig{} if yaml.Unmarshal(file, testConfig) == nil { if testConfig.LocalRepositories != nil { config.ActionCache = &LocalRepositoryCache{ Parent: GoGitActionCache{ path.Clean(path.Join(workdir, "cache")), }, LocalRepositories: testConfig.LocalRepositories, CacheDirCache: map[string]string{}, } } } } table.runTest(ctx, t, config) }) } } type captureJobLoggerFactory struct { buffer bytes.Buffer } func (factory *captureJobLoggerFactory) WithJobLogger() *logrus.Logger { logger := logrus.New() logger.SetOutput(&factory.buffer) logger.SetLevel(log.TraceLevel) logger.SetFormatter(&log.JSONFormatter{}) return logger } func TestPullAndPostStepFailureIsJobFailure(t *testing.T) { if 
testing.Short() { t.Skip("skipping integration test") } defCache := &GoGitActionCache{ path.Clean(path.Join(workdir, "cache")), } mockCache := &mockCache{} tables := []struct { TestJobFileInfo ActionCache ActionCache SetupResult string }{ {TestJobFileInfo{workdir, "checkout", "push", "pull failure", map[string]string{"ubuntu-latest": "localhost:0000/missing:latest"}, secrets}, defCache, "failure"}, {TestJobFileInfo{workdir, "post-step-failure-is-job-failure", "push", "post failure", map[string]string{"ubuntu-latest": "-self-hosted"}, secrets}, mockCache, "success"}, } for _, table := range tables { t.Run(table.workflowPath, func(t *testing.T) { factory := &captureJobLoggerFactory{} config := &Config{ Secrets: table.secrets, } eventFile := filepath.Join(workdir, table.workflowPath, "event.json") if _, err := os.Stat(eventFile); err == nil { config.EventPath = eventFile } config.ActionCache = table.ActionCache logger := logrus.New() logger.SetOutput(&factory.buffer) logger.SetLevel(log.TraceLevel) logger.SetFormatter(&log.JSONFormatter{}) table.runTest(common.WithLogger(WithJobLoggerFactory(t.Context(), factory), logger), t, config) scan := bufio.NewScanner(&factory.buffer) var hasJobResult, hasStepResult bool for scan.Scan() { t.Log(scan.Text()) entry := map[string]interface{}{} if json.Unmarshal(scan.Bytes(), &entry) == nil { if val, ok := entry["jobResult"]; ok { assert.Equal(t, "failure", val) hasJobResult = true } if val, ok := entry["stepResult"]; ok && !hasStepResult { assert.Equal(t, table.SetupResult, val) hasStepResult = true } } } assert.True(t, hasStepResult, "stepResult not found") assert.True(t, hasJobResult, "jobResult not found") }) } } type mockCache struct { } func (c mockCache) Fetch(ctx context.Context, cacheDir string, url string, ref string, token string) (string, error) { _ = ctx _ = cacheDir _ = url _ = ref _ = token return "", fmt.Errorf("fetch failure") } func (c mockCache) GetTarArchive(ctx context.Context, cacheDir string, sha string, 
includePrefix string) (io.ReadCloser, error) { _ = ctx _ = cacheDir _ = sha _ = includePrefix return nil, fmt.Errorf("fetch failure") } func TestFetchFailureIsJobFailure(t *testing.T) { if testing.Short() { t.Skip("skipping integration test") } tables := []TestJobFileInfo{ {workdir, "action-cache-v2-fetch-failure-is-job-error", "push", "fetch failure", map[string]string{"ubuntu-latest": "-self-hosted"}, secrets}, } for _, table := range tables { t.Run(table.workflowPath, func(t *testing.T) { factory := &captureJobLoggerFactory{} config := &Config{ Secrets: table.secrets, } eventFile := filepath.Join(workdir, table.workflowPath, "event.json") if _, err := os.Stat(eventFile); err == nil { config.EventPath = eventFile } config.ActionCache = &mockCache{} logger := logrus.New() logger.SetOutput(&factory.buffer) logger.SetLevel(log.TraceLevel) logger.SetFormatter(&log.JSONFormatter{}) table.runTest(common.WithLogger(WithJobLoggerFactory(t.Context(), factory), logger), t, config) scan := bufio.NewScanner(&factory.buffer) var hasJobResult bool for scan.Scan() { t.Log(scan.Text()) entry := map[string]interface{}{} if json.Unmarshal(scan.Bytes(), &entry) == nil { if val, ok := entry["jobResult"]; ok { assert.Equal(t, "failure", val) hasJobResult = true } } } assert.True(t, hasJobResult, "jobResult not found") }) } } func TestRunEventHostEnvironment(t *testing.T) { if testing.Short() { t.Skip("skipping integration test") } ctx := context.Background() tables := []TestJobFileInfo{} if runtime.GOOS == "linux" { platforms := map[string]string{ "ubuntu-latest": "-self-hosted", } tables = append(tables, []TestJobFileInfo{ // Shells {workdir, "shells/defaults", "push", "", platforms, secrets}, {workdir, "shells/pwsh", "push", "", platforms, secrets}, {workdir, "shells/bash", "push", "", platforms, secrets}, {workdir, "shells/python", "push", "", platforms, secrets}, {workdir, "shells/sh", "push", "", platforms, secrets}, // Local action {workdir, "local-action-js", "push", "", 
platforms, secrets}, // Uses {workdir, "uses-composite", "push", "", platforms, secrets}, {workdir, "uses-composite-with-error", "push", "Job 'failing-composite-action' failed", platforms, secrets}, {workdir, "uses-nested-composite", "push", "", platforms, secrets}, {workdir, "act-composite-env-test", "push", "", platforms, secrets}, // Eval {workdir, "evalmatrix", "push", "", platforms, secrets}, {workdir, "evalmatrixneeds", "push", "", platforms, secrets}, {workdir, "evalmatrixneeds2", "push", "", platforms, secrets}, {workdir, "evalmatrix-merge-map", "push", "", platforms, secrets}, {workdir, "evalmatrix-merge-array", "push", "", platforms, secrets}, {workdir, "issue-1195", "push", "", platforms, secrets}, {workdir, "fail", "push", "exit with `FAILURE`: 1", platforms, secrets}, {workdir, "runs-on", "push", "", platforms, secrets}, {workdir, "checkout", "push", "", platforms, secrets}, {workdir, "remote-action-js", "push", "", platforms, secrets}, {workdir, "matrix", "push", "", platforms, secrets}, {workdir, "matrix-include-exclude", "push", "", platforms, secrets}, {workdir, "commands", "push", "", platforms, secrets}, // Disabled for now because this test is somewhat invalid // shell sh is not necessarily bash if the job has no override // {workdir, "defaults-run", "push", "", platforms, secrets}, {workdir, "composite-fail-with-output", "push", "", platforms, secrets}, {workdir, "issue-597", "push", "", platforms, secrets}, {workdir, "issue-598", "push", "", platforms, secrets}, {workdir, "if-env-act", "push", "", platforms, secrets}, {workdir, "env-and-path", "push", "", platforms, secrets}, {workdir, "non-existent-action", "push", "Job 'nopanic' failed", platforms, secrets}, {workdir, "outputs", "push", "", platforms, secrets}, {workdir, "steps-context/conclusion", "push", "", platforms, secrets}, {workdir, "steps-context/outcome", "push", "", platforms, secrets}, {workdir, "job-status-check", "push", "job 'fail' failed", platforms, secrets}, {workdir, 
"if-expressions", "push", "Job 'mytest' failed", platforms, secrets}, {workdir, "uses-action-with-pre-and-post-step", "push", "", platforms, secrets}, {workdir, "evalenv", "push", "", platforms, secrets}, {workdir, "ensure-post-steps", "push", "Job 'second-post-step-should-fail' failed", platforms, secrets}, }...) } if runtime.GOOS == "windows" { platforms := map[string]string{ "windows-latest": "-self-hosted", } tables = append(tables, []TestJobFileInfo{ {workdir, "windows-prepend-path", "push", "", platforms, secrets}, {workdir, "windows-add-env", "push", "", platforms, secrets}, {workdir, "windows-prepend-path-powershell-5", "push", "", platforms, secrets}, {workdir, "windows-add-env-powershell-5", "push", "", platforms, secrets}, {workdir, "windows-shell-cmd", "push", "", platforms, secrets}, }...) } else { platforms := map[string]string{ "self-hosted": "-self-hosted", "ubuntu-latest": "-self-hosted", } tables = append(tables, []TestJobFileInfo{ {workdir, "nix-prepend-path", "push", "", platforms, secrets}, {workdir, "inputs-via-env-context", "push", "", platforms, secrets}, {workdir, "do-not-leak-step-env-in-composite", "push", "", platforms, secrets}, {workdir, "set-env-step-env-override", "push", "", platforms, secrets}, {workdir, "set-env-new-env-file-per-step", "push", "", platforms, secrets}, {workdir, "no-panic-on-invalid-composite-action", "push", "jobs failed due to invalid action", platforms, secrets}, }...) 
} for _, table := range tables { t.Run(table.workflowPath, func(t *testing.T) { table.runTest(ctx, t, &Config{}) }) } } func TestDryrunEvent(t *testing.T) { if testing.Short() { t.Skip("skipping integration test") } ctx := common.WithDryrun(context.Background(), true) tables := []TestJobFileInfo{ // Shells {workdir, "shells/defaults", "push", "", platforms, secrets}, {workdir, "shells/pwsh", "push", "", map[string]string{"ubuntu-latest": "catthehacker/ubuntu:pwsh-latest"}, secrets}, // custom image with pwsh {workdir, "shells/bash", "push", "", platforms, secrets}, {workdir, "shells/python", "push", "", map[string]string{"ubuntu-latest": "node:16-buster"}, secrets}, // slim doesn't have python {workdir, "shells/sh", "push", "", platforms, secrets}, // Local action {workdir, "local-action-docker-url", "push", "", platforms, secrets}, {workdir, "local-action-dockerfile", "push", "", platforms, secrets}, {workdir, "local-action-via-composite-dockerfile", "push", "", platforms, secrets}, {workdir, "local-action-js", "push", "", platforms, secrets}, } for _, table := range tables { t.Run(table.workflowPath, func(t *testing.T) { table.runTest(ctx, t, &Config{}) }) } } func TestDockerActionForcePullForceRebuild(t *testing.T) { if testing.Short() { t.Skip("skipping integration test") } ctx := context.Background() config := &Config{ ForcePull: true, ForceRebuild: true, } tables := []TestJobFileInfo{ {workdir, "local-action-dockerfile", "push", "", platforms, secrets}, {workdir, "local-action-via-composite-dockerfile", "push", "", platforms, secrets}, } for _, table := range tables { t.Run(table.workflowPath, func(t *testing.T) { table.runTest(ctx, t, config) }) } } func TestRunDifferentArchitecture(t *testing.T) { if testing.Short() { t.Skip("skipping integration test") } tjfi := TestJobFileInfo{ workdir: workdir, workflowPath: "basic", eventName: "push", errorMessage: "", platforms: platforms, } tjfi.runTest(context.Background(), t, &Config{ContainerArchitecture: 
"linux/arm64"}) } type maskJobLoggerFactory struct { Output bytes.Buffer } func (f *maskJobLoggerFactory) WithJobLogger() *log.Logger { logger := log.New() logger.SetOutput(io.MultiWriter(&f.Output, os.Stdout)) logger.SetLevel(log.DebugLevel) return logger } func TestMaskValues(t *testing.T) { assertNoSecret := func(text string, _ string) { index := strings.Index(text, "composite secret") if index > -1 { fmt.Printf("\nFound Secret in the given text:\n%s\n", text) } assert.False(t, strings.Contains(text, "composite secret")) } if testing.Short() { t.Skip("skipping integration test") } log.SetLevel(log.DebugLevel) tjfi := TestJobFileInfo{ workdir: workdir, workflowPath: "mask-values", eventName: "push", errorMessage: "", platforms: platforms, } logger := &maskJobLoggerFactory{} tjfi.runTest(WithJobLoggerFactory(common.WithLogger(context.Background(), logger.WithJobLogger()), logger), t, &Config{}) output := logger.Output.String() assertNoSecret(output, "secret value") assertNoSecret(output, "YWJjCg==") } func TestRunEventSecrets(t *testing.T) { if testing.Short() { t.Skip("skipping integration test") } workflowPath := "secrets" tjfi := TestJobFileInfo{ workdir: workdir, workflowPath: workflowPath, eventName: "push", errorMessage: "", platforms: platforms, } env, err := godotenv.Read(filepath.Join(workdir, workflowPath, ".env")) assert.NoError(t, err, "Failed to read .env") secrets, _ := godotenv.Read(filepath.Join(workdir, workflowPath, ".secrets")) assert.NoError(t, err, "Failed to read .secrets") tjfi.runTest(context.Background(), t, &Config{Secrets: secrets, Env: env}) } func TestRunActionInputs(t *testing.T) { if testing.Short() { t.Skip("skipping integration test") } workflowPath := "input-from-cli" tjfi := TestJobFileInfo{ workdir: workdir, workflowPath: workflowPath, eventName: "workflow_dispatch", errorMessage: "", platforms: platforms, } inputs := map[string]string{ "SOME_INPUT": "input", } tjfi.runTest(context.Background(), t, &Config{Inputs: inputs}) } 
func TestRunEventPullRequest(t *testing.T) { if testing.Short() { t.Skip("skipping integration test") } workflowPath := "pull-request" tjfi := TestJobFileInfo{ workdir: workdir, workflowPath: workflowPath, eventName: "pull_request", errorMessage: "", platforms: platforms, } tjfi.runTest(context.Background(), t, &Config{EventPath: filepath.Join(workdir, workflowPath, "event.json")}) } func TestRunMatrixWithUserDefinedInclusions(t *testing.T) { if testing.Short() { t.Skip("skipping integration test") } workflowPath := "matrix-with-user-inclusions" tjfi := TestJobFileInfo{ workdir: workdir, workflowPath: workflowPath, eventName: "push", errorMessage: "", platforms: platforms, } matrix := map[string]map[string]bool{ "node": { "8": true, "8.x": true, }, "os": { "ubuntu-18.04": true, }, } tjfi.runTest(context.Background(), t, &Config{Matrix: matrix}) }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/runner/step_factory.go
pkg/runner/step_factory.go
package runner

import (
	"fmt"

	"github.com/nektos/act/pkg/model"
)

// stepFactory constructs the concrete runtime implementation for a workflow step.
type stepFactory interface {
	newStep(step *model.Step, rc *RunContext) (step, error)
}

type stepFactoryImpl struct{}

// newStep maps a step model onto its runtime implementation based on the
// step's declared type: a `run` script, a local or remote action, or a
// docker:// image URL. It returns an error for steps whose run/uses syntax
// could not be classified.
func (sf *stepFactoryImpl) newStep(stepModel *model.Step, rc *RunContext) (step, error) {
	switch stepModel.Type() {
	case model.StepTypeInvalid:
		// The step declares neither a valid `run` nor a valid `uses` clause.
		// NOTE: error strings lowercased per Go convention (staticcheck ST1005).
		return nil, fmt.Errorf("invalid run/uses syntax for job:%s step:%+v", rc.Run, stepModel)
	case model.StepTypeRun:
		return &stepRun{
			Step:       stepModel,
			RunContext: rc,
		}, nil
	case model.StepTypeUsesActionLocal:
		return &stepActionLocal{
			Step:       stepModel,
			RunContext: rc,
			readAction: readActionImpl,
			runAction:  runActionImpl,
		}, nil
	case model.StepTypeUsesActionRemote:
		return &stepActionRemote{
			Step:       stepModel,
			RunContext: rc,
			readAction: readActionImpl,
			runAction:  runActionImpl,
		}, nil
	case model.StepTypeUsesDockerURL:
		return &stepDocker{
			Step:       stepModel,
			RunContext: rc,
		}, nil
	}
	return nil, fmt.Errorf("unable to determine how to run job:%s step:%+v", rc.Run, stepModel)
}
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/runner/action_composite.go
pkg/runner/action_composite.go
package runner

import (
	"context"
	"fmt"
	"regexp"
	"strings"

	"github.com/nektos/act/pkg/common"
	"github.com/nektos/act/pkg/model"
)

// evaluateCompositeInputAndEnv builds the environment for a composite action
// invocation: the parent step's env minus any INPUT_* entries, plus one
// INPUT_<NAME> entry per declared action input (taken from the step's already
// evaluated env when the step sets it, otherwise from the interpolated input
// default), plus GITHUB_ACTION_REPOSITORY/GITHUB_ACTION_REF.
func evaluateCompositeInputAndEnv(ctx context.Context, parent *RunContext, step actionStep) map[string]string {
	env := make(map[string]string)
	stepEnv := *step.getEnv()
	for k, v := range stepEnv {
		// do not set current inputs into composite action
		// the required inputs are added in the second loop
		if !strings.HasPrefix(k, "INPUT_") {
			env[k] = v
		}
	}
	ee := parent.NewStepExpressionEvaluator(ctx, step)
	for inputID, input := range step.getActionModel().Inputs {
		// Normalize the input id to the INPUT_<UPPER_SNAKE> env-var form.
		envKey := regexp.MustCompile("[^A-Z0-9-]").ReplaceAllString(strings.ToUpper(inputID), "_")
		envKey = fmt.Sprintf("INPUT_%s", strings.ToUpper(envKey))
		// lookup if key is defined in the step but the already
		// evaluated value from the environment
		_, defined := step.getStepModel().With[inputID]
		if value, ok := stepEnv[envKey]; defined && ok {
			env[envKey] = value
		} else {
			// defaults could contain expressions
			env[envKey] = ee.Interpolate(ctx, input.Default)
		}
	}
	gh := step.getGithubContext(ctx)
	env["GITHUB_ACTION_REPOSITORY"] = gh.ActionRepository
	env["GITHUB_ACTION_REF"] = gh.ActionRef
	return env
}

// newCompositeRunContext creates a child RunContext in which a composite
// action's steps execute. It shares the parent's job container, masks,
// extra path, and global env, but uses a config copy with secrets stripped
// and a minimal synthetic workflow containing only the parent's job id.
func newCompositeRunContext(ctx context.Context, parent *RunContext, step actionStep, actionPath string) *RunContext {
	env := evaluateCompositeInputAndEnv(ctx, parent, step)

	// run with the global config but without secrets
	configCopy := *(parent.Config)
	configCopy.Secrets = nil

	// create a run context for the composite action to run in
	compositerc := &RunContext{
		Name:    parent.Name,
		JobName: parent.JobName,
		Run: &model.Run{
			JobID: parent.Run.JobID,
			Workflow: &model.Workflow{
				Name: parent.Run.Workflow.Name,
				Jobs: map[string]*model.Job{
					parent.Run.JobID: {},
				},
			},
		},
		Config:           &configCopy,
		StepResults:      map[string]*model.StepResult{},
		JobContainer:     parent.JobContainer,
		ActionPath:       actionPath,
		Env:              env,
		GlobalEnv:        parent.GlobalEnv,
		Masks:            parent.Masks,
		ExtraPath:        parent.ExtraPath,
		Parent:           parent,
		EventJSON:        parent.EventJSON,
		nodeToolFullPath: parent.nodeToolFullPath,
	}
	compositerc.ExprEval = compositerc.NewExpressionEvaluator(ctx)
	return compositerc
}

// execAsComposite returns an executor that runs a composite action's main
// steps in its own RunContext, then copies outputs, masks, extra path, and
// global env changes back into the parent job's RunContext.
func execAsComposite(step actionStep) common.Executor {
	rc := step.getRunContext()
	action := step.getActionModel()

	return func(ctx context.Context) error {
		compositeRC := step.getCompositeRunContext(ctx)
		steps := step.getCompositeSteps()

		if steps == nil || steps.main == nil {
			return fmt.Errorf("missing steps in composite action")
		}

		ctx = WithCompositeLogger(ctx, &compositeRC.Masks)

		// Run the composite steps first; output mapping and env propagation
		// below still happen even when a step failed (err is returned last).
		err := steps.main(ctx)

		// Map outputs from composite RunContext to job RunContext
		eval := compositeRC.NewExpressionEvaluator(ctx)
		for outputName, output := range action.Outputs {
			rc.setOutput(ctx, map[string]string{
				"name": outputName,
			}, eval.Interpolate(ctx, output.Value))
		}

		rc.Masks = append(rc.Masks, compositeRC.Masks...)
		rc.ExtraPath = compositeRC.ExtraPath
		// compositeRC.Env is dirty, contains INPUT_ and merged step env, only rely on compositeRC.GlobalEnv
		mergeIntoMap := mergeIntoMapCaseSensitive
		if rc.JobContainer.IsEnvironmentCaseInsensitive() {
			mergeIntoMap = mergeIntoMapCaseInsensitive
		}
		if rc.GlobalEnv == nil {
			rc.GlobalEnv = map[string]string{}
		}
		mergeIntoMap(rc.GlobalEnv, compositeRC.GlobalEnv)
		mergeIntoMap(rc.Env, compositeRC.GlobalEnv)

		return err
	}
}

// compositeSteps bundles the pre, main, and post executors for a composite
// action's step pipeline.
type compositeSteps struct {
	pre  common.Executor
	main common.Executor
	post common.Executor
}

// Executor returns a pipeline executor for all the steps in the job
func (rc *RunContext) compositeExecutor(action *model.Action) *compositeSteps {
	steps := make([]common.Executor, 0)
	preSteps := make([]common.Executor, 0)
	var postExecutor common.Executor

	sf := &stepFactoryImpl{}

	for i, step := range action.Runs.Steps {
		if step.ID == "" {
			// Steps without an explicit id are addressed by their index.
			step.ID = fmt.Sprintf("%d", i)
		}

		// create a copy of the step, since this composite action could
		// run multiple times and we might modify the instance
		stepcopy := step

		step, err := sf.newStep(&stepcopy, rc)
		if err != nil {
			// Surface the construction error when the pipeline runs.
			return &compositeSteps{
				main: common.NewErrorExecutor(err),
			}
		}
		stepID := step.getStepModel().ID
		stepPre := rc.newCompositeCommandExecutor(step.pre())
		preSteps = append(preSteps, newCompositeStepLogExecutor(stepPre, stepID))

		steps = append(steps, func(ctx context.Context) error {
			ctx = WithCompositeStepLogger(ctx, stepID)
			logger := common.Logger(ctx)
			// Errors are recorded on the job error container instead of
			// aborting the pipeline, so later steps still observe them.
			err := rc.newCompositeCommandExecutor(step.main())(ctx)
			if err != nil {
				logger.Errorf("%v", err)
				common.SetJobError(ctx, err)
			} else if ctx.Err() != nil {
				logger.Errorf("%v", ctx.Err())
				common.SetJobError(ctx, ctx.Err())
			}
			return nil
		})

		// run the post executor in reverse order
		if postExecutor != nil {
			stepPost := rc.newCompositeCommandExecutor(step.post())
			postExecutor = newCompositeStepLogExecutor(stepPost.Finally(postExecutor), stepID)
		} else {
			stepPost := rc.newCompositeCommandExecutor(step.post())
			postExecutor = newCompositeStepLogExecutor(stepPost, stepID)
		}
	}

	steps = append(steps, common.JobError)
	return &compositeSteps{
		pre: func(ctx context.Context) error {
			return common.NewPipelineExecutor(preSteps...)(common.WithJobErrorContainer(ctx))
		},
		main: func(ctx context.Context) error {
			return common.NewPipelineExecutor(steps...)(common.WithJobErrorContainer(ctx))
		},
		post: postExecutor,
	}
}

// newCompositeCommandExecutor wraps an executor so that, while it runs, the
// job container's log writers are replaced with a line writer routed through
// this RunContext's command handler (and restored afterwards).
func (rc *RunContext) newCompositeCommandExecutor(executor common.Executor) common.Executor {
	return func(ctx context.Context) error {
		ctx = WithCompositeLogger(ctx, &rc.Masks)

		// We need to inject a composite RunContext related command
		// handler into the current running job container
		// We need this, to support scoping commands to the composite action
		// executing.
		rawLogger := common.Logger(ctx).WithField("raw_output", true)
		logWriter := common.NewLineWriter(rc.commandHandler(ctx), func(s string) bool {
			if rc.Config.LogOutput {
				rawLogger.Infof("%s", s)
			} else {
				rawLogger.Debugf("%s", s)
			}
			return true
		})
		oldout, olderr := rc.JobContainer.ReplaceLogWriter(logWriter, logWriter)
		defer rc.JobContainer.ReplaceLogWriter(oldout, olderr)
		return executor(ctx)
	}
}

// newCompositeStepLogExecutor runs a step executor under a step-scoped
// composite logger, recording any failure (or context cancellation) on the
// job error container rather than returning it.
func newCompositeStepLogExecutor(runStep common.Executor, stepID string) common.Executor {
	return func(ctx context.Context) error {
		ctx = WithCompositeStepLogger(ctx, stepID)
		logger := common.Logger(ctx)
		err := runStep(ctx)
		if err != nil {
			logger.Errorf("%v", err)
			common.SetJobError(ctx, err)
		} else if ctx.Err() != nil {
			logger.Errorf("%v", ctx.Err())
			common.SetJobError(ctx, ctx.Err())
		}
		return nil
	}
}
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/runner/action.go
pkg/runner/action.go
package runner import ( "context" "embed" "errors" "fmt" "io" "io/fs" "os" "path" "path/filepath" "regexp" "runtime" "strings" "github.com/kballard/go-shellquote" "github.com/nektos/act/pkg/common" "github.com/nektos/act/pkg/container" "github.com/nektos/act/pkg/model" ) type actionStep interface { step getActionModel() *model.Action getCompositeRunContext(context.Context) *RunContext getCompositeSteps() *compositeSteps } type readAction func(ctx context.Context, step *model.Step, actionDir string, actionPath string, readFile actionYamlReader, writeFile fileWriter) (*model.Action, error) type actionYamlReader func(filename string) (io.Reader, io.Closer, error) type fileWriter func(filename string, data []byte, perm fs.FileMode) error type runAction func(step actionStep, actionDir string, remoteAction *remoteAction) common.Executor //go:embed res/trampoline.js var trampoline embed.FS func readActionImpl(ctx context.Context, step *model.Step, actionDir string, actionPath string, readFile actionYamlReader, writeFile fileWriter) (*model.Action, error) { logger := common.Logger(ctx) allErrors := []error{} addError := func(fileName string, err error) { if err != nil { allErrors = append(allErrors, fmt.Errorf("failed to read '%s' from action '%s' with path '%s' of step: %w", fileName, step.String(), actionPath, err)) } else { // One successful read, clear error state allErrors = nil } } reader, closer, err := readFile("action.yml") addError("action.yml", err) if os.IsNotExist(err) { reader, closer, err = readFile("action.yaml") addError("action.yaml", err) if os.IsNotExist(err) { _, closer, err := readFile("Dockerfile") addError("Dockerfile", err) if err == nil { closer.Close() action := &model.Action{ Name: "(Synthetic)", Runs: model.ActionRuns{ Using: "docker", Image: "Dockerfile", }, } logger.Debugf("Using synthetic action %v for Dockerfile", action) return action, nil } if step.With != nil { if val, ok := step.With["args"]; ok { var b []byte if b, err = 
trampoline.ReadFile("res/trampoline.js"); err != nil { return nil, err } err2 := writeFile(filepath.Join(actionDir, actionPath, "trampoline.js"), b, 0o400) if err2 != nil { return nil, err2 } action := &model.Action{ Name: "(Synthetic)", Inputs: map[string]model.Input{ "cwd": { Description: "(Actual working directory)", Required: false, Default: filepath.Join(actionDir, actionPath), }, "command": { Description: "(Actual program)", Required: false, Default: val, }, }, Runs: model.ActionRuns{ Using: "node12", Main: "trampoline.js", }, } logger.Debugf("Using synthetic action %v", action) return action, nil } } } } if allErrors != nil { return nil, errors.Join(allErrors...) } defer closer.Close() action, err := model.ReadAction(reader) logger.Debugf("Read action %v from '%s'", action, "Unknown") return action, err } func maybeCopyToActionDir(ctx context.Context, step actionStep, actionDir string, actionPath string, containerActionDir string) error { logger := common.Logger(ctx) rc := step.getRunContext() stepModel := step.getStepModel() if stepModel.Type() != model.StepTypeUsesActionRemote { return nil } var containerActionDirCopy string containerActionDirCopy = strings.TrimSuffix(containerActionDir, actionPath) logger.Debug(containerActionDirCopy) if !strings.HasSuffix(containerActionDirCopy, `/`) { containerActionDirCopy += `/` } if rc.Config != nil && rc.Config.ActionCache != nil { raction := step.(*stepActionRemote) ta, err := rc.Config.ActionCache.GetTarArchive(ctx, raction.cacheDir, raction.resolvedSha, "") if err != nil { return err } defer ta.Close() return rc.JobContainer.CopyTarStream(ctx, containerActionDirCopy, ta) } if err := removeGitIgnore(ctx, actionDir); err != nil { return err } return rc.JobContainer.CopyDir(containerActionDirCopy, actionDir+"/", rc.Config.UseGitIgnore)(ctx) } func runActionImpl(step actionStep, actionDir string, remoteAction *remoteAction) common.Executor { rc := step.getRunContext() stepModel := step.getStepModel() return func(ctx 
context.Context) error { logger := common.Logger(ctx) actionPath := "" if remoteAction != nil && remoteAction.Path != "" { actionPath = remoteAction.Path } action := step.getActionModel() logger.Debugf("About to run action %v", action) err := setupActionEnv(ctx, step, remoteAction) if err != nil { return err } actionLocation := path.Join(actionDir, actionPath) actionName, containerActionDir := getContainerActionPaths(stepModel, actionLocation, rc) logger.Debugf("type=%v actionDir=%s actionPath=%s workdir=%s actionCacheDir=%s actionName=%s containerActionDir=%s", stepModel.Type(), actionDir, actionPath, rc.Config.Workdir, rc.ActionCacheDir(), actionName, containerActionDir) x := action.Runs.Using switch { case x.IsNode(): if err := maybeCopyToActionDir(ctx, step, actionDir, actionPath, containerActionDir); err != nil { return err } containerArgs := []string{rc.GetNodeToolFullPath(ctx), path.Join(containerActionDir, action.Runs.Main)} logger.Debugf("executing remote job container: %s", containerArgs) rc.ApplyExtraPath(ctx, step.getEnv()) return rc.execJobContainer(containerArgs, *step.getEnv(), "", "")(ctx) case x.IsDocker(): if remoteAction == nil { actionDir = "" actionPath = containerActionDir } return execAsDocker(ctx, step, actionName, actionDir, actionPath, remoteAction == nil, "entrypoint") case x.IsComposite(): if err := maybeCopyToActionDir(ctx, step, actionDir, actionPath, containerActionDir); err != nil { return err } return execAsComposite(step)(ctx) default: return fmt.Errorf("The runs.using key must be one of: %v, got %s", []string{ model.ActionRunsUsingDocker, model.ActionRunsUsingNode12, model.ActionRunsUsingNode16, model.ActionRunsUsingNode20, model.ActionRunsUsingNode24, model.ActionRunsUsingComposite, }, action.Runs.Using) } } } func setupActionEnv(ctx context.Context, step actionStep, _ *remoteAction) error { rc := step.getRunContext() // A few fields in the environment (e.g. GITHUB_ACTION_REPOSITORY) // are dependent on the action. 
That means we can complete the // setup only after resolving the whole action model and cloning // the action rc.withGithubEnv(ctx, step.getGithubContext(ctx), *step.getEnv()) populateEnvsFromSavedState(step.getEnv(), step, rc) populateEnvsFromInput(ctx, step.getEnv(), step.getActionModel(), rc) return nil } // https://github.com/nektos/act/issues/228#issuecomment-629709055 // files in .gitignore are not copied in a Docker container // this causes issues with actions that ignore other important resources // such as `node_modules` for example func removeGitIgnore(ctx context.Context, directory string) error { gitIgnorePath := path.Join(directory, ".gitignore") if _, err := os.Stat(gitIgnorePath); err == nil { // .gitignore exists common.Logger(ctx).Debugf("Removing %s before docker cp", gitIgnorePath) err := os.Remove(gitIgnorePath) if err != nil { return err } } return nil } // TODO: break out parts of function to reduce complexicity // //nolint:gocyclo func execAsDocker(ctx context.Context, step actionStep, actionName, basedir, subpath string, localAction bool, entrypointType string) error { logger := common.Logger(ctx) rc := step.getRunContext() action := step.getActionModel() var prepImage common.Executor var image string forcePull := false if strings.HasPrefix(action.Runs.Image, "docker://") { image = strings.TrimPrefix(action.Runs.Image, "docker://") // Apply forcePull only for prebuild docker images forcePull = rc.Config.ForcePull } else { // "-dockeraction" enshures that "./", "./test " won't get converted to "act-:latest", "act-test-:latest" which are invalid docker image names image = fmt.Sprintf("%s-dockeraction:%s", regexp.MustCompile("[^a-zA-Z0-9]").ReplaceAllString(actionName, "-"), "latest") image = fmt.Sprintf("act-%s", strings.TrimLeft(image, "-")) image = strings.ToLower(image) contextDir, fileName := path.Split(path.Join(subpath, action.Runs.Image)) anyArchExists, err := container.ImageExistsLocally(ctx, image, "any") if err != nil { return err } 
correctArchExists, err := container.ImageExistsLocally(ctx, image, rc.Config.ContainerArchitecture) if err != nil { return err } if anyArchExists && !correctArchExists { wasRemoved, err := container.RemoveImage(ctx, image, true, true) if err != nil { return err } if !wasRemoved { return fmt.Errorf("failed to remove image '%s'", image) } } if !correctArchExists || rc.Config.ForceRebuild { logger.Debugf("image '%s' for architecture '%s' will be built from context '%s", image, rc.Config.ContainerArchitecture, contextDir) var buildContext io.ReadCloser if localAction { buildContext, err = rc.JobContainer.GetContainerArchive(ctx, contextDir+"/.") if err != nil { return err } defer buildContext.Close() } else if rc.Config.ActionCache != nil { rstep := step.(*stepActionRemote) buildContext, err = rc.Config.ActionCache.GetTarArchive(ctx, rstep.cacheDir, rstep.resolvedSha, contextDir) if err != nil { return err } defer buildContext.Close() } prepImage = container.NewDockerBuildExecutor(container.NewDockerBuildExecutorInput{ ContextDir: filepath.Join(basedir, contextDir), Dockerfile: fileName, ImageTag: image, BuildContext: buildContext, Platform: rc.Config.ContainerArchitecture, }) } else { logger.Debugf("image '%s' for architecture '%s' already exists", image, rc.Config.ContainerArchitecture) } } eval := rc.NewStepExpressionEvaluator(ctx, step) cmd, err := shellquote.Split(eval.Interpolate(ctx, step.getStepModel().With["args"])) if err != nil { return err } if len(cmd) == 0 { cmd = action.Runs.Args evalDockerArgs(ctx, step, action, &cmd) } entrypoint := strings.Fields(eval.Interpolate(ctx, step.getStepModel().With[entrypointType])) if len(entrypoint) == 0 { if entrypointType == "pre-entrypoint" && action.Runs.PreEntrypoint != "" { entrypoint, err = shellquote.Split(action.Runs.PreEntrypoint) if err != nil { return err } } else if entrypointType == "entrypoint" && action.Runs.Entrypoint != "" { entrypoint, err = shellquote.Split(action.Runs.Entrypoint) if err != nil { 
return err } } else if entrypointType == "post-entrypoint" && action.Runs.PostEntrypoint != "" { entrypoint, err = shellquote.Split(action.Runs.PostEntrypoint) if err != nil { return err } } else { entrypoint = nil } } stepContainer := newStepContainer(ctx, step, image, cmd, entrypoint) return common.NewPipelineExecutor( prepImage, stepContainer.Pull(forcePull), stepContainer.Remove().IfBool(!rc.Config.ReuseContainers), stepContainer.Create(rc.Config.ContainerCapAdd, rc.Config.ContainerCapDrop), stepContainer.Start(true), ).Finally( stepContainer.Remove().IfBool(!rc.Config.ReuseContainers), ).Finally(stepContainer.Close())(ctx) } func evalDockerArgs(ctx context.Context, step step, action *model.Action, cmd *[]string) { rc := step.getRunContext() stepModel := step.getStepModel() inputs := make(map[string]string) eval := rc.NewExpressionEvaluator(ctx) // Set Defaults for k, input := range action.Inputs { inputs[k] = eval.Interpolate(ctx, input.Default) } if stepModel.With != nil { for k, v := range stepModel.With { inputs[k] = eval.Interpolate(ctx, v) } } mergeIntoMap(step, step.getEnv(), inputs) stepEE := rc.NewStepExpressionEvaluator(ctx, step) for i, v := range *cmd { (*cmd)[i] = stepEE.Interpolate(ctx, v) } mergeIntoMap(step, step.getEnv(), action.Runs.Env) ee := rc.NewStepExpressionEvaluator(ctx, step) for k, v := range *step.getEnv() { (*step.getEnv())[k] = ee.Interpolate(ctx, v) } } func newStepContainer(ctx context.Context, step step, image string, cmd []string, entrypoint []string) container.Container { rc := step.getRunContext() stepModel := step.getStepModel() rawLogger := common.Logger(ctx).WithField("raw_output", true) logWriter := common.NewLineWriter(rc.commandHandler(ctx), func(s string) bool { if rc.Config.LogOutput { rawLogger.Infof("%s", s) } else { rawLogger.Debugf("%s", s) } return true }) envList := make([]string, 0) for k, v := range *step.getEnv() { envList = append(envList, fmt.Sprintf("%s=%s", k, v)) } envList = append(envList, 
fmt.Sprintf("%s=%s", "RUNNER_TOOL_CACHE", "/opt/hostedtoolcache")) envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_OS", "Linux")) envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_ARCH", container.RunnerArch(ctx))) envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_TEMP", "/tmp")) binds, mounts := rc.GetBindsAndMounts() networkMode := fmt.Sprintf("container:%s", rc.jobContainerName()) var workdir string if rc.IsHostEnv(ctx) { networkMode = "default" ext := container.LinuxContainerEnvironmentExtensions{} workdir = ext.ToContainerPath(rc.Config.Workdir) } else { workdir = rc.JobContainer.ToContainerPath(rc.Config.Workdir) } stepContainer := container.NewContainer(&container.NewContainerInput{ Cmd: cmd, Entrypoint: entrypoint, WorkingDir: workdir, Image: image, Username: rc.Config.Secrets["DOCKER_USERNAME"], Password: rc.Config.Secrets["DOCKER_PASSWORD"], Name: createContainerName(rc.jobContainerName(), stepModel.ID), Env: envList, Mounts: mounts, NetworkMode: networkMode, Binds: binds, Stdout: logWriter, Stderr: logWriter, Privileged: rc.Config.Privileged, UsernsMode: rc.Config.UsernsMode, Platform: rc.Config.ContainerArchitecture, Options: rc.Config.ContainerOptions, }) return stepContainer } func populateEnvsFromSavedState(env *map[string]string, step actionStep, rc *RunContext) { state, ok := rc.IntraActionState[step.getStepModel().ID] if ok { for name, value := range state { envName := fmt.Sprintf("STATE_%s", name) (*env)[envName] = value } } } func populateEnvsFromInput(ctx context.Context, env *map[string]string, action *model.Action, rc *RunContext) { eval := rc.NewExpressionEvaluator(ctx) for inputID, input := range action.Inputs { envKey := regexp.MustCompile("[^A-Z0-9-]").ReplaceAllString(strings.ToUpper(inputID), "_") envKey = fmt.Sprintf("INPUT_%s", envKey) if _, ok := (*env)[envKey]; !ok { (*env)[envKey] = eval.Interpolate(ctx, input.Default) } } } func getContainerActionPaths(step *model.Step, actionDir string, rc *RunContext) (string, 
string) { actionName := "" containerActionDir := "." if step.Type() != model.StepTypeUsesActionRemote { actionName = getOsSafeRelativePath(actionDir, rc.Config.Workdir) containerActionDir = rc.JobContainer.ToContainerPath(rc.Config.Workdir) + "/" + actionName actionName = "./" + actionName } else if step.Type() == model.StepTypeUsesActionRemote { actionName = getOsSafeRelativePath(actionDir, rc.ActionCacheDir()) containerActionDir = rc.JobContainer.GetActPath() + "/actions/" + actionName } if actionName == "" { actionName = filepath.Base(actionDir) if runtime.GOOS == "windows" { actionName = strings.ReplaceAll(actionName, "\\", "/") } } return actionName, containerActionDir } func getOsSafeRelativePath(s, prefix string) string { actionName := strings.TrimPrefix(s, prefix) if runtime.GOOS == "windows" { actionName = strings.ReplaceAll(actionName, "\\", "/") } actionName = strings.TrimPrefix(actionName, "/") return actionName } func shouldRunPreStep(step actionStep) common.Conditional { return func(ctx context.Context) bool { log := common.Logger(ctx) if step.getActionModel() == nil { log.Debugf("skip pre step for '%s': no action model available", step.getStepModel()) return false } return true } } func hasPreStep(step actionStep) common.Conditional { return func(_ context.Context) bool { action := step.getActionModel() return action.Runs.Using.IsComposite() || (action.Runs.Using.IsNode() && action.Runs.Pre != "") || (action.Runs.Using.IsDocker() && action.Runs.PreEntrypoint != "") } } func runPreStep(step actionStep) common.Executor { return func(ctx context.Context) error { logger := common.Logger(ctx) logger.Debugf("run pre step for '%s'", step.getStepModel()) rc := step.getRunContext() stepModel := step.getStepModel() action := step.getActionModel() // defaults in pre steps were missing, however provided inputs are available populateEnvsFromInput(ctx, step.getEnv(), action, rc) // todo: refactor into step var actionDir string var actionPath string var 
remoteAction *stepActionRemote if remote, ok := step.(*stepActionRemote); ok { actionPath = newRemoteAction(stepModel.Uses).Path actionDir = fmt.Sprintf("%s/%s", rc.ActionCacheDir(), safeFilename(stepModel.Uses)) remoteAction = remote } else { actionDir = filepath.Join(rc.Config.Workdir, stepModel.Uses) actionPath = "" } actionLocation := "" if actionPath != "" { actionLocation = path.Join(actionDir, actionPath) } else { actionLocation = actionDir } actionName, containerActionDir := getContainerActionPaths(stepModel, actionLocation, rc) x := action.Runs.Using switch { case x.IsNode(): if err := maybeCopyToActionDir(ctx, step, actionDir, actionPath, containerActionDir); err != nil { return err } containerArgs := []string{rc.GetNodeToolFullPath(ctx), path.Join(containerActionDir, action.Runs.Pre)} logger.Debugf("executing remote job container: %s", containerArgs) rc.ApplyExtraPath(ctx, step.getEnv()) return rc.execJobContainer(containerArgs, *step.getEnv(), "", "")(ctx) case x.IsDocker(): if remoteAction == nil { actionDir = "" actionPath = containerActionDir } return execAsDocker(ctx, step, actionName, actionDir, actionPath, remoteAction == nil, "pre-entrypoint") case x.IsComposite(): if step.getCompositeSteps() == nil { step.getCompositeRunContext(ctx) } if steps := step.getCompositeSteps(); steps != nil && steps.pre != nil { return steps.pre(ctx) } return fmt.Errorf("missing steps in composite action") default: return nil } } } func shouldRunPostStep(step actionStep) common.Conditional { return func(ctx context.Context) bool { log := common.Logger(ctx) stepResults := step.getRunContext().getStepsContext() stepResult := stepResults[step.getStepModel().ID] if stepResult == nil { log.WithField("stepResult", model.StepStatusSkipped).Debugf("skipping post step for '%s'; step was not executed", step.getStepModel()) return false } if stepResult.Conclusion == model.StepStatusSkipped { log.WithField("stepResult", model.StepStatusSkipped).Debugf("skipping post step for 
'%s'; main step was skipped", step.getStepModel()) return false } if step.getActionModel() == nil { log.WithField("stepResult", model.StepStatusSkipped).Debugf("skipping post step for '%s': no action model available", step.getStepModel()) return false } return true } } func hasPostStep(step actionStep) common.Conditional { return func(_ context.Context) bool { action := step.getActionModel() return action.Runs.Using.IsComposite() || (action.Runs.Using.IsNode() && action.Runs.Post != "") || (action.Runs.Using.IsDocker() && action.Runs.PostEntrypoint != "") } } func runPostStep(step actionStep) common.Executor { return func(ctx context.Context) error { logger := common.Logger(ctx) logger.Debugf("run post step for '%s'", step.getStepModel()) rc := step.getRunContext() stepModel := step.getStepModel() action := step.getActionModel() // todo: refactor into step var actionDir string var actionPath string var remoteAction *stepActionRemote if remote, ok := step.(*stepActionRemote); ok { actionPath = newRemoteAction(stepModel.Uses).Path actionDir = fmt.Sprintf("%s/%s", rc.ActionCacheDir(), safeFilename(stepModel.Uses)) remoteAction = remote } else { actionDir = filepath.Join(rc.Config.Workdir, stepModel.Uses) actionPath = "" } actionLocation := "" if actionPath != "" { actionLocation = path.Join(actionDir, actionPath) } else { actionLocation = actionDir } actionName, containerActionDir := getContainerActionPaths(stepModel, actionLocation, rc) x := action.Runs.Using switch { case x.IsNode(): populateEnvsFromSavedState(step.getEnv(), step, rc) populateEnvsFromInput(ctx, step.getEnv(), step.getActionModel(), rc) containerArgs := []string{rc.GetNodeToolFullPath(ctx), path.Join(containerActionDir, action.Runs.Post)} logger.Debugf("executing remote job container: %s", containerArgs) rc.ApplyExtraPath(ctx, step.getEnv()) return rc.execJobContainer(containerArgs, *step.getEnv(), "", "")(ctx) case x.IsDocker(): if remoteAction == nil { actionDir = "" actionPath = containerActionDir 
} return execAsDocker(ctx, step, actionName, actionDir, actionPath, remoteAction == nil, "post-entrypoint") case x.IsComposite(): if err := maybeCopyToActionDir(ctx, step, actionDir, actionPath, containerActionDir); err != nil { return err } if steps := step.getCompositeSteps(); steps != nil && steps.post != nil { return steps.post(ctx) } return fmt.Errorf("missing steps in composite action") default: return nil } } }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/runner/local_repository_cache.go
pkg/runner/local_repository_cache.go
package runner import ( "archive/tar" "bytes" "context" "fmt" "io" "io/fs" goURL "net/url" "os" "path/filepath" "strings" "github.com/nektos/act/pkg/common" "github.com/nektos/act/pkg/filecollector" ) type LocalRepositoryCache struct { Parent ActionCache LocalRepositories map[string]string CacheDirCache map[string]string } func (l *LocalRepositoryCache) Fetch(ctx context.Context, cacheDir, url, ref, token string) (string, error) { logger := common.Logger(ctx) logger.Debugf("LocalRepositoryCache fetch %s with ref %s", url, ref) if dest, ok := l.LocalRepositories[fmt.Sprintf("%s@%s", url, ref)]; ok { logger.Infof("LocalRepositoryCache matched %s with ref %s to %s", url, ref, dest) l.CacheDirCache[fmt.Sprintf("%s@%s", cacheDir, ref)] = dest return ref, nil } if purl, err := goURL.Parse(url); err == nil { if dest, ok := l.LocalRepositories[fmt.Sprintf("%s@%s", strings.TrimPrefix(purl.Path, "/"), ref)]; ok { logger.Infof("LocalRepositoryCache matched %s with ref %s to %s", url, ref, dest) l.CacheDirCache[fmt.Sprintf("%s@%s", cacheDir, ref)] = dest return ref, nil } } logger.Infof("LocalRepositoryCache not matched %s with Ref %s", url, ref) return l.Parent.Fetch(ctx, cacheDir, url, ref, token) } func (l *LocalRepositoryCache) GetTarArchive(ctx context.Context, cacheDir, sha, includePrefix string) (io.ReadCloser, error) { logger := common.Logger(ctx) // sha is mapped to ref in fetch if there is a local override if dest, ok := l.CacheDirCache[fmt.Sprintf("%s@%s", cacheDir, sha)]; ok { logger.Infof("LocalRepositoryCache read cachedir %s with ref %s and subpath '%s' from %s", cacheDir, sha, includePrefix, dest) srcPath := filepath.Join(dest, includePrefix) buf := &bytes.Buffer{} tw := tar.NewWriter(buf) defer tw.Close() srcPath = filepath.Clean(srcPath) fi, err := os.Lstat(srcPath) if err != nil { return nil, err } tc := &filecollector.TarCollector{ TarWriter: tw, } if fi.IsDir() { srcPrefix := srcPath if !strings.HasSuffix(srcPrefix, string(filepath.Separator)) { srcPrefix 
+= string(filepath.Separator) } fc := &filecollector.FileCollector{ Fs: &filecollector.DefaultFs{}, SrcPath: srcPath, SrcPrefix: srcPrefix, Handler: tc, } err = filepath.Walk(srcPath, fc.CollectFiles(ctx, []string{})) if err != nil { return nil, err } } else { var f io.ReadCloser var linkname string if fi.Mode()&fs.ModeSymlink != 0 { linkname, err = os.Readlink(srcPath) if err != nil { return nil, err } } else { f, err = os.Open(srcPath) if err != nil { return nil, err } defer f.Close() } err := tc.WriteFile(fi.Name(), fi, linkname, f) if err != nil { return nil, err } } return io.NopCloser(buf), nil } logger.Infof("LocalRepositoryCache not matched cachedir %s with Ref %s and subpath '%s'", cacheDir, sha, includePrefix) return l.Parent.GetTarArchive(ctx, cacheDir, sha, includePrefix) }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/runner/step_run_test.go
pkg/runner/step_run_test.go
package runner import ( "bytes" "context" "io" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/nektos/act/pkg/container" "github.com/nektos/act/pkg/model" ) func TestStepRun(t *testing.T) { cm := &containerMock{} fileEntry := &container.FileEntry{ Name: "workflow/1.sh", Mode: 0o755, Body: "\ncmd\n", } sr := &stepRun{ RunContext: &RunContext{ StepResults: map[string]*model.StepResult{}, ExprEval: &expressionEvaluator{}, Config: &Config{}, Run: &model.Run{ JobID: "1", Workflow: &model.Workflow{ Jobs: map[string]*model.Job{ "1": { Defaults: model.Defaults{ Run: model.RunDefaults{ Shell: "bash", }, }, }, }, }, }, JobContainer: cm, }, Step: &model.Step{ ID: "1", Run: "cmd", WorkingDirectory: "workdir", }, } cm.On("Copy", "/var/run/act", []*container.FileEntry{fileEntry}).Return(func(_ context.Context) error { return nil }) cm.On("Exec", []string{"bash", "--noprofile", "--norc", "-e", "-o", "pipefail", "/var/run/act/workflow/1.sh"}, mock.AnythingOfType("map[string]string"), "", "workdir").Return(func(_ context.Context) error { return nil }) cm.On("Copy", "/var/run/act", mock.AnythingOfType("[]*container.FileEntry")).Return(func(_ context.Context) error { return nil }) cm.On("UpdateFromEnv", "/var/run/act/workflow/envs.txt", mock.AnythingOfType("*map[string]string")).Return(func(_ context.Context) error { return nil }) cm.On("UpdateFromEnv", "/var/run/act/workflow/statecmd.txt", mock.AnythingOfType("*map[string]string")).Return(func(_ context.Context) error { return nil }) cm.On("UpdateFromEnv", "/var/run/act/workflow/outputcmd.txt", mock.AnythingOfType("*map[string]string")).Return(func(_ context.Context) error { return nil }) ctx := context.Background() cm.On("GetContainerArchive", ctx, "/var/run/act/workflow/SUMMARY.md").Return(io.NopCloser(&bytes.Buffer{}), nil) cm.On("GetContainerArchive", ctx, "/var/run/act/workflow/pathcmd.txt").Return(io.NopCloser(&bytes.Buffer{}), nil) err := sr.main()(ctx) assert.Nil(t, err) 
cm.AssertExpectations(t) } func TestStepRunPrePost(t *testing.T) { ctx := context.Background() sr := &stepRun{} err := sr.pre()(ctx) assert.Nil(t, err) err = sr.post()(ctx) assert.Nil(t, err) }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false
nektos/act
https://github.com/nektos/act/blob/d93106d194bba273d70d2ba604ea633c3f396b59/pkg/runner/step_action_local.go
pkg/runner/step_action_local.go
package runner import ( "archive/tar" "context" "errors" "fmt" "io" "io/fs" "os" "path" "path/filepath" "github.com/nektos/act/pkg/common" "github.com/nektos/act/pkg/model" ) type stepActionLocal struct { Step *model.Step RunContext *RunContext compositeRunContext *RunContext compositeSteps *compositeSteps runAction runAction readAction readAction env map[string]string action *model.Action } func (sal *stepActionLocal) pre() common.Executor { sal.env = map[string]string{} return func(_ context.Context) error { return nil } } func (sal *stepActionLocal) main() common.Executor { return runStepExecutor(sal, stepStageMain, func(ctx context.Context) error { if common.Dryrun(ctx) { return nil } actionDir := filepath.Join(sal.getRunContext().Config.Workdir, sal.Step.Uses) localReader := func(ctx context.Context) actionYamlReader { _, cpath := getContainerActionPaths(sal.Step, path.Join(actionDir, ""), sal.RunContext) return func(filename string) (io.Reader, io.Closer, error) { spath := path.Join(cpath, filename) for i := 0; i < maxSymlinkDepth; i++ { tars, err := sal.RunContext.JobContainer.GetContainerArchive(ctx, spath) if errors.Is(err, fs.ErrNotExist) { return nil, nil, err } else if err != nil { return nil, nil, fs.ErrNotExist } treader := tar.NewReader(tars) header, err := treader.Next() if errors.Is(err, io.EOF) { return nil, nil, os.ErrNotExist } else if err != nil { return nil, nil, err } if header.FileInfo().Mode()&os.ModeSymlink == os.ModeSymlink { spath, err = symlinkJoin(spath, header.Linkname, cpath) if err != nil { return nil, nil, err } } else { return treader, tars, nil } } return nil, nil, fmt.Errorf("max depth %d of symlinks exceeded while reading %s", maxSymlinkDepth, spath) } } actionModel, err := sal.readAction(ctx, sal.Step, actionDir, "", localReader(ctx), os.WriteFile) if err != nil { return err } sal.action = actionModel return sal.runAction(sal, actionDir, nil)(ctx) }) } func (sal *stepActionLocal) post() common.Executor { return 
runStepExecutor(sal, stepStagePost, runPostStep(sal)).If(hasPostStep(sal)).If(shouldRunPostStep(sal)) } func (sal *stepActionLocal) getRunContext() *RunContext { return sal.RunContext } func (sal *stepActionLocal) getGithubContext(ctx context.Context) *model.GithubContext { return sal.getRunContext().getGithubContext(ctx) } func (sal *stepActionLocal) getStepModel() *model.Step { return sal.Step } func (sal *stepActionLocal) getEnv() *map[string]string { return &sal.env } func (sal *stepActionLocal) getIfExpression(_ context.Context, stage stepStage) string { switch stage { case stepStageMain: return sal.Step.If.Value case stepStagePost: return sal.action.Runs.PostIf } return "" } func (sal *stepActionLocal) getActionModel() *model.Action { return sal.action } func (sal *stepActionLocal) getCompositeRunContext(ctx context.Context) *RunContext { if sal.compositeRunContext == nil { actionDir := filepath.Join(sal.RunContext.Config.Workdir, sal.Step.Uses) _, containerActionDir := getContainerActionPaths(sal.getStepModel(), actionDir, sal.RunContext) sal.compositeRunContext = newCompositeRunContext(ctx, sal.RunContext, sal, containerActionDir) sal.compositeSteps = sal.compositeRunContext.compositeExecutor(sal.action) } return sal.compositeRunContext } func (sal *stepActionLocal) getCompositeSteps() *compositeSteps { return sal.compositeSteps }
go
MIT
d93106d194bba273d70d2ba604ea633c3f396b59
2026-01-07T08:35:43.481138Z
false