repo stringlengths 5 67 | sha stringlengths 40 40 | path stringlengths 4 234 | url stringlengths 85 339 | language stringclasses 6 values | split stringclasses 3 values | doc stringlengths 3 51.2k | sign stringlengths 5 8.01k | problem stringlengths 13 51.2k | output stringlengths 0 3.87M |
|---|---|---|---|---|---|---|---|---|---|
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L289-L291 | go | train | // IsExpired returns true if profile is not expired yet | func (p *ProfileStatus) IsExpired(clock clockwork.Clock) bool | // IsExpired returns true if profile is not expired yet
func (p *ProfileStatus) IsExpired(clock clockwork.Clock) bool | {
return p.ValidUntil.Sub(clock.Now()) <= 0
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L295-L327 | go | train | // RetryWithRelogin is a helper error handling method,
// attempts to relogin and retry the function once | func RetryWithRelogin(ctx context.Context, tc *TeleportClient, fn func() error) error | // RetryWithRelogin is a helper error handling method,
// attempts to relogin and retry the function once
func RetryWithRelogin(ctx context.Context, tc *TeleportClient, fn func() error) error | {
err := fn()
if err == nil {
return nil
}
// Assume that failed handshake is a result of expired credentials,
// retry the login procedure
if !utils.IsHandshakeFailedError(err) && !utils.IsCertExpiredError(err) && !trace.IsBadParameter(err) && trace.IsTrustError(err) {
return err
}
key, err := tc.Login(ctx, true)
if err != nil {
if trace.IsTrustError(err) {
return trace.Wrap(err, "refusing to connect to untrusted proxy %v without --insecure flag\n", tc.Config.SSHProxyAddr)
}
return trace.Wrap(err)
}
// Save profile to record proxy credentials
if err := tc.SaveProfile(key.ProxyHost, "", ProfileCreateNew|ProfileMakeCurrent); err != nil {
log.Warningf("Failed to save profile: %v", err)
return trace.Wrap(err)
}
// Override client's auth methods, current cluster and user name
authMethod, err := key.AsAuthMethod()
if err != nil {
return trace.Wrap(err)
}
// After successful login we have local agent updated with latest
// and greatest auth information, setup client to try only this new
// method fetched from key, to isolate the retry
tc.Config.AuthMethods = []ssh.AuthMethod{authMethod}
return fn()
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L332-L397 | go | train | // readProfile reads in the profile as well as the associated certificate
// and returns a *ProfileStatus which can be used to print the status of the
// profile. | func readProfile(profileDir string, profileName string) (*ProfileStatus, error) | // readProfile reads in the profile as well as the associated certificate
// and returns a *ProfileStatus which can be used to print the status of the
// profile.
func readProfile(profileDir string, profileName string) (*ProfileStatus, error) | {
var err error
// Read in the profile for this proxy.
profile, err := ProfileFromFile(filepath.Join(profileDir, profileName))
if err != nil {
return nil, trace.Wrap(err)
}
// Read in the SSH certificate for the user logged into this proxy.
store, err := NewFSLocalKeyStore(profileDir)
if err != nil {
return nil, trace.Wrap(err)
}
keys, err := store.GetKey(profile.Name(), profile.Username)
if err != nil {
return nil, trace.Wrap(err)
}
publicKey, _, _, _, err := ssh.ParseAuthorizedKey(keys.Cert)
if err != nil {
return nil, trace.Wrap(err)
}
cert, ok := publicKey.(*ssh.Certificate)
if !ok {
return nil, trace.BadParameter("no certificate found")
}
// Extract from the certificate how much longer it will be valid for.
validUntil := time.Unix(int64(cert.ValidBefore), 0)
// Extract roles from certificate. Note, if the certificate is in old format,
// this will be empty.
var roles []string
rawRoles, ok := cert.Extensions[teleport.CertExtensionTeleportRoles]
if ok {
roles, err = services.UnmarshalCertRoles(rawRoles)
if err != nil {
return nil, trace.Wrap(err)
}
}
sort.Strings(roles)
// Extract extensions from certificate. This lists the abilities of the
// certificate (like can the user request a PTY, port forwarding, etc.)
var extensions []string
for ext, _ := range cert.Extensions {
if ext == teleport.CertExtensionTeleportRoles {
continue
}
extensions = append(extensions, ext)
}
sort.Strings(extensions)
return &ProfileStatus{
ProxyURL: url.URL{
Scheme: "https",
Host: profile.WebProxyAddr,
},
Username: profile.Username,
Logins: cert.ValidPrincipals,
ValidUntil: validUntil,
Extensions: extensions,
Roles: roles,
Cluster: profile.Name(),
}, nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L401-L423 | go | train | // fullProfileName takes a profile directory and the host the user is trying
// to connect to and returns the name of the profile file. | func fullProfileName(profileDir string, proxyHost string) (string, error) | // fullProfileName takes a profile directory and the host the user is trying
// to connect to and returns the name of the profile file.
func fullProfileName(profileDir string, proxyHost string) (string, error) | {
var err error
var profileName string
// If no profile name was passed in, try and extract the active profile from
// the ~/.tsh/profile symlink. If one was passed in, append .yaml to name.
if proxyHost == "" {
profileName, err = os.Readlink(filepath.Join(profileDir, "profile"))
if err != nil {
return "", trace.ConvertSystemError(err)
}
} else {
profileName = proxyHost + ".yaml"
}
// Make sure the profile requested actually exists.
_, err = os.Stat(filepath.Join(profileDir, profileName))
if err != nil {
return "", trace.ConvertSystemError(err)
}
return profileName, nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L426-L506 | go | train | // Status returns the active profile as well as a list of available profiles. | func Status(profileDir string, proxyHost string) (*ProfileStatus, []*ProfileStatus, error) | // Status returns the active profile as well as a list of available profiles.
func Status(profileDir string, proxyHost string) (*ProfileStatus, []*ProfileStatus, error) | {
var err error
var profile *ProfileStatus
var others []*ProfileStatus
// remove ports from proxy host, because profile name is stored
// by host name
if proxyHost != "" {
proxyHost, err = utils.Host(proxyHost)
if err != nil {
return nil, nil, trace.Wrap(err)
}
}
// Construct the full path to the profile requested and make sure it exists.
profileDir = FullProfilePath(profileDir)
stat, err := os.Stat(profileDir)
if err != nil {
return nil, nil, trace.Wrap(err)
}
if !stat.IsDir() {
return nil, nil, trace.BadParameter("profile path not a directory")
}
// Construct the name of the profile requested. If an empty string was
// passed in, the name of the active profile will be extracted from the
// ~/.tsh/profile symlink.
profileName, err := fullProfileName(profileDir, proxyHost)
if err != nil {
if trace.IsNotFound(err) {
return nil, nil, trace.NotFound("not logged in")
}
return nil, nil, trace.Wrap(err)
}
// Read in the active profile first. If readProfile returns trace.NotFound,
// that means the profile may have been corrupted (for example keys were
// deleted but profile exists), treat this as the user not being logged in.
profile, err = readProfile(profileDir, profileName)
if err != nil {
if !trace.IsNotFound(err) {
return nil, nil, trace.Wrap(err)
}
// Make sure the profile is nil, which tsh uses to detect that no
// active profile exists.
profile = nil
}
// Next, get list of all other available profiles. Filter out logged in
// profile if it exists and return a slice of *ProfileStatus.
files, err := ioutil.ReadDir(profileDir)
if err != nil {
return nil, nil, trace.Wrap(err)
}
for _, file := range files {
if file.IsDir() {
continue
}
if file.Mode()&os.ModeSymlink != 0 {
continue
}
if !strings.HasSuffix(file.Name(), ".yaml") {
continue
}
if file.Name() == profileName {
continue
}
ps, err := readProfile(profileDir, file.Name())
if err != nil {
// parts of profile are missing?
// status skips these files
if trace.IsNotFound(err) {
continue
}
return nil, nil, trace.Wrap(err)
}
others = append(others, ps)
}
return profile, others, nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L511-L562 | go | train | // LoadProfile populates Config with the values stored in the given
// profiles directory. If profileDir is an empty string, the default profile
// directory ~/.tsh is used. | func (c *Config) LoadProfile(profileDir string, proxyName string) error | // LoadProfile populates Config with the values stored in the given
// profiles directory. If profileDir is an empty string, the default profile
// directory ~/.tsh is used.
func (c *Config) LoadProfile(profileDir string, proxyName string) error | {
profileDir = FullProfilePath(profileDir)
// read the profile:
cp, err := ProfileFromDir(profileDir, ProxyHost(proxyName))
if err != nil {
if trace.IsNotFound(err) {
return nil
}
return trace.Wrap(err)
}
// DELETE IN: 3.1.0
// The "proxy_host" field (and associated ports) have been deprecated and
// replaced with "proxy_web_addr" and "proxy_ssh_addr".
if cp.ProxyHost != "" {
if cp.ProxyWebPort == 0 {
cp.ProxyWebPort = defaults.HTTPListenPort
}
if cp.ProxySSHPort == 0 {
cp.ProxySSHPort = defaults.SSHProxyListenPort
}
c.WebProxyAddr = net.JoinHostPort(cp.ProxyHost, strconv.Itoa(cp.ProxyWebPort))
c.SSHProxyAddr = net.JoinHostPort(cp.ProxyHost, strconv.Itoa(cp.ProxySSHPort))
}
c.Username = cp.Username
c.SiteName = cp.SiteName
c.KubeProxyAddr = cp.KubeProxyAddr
// UPDATE IN: 3.1.0
// Remove the above DELETE IN block and below if statements and always set
// WebProxyAddr and SSHProxyAddr. This needs to be done right now to support
// backward compatibility with Teleport 2.0.
if cp.WebProxyAddr != "" {
c.WebProxyAddr = cp.WebProxyAddr
}
if cp.SSHProxyAddr != "" {
c.SSHProxyAddr = cp.SSHProxyAddr
}
c.LocalForwardPorts, err = ParsePortForwardSpec(cp.ForwardedPorts)
if err != nil {
log.Warnf("Unable to parse port forwarding in user profile: %v.", err)
}
c.DynamicForwardedPorts, err = ParseDynamicPortForwardSpec(cp.DynamicForwardedPorts)
if err != nil {
log.Warnf("Unable to parse dynamic port forwarding in user profile: %v.", err)
}
return nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L566-L607 | go | train | // SaveProfile updates the given profiles directory with the current configuration
// If profileDir is an empty string, the default ~/.tsh is used | func (c *Config) SaveProfile(profileAliasHost, profileDir string, profileOptions ...ProfileOptions) error | // SaveProfile updates the given profiles directory with the current configuration
// If profileDir is an empty string, the default ~/.tsh is used
func (c *Config) SaveProfile(profileAliasHost, profileDir string, profileOptions ...ProfileOptions) error | {
if c.WebProxyAddr == "" {
return nil
}
// The profile is saved to a directory with the name of the proxy web endpoint.
webProxyHost, _ := c.WebProxyHostPort()
profileDir = FullProfilePath(profileDir)
profilePath := path.Join(profileDir, webProxyHost) + ".yaml"
profileAliasPath := ""
if profileAliasHost != "" {
profileAliasPath = path.Join(profileDir, profileAliasHost) + ".yaml"
}
var cp ClientProfile
cp.Username = c.Username
cp.WebProxyAddr = c.WebProxyAddr
cp.SSHProxyAddr = c.SSHProxyAddr
cp.KubeProxyAddr = c.KubeProxyAddr
cp.ForwardedPorts = c.LocalForwardPorts.String()
cp.SiteName = c.SiteName
// create a profile file and set it current base on the option
var opts ProfileOptions
if len(profileOptions) == 0 {
// default behavior is to override the profile
opts = ProfileMakeCurrent
} else {
for _, flag := range profileOptions {
opts |= flag
}
}
if err := cp.SaveTo(ProfileLocation{
AliasPath: profileAliasPath,
Path: profilePath,
Options: opts,
}); err != nil {
return trace.Wrap(err)
}
return nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L613-L653 | go | train | // ParseProxyHost parses the proxyHost string and updates the config.
//
// Format of proxyHost string:
// proxy_web_addr:<proxy_web_port>,<proxy_ssh_port> | func (c *Config) ParseProxyHost(proxyHost string) error | // ParseProxyHost parses the proxyHost string and updates the config.
//
// Format of proxyHost string:
// proxy_web_addr:<proxy_web_port>,<proxy_ssh_port>
func (c *Config) ParseProxyHost(proxyHost string) error | {
host, port, err := net.SplitHostPort(proxyHost)
if err != nil {
host = proxyHost
port = ""
}
// Split on comma.
parts := strings.Split(port, ",")
switch {
// Default ports for both the SSH and Web proxy.
case len(parts) == 0:
c.WebProxyAddr = net.JoinHostPort(host, strconv.Itoa(defaults.HTTPListenPort))
c.SSHProxyAddr = net.JoinHostPort(host, strconv.Itoa(defaults.SSHProxyListenPort))
// User defined HTTP proxy port, default SSH proxy port.
case len(parts) == 1:
webPort := parts[0]
if webPort == "" {
webPort = strconv.Itoa(defaults.HTTPListenPort)
}
c.WebProxyAddr = net.JoinHostPort(host, webPort)
c.SSHProxyAddr = net.JoinHostPort(host, strconv.Itoa(defaults.SSHProxyListenPort))
// User defined HTTP and SSH proxy ports.
case len(parts) == 2:
webPort := parts[0]
if webPort == "" {
webPort = strconv.Itoa(defaults.HTTPListenPort)
}
sshPort := parts[1]
if sshPort == "" {
sshPort = strconv.Itoa(defaults.SSHProxyListenPort)
}
c.WebProxyAddr = net.JoinHostPort(host, webPort)
c.SSHProxyAddr = net.JoinHostPort(host, sshPort)
default:
return trace.BadParameter("unable to parse port: %v", port)
}
return nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L656-L666 | go | train | // KubeProxyHostPort returns the host and port of the Kubernetes proxy. | func (c *Config) KubeProxyHostPort() (string, int) | // KubeProxyHostPort returns the host and port of the Kubernetes proxy.
func (c *Config) KubeProxyHostPort() (string, int) | {
if c.KubeProxyAddr != "" {
addr, err := utils.ParseAddr(c.KubeProxyAddr)
if err == nil {
return addr.Host(), addr.Port(defaults.KubeProxyListenPort)
}
}
webProxyHost, _ := c.WebProxyHostPort()
return webProxyHost, defaults.KubeProxyListenPort
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L669-L679 | go | train | // WebProxyHostPort returns the host and port of the web proxy. | func (c *Config) WebProxyHostPort() (string, int) | // WebProxyHostPort returns the host and port of the web proxy.
func (c *Config) WebProxyHostPort() (string, int) | {
if c.WebProxyAddr != "" {
addr, err := utils.ParseAddr(c.WebProxyAddr)
if err == nil {
return addr.Host(), addr.Port(defaults.HTTPListenPort)
}
}
webProxyHost, _ := c.WebProxyHostPort()
return webProxyHost, defaults.HTTPListenPort
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L682-L692 | go | train | // SSHProxyHostPort returns the host and port of the SSH proxy. | func (c *Config) SSHProxyHostPort() (string, int) | // SSHProxyHostPort returns the host and port of the SSH proxy.
func (c *Config) SSHProxyHostPort() (string, int) | {
if c.SSHProxyAddr != "" {
addr, err := utils.ParseAddr(c.SSHProxyAddr)
if err == nil {
return addr.Host(), addr.Port(defaults.SSHProxyListenPort)
}
}
webProxyHost, _ := c.WebProxyHostPort()
return webProxyHost, defaults.SSHProxyListenPort
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L695-L701 | go | train | // ProxyHost returns the hostname of the proxy server (without any port numbers) | func ProxyHost(proxyHost string) string | // ProxyHost returns the hostname of the proxy server (without any port numbers)
func ProxyHost(proxyHost string) string | {
host, _, err := net.SplitHostPort(proxyHost)
if err != nil {
return proxyHost
}
return host
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L735-L810 | go | train | // NewClient creates a TeleportClient object and fully configures it | func NewClient(c *Config) (tc *TeleportClient, err error) | // NewClient creates a TeleportClient object and fully configures it
func NewClient(c *Config) (tc *TeleportClient, err error) | {
// validate configuration
if c.Username == "" {
c.Username, err = Username()
if err != nil {
return nil, trace.Wrap(err)
}
log.Infof("No teleport login given. defaulting to %s", c.Username)
}
if c.WebProxyAddr == "" {
return nil, trace.BadParameter("No proxy address specified, missed --proxy flag?")
}
if c.HostLogin == "" {
c.HostLogin, err = Username()
if err != nil {
return nil, trace.Wrap(err)
}
log.Infof("no host login given. defaulting to %s", c.HostLogin)
}
if c.KeyTTL == 0 {
c.KeyTTL = defaults.CertDuration
}
c.Namespace = services.ProcessNamespace(c.Namespace)
tc = &TeleportClient{Config: *c}
if tc.Stdout == nil {
tc.Stdout = os.Stdout
}
if tc.Stderr == nil {
tc.Stderr = os.Stderr
}
if tc.Stdin == nil {
tc.Stdin = os.Stdin
}
// Create a buffered channel to hold events that occurred during this session.
// This channel must be buffered because the SSH connection directly feeds
// into it. Delays in pulling messages off the global SSH request channel
// could lead to the connection hanging.
tc.eventsCh = make(chan events.EventFields, 1024)
// Create a client that can be used for the initial fetch of credentials.
tc.credClient, err = NewCredentialsClient(
c.WebProxyAddr,
c.InsecureSkipVerify,
loopbackPool(c.WebProxyAddr))
if err != nil {
return nil, trace.Wrap(err)
}
// sometimes we need to use external auth without using local auth
// methods, e.g. in automation daemons
if c.SkipLocalAuth {
if len(c.AuthMethods) == 0 {
return nil, trace.BadParameter("SkipLocalAuth is true but no AuthMethods provided")
}
// if the client was passed an agent in the configuration and skip local auth, use
// the passed in agent.
if c.Agent != nil {
tc.localAgent = &LocalKeyAgent{Agent: c.Agent}
}
} else {
// initialize the local agent (auth agent which uses local SSH keys signed by the CA):
webProxyHost, _ := tc.WebProxyHostPort()
tc.localAgent, err = NewLocalAgent(c.KeysDir, webProxyHost, c.Username)
if err != nil {
return nil, trace.Wrap(err)
}
if tc.HostKeyCallback == nil {
tc.HostKeyCallback = tc.localAgent.CheckHostSignature
}
}
return tc, nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L813-L822 | go | train | // accessPoint returns access point based on the cache policy | func (tc *TeleportClient) accessPoint(clt auth.AccessPoint, proxyHostPort string, clusterName string) (auth.AccessPoint, error) | // accessPoint returns access point based on the cache policy
func (tc *TeleportClient) accessPoint(clt auth.AccessPoint, proxyHostPort string, clusterName string) (auth.AccessPoint, error) | {
// If no caching policy was set or on Windows (where Teleport does not
// support file locking at the moment), return direct access to the access
// point.
if tc.CachePolicy == nil || runtime.GOOS == teleport.WindowsOS {
log.Debugf("not using caching access point")
return clt, nil
}
return clt, nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L830-L849 | go | train | // getTargetNodes returns a list of node addresses this SSH command needs to
// operate on. | func (tc *TeleportClient) getTargetNodes(ctx context.Context, proxy *ProxyClient) ([]string, error) | // getTargetNodes returns a list of node addresses this SSH command needs to
// operate on.
func (tc *TeleportClient) getTargetNodes(ctx context.Context, proxy *ProxyClient) ([]string, error) | {
var (
err error
nodes []services.Server
retval = make([]string, 0)
)
if tc.Labels != nil && len(tc.Labels) > 0 {
nodes, err = proxy.FindServersByLabels(ctx, tc.Namespace, tc.Labels)
if err != nil {
return nil, trace.Wrap(err)
}
for i := 0; i < len(nodes); i++ {
retval = append(retval, nodes[i].GetAddr())
}
}
if len(nodes) == 0 {
retval = append(retval, net.JoinHostPort(tc.Host, strconv.Itoa(tc.HostPort)))
}
return retval, nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L855-L910 | go | train | // SSH connects to a node and, if 'command' is specified, executes the command on it,
// otherwise runs interactive shell
//
// Returns nil if successful, or (possibly) *exec.ExitError | func (tc *TeleportClient) SSH(ctx context.Context, command []string, runLocally bool) error | // SSH connects to a node and, if 'command' is specified, executes the command on it,
// otherwise runs interactive shell
//
// Returns nil if successful, or (possibly) *exec.ExitError
func (tc *TeleportClient) SSH(ctx context.Context, command []string, runLocally bool) error | {
// connect to proxy first:
if !tc.Config.ProxySpecified() {
return trace.BadParameter("proxy server is not specified")
}
proxyClient, err := tc.ConnectToProxy(ctx)
if err != nil {
return trace.Wrap(err)
}
defer proxyClient.Close()
siteInfo, err := proxyClient.currentCluster()
if err != nil {
return trace.Wrap(err)
}
// which nodes are we executing this commands on?
nodeAddrs, err := tc.getTargetNodes(ctx, proxyClient)
if err != nil {
return trace.Wrap(err)
}
if len(nodeAddrs) == 0 {
return trace.BadParameter("no target host specified")
}
nodeClient, err := proxyClient.ConnectToNode(
ctx,
nodeAddrs[0]+"@"+tc.Namespace+"@"+siteInfo.Name,
tc.Config.HostLogin,
false)
if err != nil {
tc.ExitStatus = 1
return trace.Wrap(err)
}
// proxy local ports (forward incoming connections to remote host ports)
tc.startPortForwarding(ctx, nodeClient)
// local execution?
if runLocally {
if len(tc.Config.LocalForwardPorts) == 0 {
fmt.Println("Executing command locally without connecting to any servers. This makes no sense.")
}
return runLocalCommand(command)
}
// Issue "exec" request(s) to run on remote node(s).
if len(command) > 0 {
if len(nodeAddrs) > 1 {
fmt.Printf("\x1b[1mWARNING\x1b[0m: Multiple nodes matched label selector, running command on all.")
}
return tc.runCommand(ctx, siteInfo.Name, nodeAddrs, proxyClient, command)
}
// Issue "shell" request to run single node.
if len(nodeAddrs) > 1 {
fmt.Printf("\x1b[1mWARNING\x1b[0m: Multiple nodes match the label selector, picking first: %v\n", nodeAddrs[0])
}
return tc.runShell(nodeClient, nil)
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L939-L1016 | go | train | // Join connects to the existing/active SSH session | func (tc *TeleportClient) Join(ctx context.Context, namespace string, sessionID session.ID, input io.Reader) (err error) | // Join connects to the existing/active SSH session
func (tc *TeleportClient) Join(ctx context.Context, namespace string, sessionID session.ID, input io.Reader) (err error) | {
if namespace == "" {
return trace.BadParameter(auth.MissingNamespaceError)
}
tc.Stdin = input
if sessionID.Check() != nil {
return trace.Errorf("Invalid session ID format: %s", string(sessionID))
}
var notFoundErrorMessage = fmt.Sprintf("session '%s' not found or it has ended", sessionID)
// connect to proxy:
if !tc.Config.ProxySpecified() {
return trace.BadParameter("proxy server is not specified")
}
proxyClient, err := tc.ConnectToProxy(ctx)
if err != nil {
return trace.Wrap(err)
}
defer proxyClient.Close()
site, err := proxyClient.ConnectToCurrentCluster(ctx, false)
if err != nil {
return trace.Wrap(err)
}
// find the session ID on the site:
sessions, err := site.GetSessions(namespace)
if err != nil {
return trace.Wrap(err)
}
var session *session.Session
for _, s := range sessions {
if s.ID == sessionID {
session = &s
break
}
}
if session == nil {
return trace.NotFound(notFoundErrorMessage)
}
// pick the 1st party of the session and use his server ID to connect to
if len(session.Parties) == 0 {
return trace.NotFound(notFoundErrorMessage)
}
serverID := session.Parties[0].ServerID
// find a server address by its ID
nodes, err := site.GetNodes(namespace, services.SkipValidation())
if err != nil {
return trace.Wrap(err)
}
var node services.Server
for _, n := range nodes {
if n.GetName() == serverID {
node = n
break
}
}
if node == nil {
return trace.NotFound(notFoundErrorMessage)
}
// connect to server:
fullNodeAddr := node.GetAddr()
if tc.SiteName != "" {
fullNodeAddr = fmt.Sprintf("%s@%s@%s", node.GetAddr(), tc.Namespace, tc.SiteName)
}
nc, err := proxyClient.ConnectToNode(ctx, fullNodeAddr, tc.Config.HostLogin, false)
if err != nil {
return trace.Wrap(err)
}
defer nc.Close()
// Start forwarding ports if configured.
tc.startPortForwarding(ctx, nc)
// running shell with a given session means "join" it:
return tc.runShell(nc, session)
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1019-L1108 | go | train | // Play replays the recorded session | func (tc *TeleportClient) Play(ctx context.Context, namespace, sessionId string) (err error) | // Play replays the recorded session
func (tc *TeleportClient) Play(ctx context.Context, namespace, sessionId string) (err error) | {
if namespace == "" {
return trace.BadParameter(auth.MissingNamespaceError)
}
sid, err := session.ParseID(sessionId)
if err != nil {
return fmt.Errorf("'%v' is not a valid session ID (must be GUID)", sid)
}
// connect to the auth server (site) who made the recording
proxyClient, err := tc.ConnectToProxy(ctx)
if err != nil {
return trace.Wrap(err)
}
site, err := proxyClient.ConnectToCurrentCluster(ctx, false)
if err != nil {
return trace.Wrap(err)
}
// request events for that session (to get timing data)
sessionEvents, err := site.GetSessionEvents(namespace, *sid, 0, true)
if err != nil {
return trace.Wrap(err)
}
// read the stream into a buffer:
var stream []byte
for err == nil {
tmp, err := site.GetSessionChunk(namespace, *sid, len(stream), events.MaxChunkBytes)
if err != nil {
return trace.Wrap(err)
}
if len(tmp) == 0 {
err = io.EOF
break
}
stream = append(stream, tmp...)
}
// configure terminal for direct unbuffered echo-less input:
if term.IsTerminal(0) {
state, err := term.SetRawTerminal(0)
if err != nil {
return nil
}
defer term.RestoreTerminal(0, state)
}
player := newSessionPlayer(sessionEvents, stream)
// keys:
const (
keyCtrlC = 3
keyCtrlD = 4
keySpace = 32
keyLeft = 68
keyRight = 67
keyUp = 65
keyDown = 66
)
// playback control goroutine
go func() {
defer player.Stop()
key := make([]byte, 1)
for {
_, err = os.Stdin.Read(key)
if err != nil {
return
}
switch key[0] {
// Ctrl+C or Ctrl+D
case keyCtrlC, keyCtrlD:
return
// Space key
case keySpace:
player.TogglePause()
// <- arrow
case keyLeft, keyDown:
player.Rewind()
// -> arrow
case keyRight, keyUp:
player.Forward()
}
}
}()
// player starts playing in its own goroutine
player.Play()
// wait for keypresses loop to end
<-player.stopC
fmt.Println("\n\nend of session playback")
return trace.Wrap(err)
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1112-L1160 | go | train | // ExecuteSCP executes SCP command. It executes scp.Command using
// lower-level API integrations that mimic SCP CLI command behavior | func (tc *TeleportClient) ExecuteSCP(ctx context.Context, cmd scp.Command) (err error) | // ExecuteSCP executes SCP command. It executes scp.Command using
// lower-level API integrations that mimic SCP CLI command behavior
func (tc *TeleportClient) ExecuteSCP(ctx context.Context, cmd scp.Command) (err error) | {
// connect to proxy first:
if !tc.Config.ProxySpecified() {
return trace.BadParameter("proxy server is not specified")
}
proxyClient, err := tc.ConnectToProxy(ctx)
if err != nil {
return trace.Wrap(err)
}
defer proxyClient.Close()
clusterInfo, err := proxyClient.currentCluster()
if err != nil {
return trace.Wrap(err)
}
// which nodes are we executing this commands on?
nodeAddrs, err := tc.getTargetNodes(ctx, proxyClient)
if err != nil {
return trace.Wrap(err)
}
if len(nodeAddrs) == 0 {
return trace.BadParameter("no target host specified")
}
nodeClient, err := proxyClient.ConnectToNode(
ctx,
nodeAddrs[0]+"@"+tc.Namespace+"@"+clusterInfo.Name,
tc.Config.HostLogin,
false)
if err != nil {
tc.ExitStatus = 1
return trace.Wrap(err)
}
err = nodeClient.ExecuteSCP(cmd)
if err != nil {
// converts SSH error code to tc.ExitStatus
exitError, _ := trace.Unwrap(err).(*ssh.ExitError)
if exitError != nil {
tc.ExitStatus = exitError.ExitStatus()
}
return err
}
return nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1163-L1288 | go | train | // SCP securely copies file(s) from one SSH server to another | func (tc *TeleportClient) SCP(ctx context.Context, args []string, port int, recursive bool, quiet bool) (err error) | // SCP securely copies file(s) from one SSH server to another
// SCP securely copies file(s) between the local machine and a remote SSH
// node, proxied through the Teleport proxy. At least one of the endpoints
// (the first source argument or the last destination argument) must be
// remote, in "[login@]host:path" form; purely local copies are rejected.
//
// args is the raw scp argument list (sources..., destination), port is the
// SSH port of the remote node, recursive enables directory copies, and
// quiet suppresses progress output. On a remote scp failure the SSH exit
// code is recorded in tc.ExitStatus.
func (tc *TeleportClient) SCP(ctx context.Context, args []string, port int, recursive bool, quiet bool) (err error) {
    if len(args) < 2 {
        return trace.Errorf("Need at least two arguments for scp")
    }
    first := args[0]
    last := args[len(args)-1]
    // local copy? (neither endpoint is remote)
    if !isRemoteDest(first) && !isRemoteDest(last) {
        return trace.BadParameter("making local copies is not supported")
    }
    if !tc.Config.ProxySpecified() {
        return trace.BadParameter("proxy server is not specified")
    }
    log.Infof("Connecting to proxy to copy (recursively=%v)...", recursive)
    proxyClient, err := tc.ConnectToProxy(ctx)
    if err != nil {
        return trace.Wrap(err)
    }
    defer proxyClient.Close()
    // helper function connects to the src/target node:
    connectToNode := func(addr string) (*NodeClient, error) {
        // determine which cluster we're connecting to:
        siteInfo, err := proxyClient.currentCluster()
        if err != nil {
            return nil, trace.Wrap(err)
        }
        return proxyClient.ConnectToNode(ctx, addr+"@"+tc.Namespace+"@"+siteInfo.Name, tc.HostLogin, false)
    }
    // progress output goes to stdout unless quiet mode was requested
    var progressWriter io.Writer
    if !quiet {
        progressWriter = tc.Stdout
    }
    // gets called to convert SSH error code to tc.ExitStatus
    onError := func(err error) error {
        exitError, _ := trace.Unwrap(err).(*ssh.ExitError)
        if exitError != nil {
            tc.ExitStatus = exitError.ExitStatus()
        }
        return err
    }
    // upload:
    if isRemoteDest(last) {
        filesToUpload := args[:len(args)-1]
        // If more than a single file were provided, scp must be in directory mode
        // and the target on the remote host needs to be a directory.
        var directoryMode bool
        if len(filesToUpload) > 1 {
            directoryMode = true
        }
        login, host, dest := scp.ParseSCPDestination(last)
        // a login embedded in the destination overrides the configured host login
        if login != "" {
            tc.HostLogin = login
        }
        addr := net.JoinHostPort(host, strconv.Itoa(port))
        client, err := connectToNode(addr)
        if err != nil {
            return trace.Wrap(err)
        }
        // copy everything except the last arg (that's the remote destination)
        for _, src := range filesToUpload {
            scpConfig := scp.Config{
                User:           tc.Username,
                ProgressWriter: progressWriter,
                RemoteLocation: dest,
                Flags: scp.Flags{
                    Target:        []string{src},
                    Recursive:     recursive,
                    DirectoryMode: directoryMode,
                },
            }
            cmd, err := scp.CreateUploadCommand(scpConfig)
            if err != nil {
                return trace.Wrap(err)
            }
            err = client.ExecuteSCP(cmd)
            if err != nil {
                return onError(err)
            }
        }
        // download:
    } else {
        login, host, src := scp.ParseSCPDestination(first)
        addr := net.JoinHostPort(host, strconv.Itoa(port))
        if login != "" {
            tc.HostLogin = login
        }
        client, err := connectToNode(addr)
        if err != nil {
            return trace.Wrap(err)
        }
        // the first arg is the remote source; every remaining arg is a local
        // destination the source is fetched into
        for _, dest := range args[1:] {
            scpConfig := scp.Config{
                User: tc.Username,
                Flags: scp.Flags{
                    Recursive: recursive,
                    Target:    []string{dest},
                },
                RemoteLocation: src,
                ProgressWriter: progressWriter,
            }
            cmd, err := scp.CreateDownloadCommand(scpConfig)
            if err != nil {
                return trace.Wrap(err)
            }
            err = client.ExecuteSCP(cmd)
            if err != nil {
                return onError(err)
            }
        }
    }
    return nil
}
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1295-L1313 | go | train | // ListNodes returns a list of nodes connected to a proxy | func (tc *TeleportClient) ListNodes(ctx context.Context) ([]services.Server, error) | // ListNodes returns a list of nodes connected to a proxy
func (tc *TeleportClient) ListNodes(ctx context.Context) ([]services.Server, error) | {
var err error
// userhost is specified? that must be labels
if tc.Host != "" {
tc.Labels, err = ParseLabelSpec(tc.Host)
if err != nil {
return nil, trace.Wrap(err)
}
}
// connect to the proxy and ask it to return a full list of servers
proxyClient, err := tc.ConnectToProxy(ctx)
if err != nil {
return nil, trace.Wrap(err)
}
defer proxyClient.Close()
return proxyClient.FindServersByLabels(ctx, tc.Namespace, tc.Labels)
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1316-L1370 | go | train | // runCommand executes a given bash command on a bunch of remote nodes | func (tc *TeleportClient) runCommand(
ctx context.Context, siteName string, nodeAddresses []string, proxyClient *ProxyClient, command []string) error | // runCommand executes a given bash command on a bunch of remote nodes
// runCommand executes a given bash command on a bunch of remote nodes.
//
// The command runs concurrently (one goroutine per node address); each
// goroutine reports its result on resultsC and the last non-nil error is
// returned. A remote non-zero exit code is stored in tc.ExitStatus.
//
// NOTE(review): tc.ExitStatus may be written from multiple goroutines
// without synchronization here — last writer wins; confirm this is accepted.
func (tc *TeleportClient) runCommand(
    ctx context.Context, siteName string, nodeAddresses []string, proxyClient *ProxyClient, command []string) error {
    // buffered so every goroutine can post its result without blocking
    resultsC := make(chan error, len(nodeAddresses))
    for _, address := range nodeAddresses {
        go func(address string) {
            var (
                err         error
                nodeSession *NodeSession
            )
            // always report a result, even on early return
            defer func() {
                resultsC <- err
            }()
            var nodeClient *NodeClient
            nodeClient, err = proxyClient.ConnectToNode(ctx, address+"@"+tc.Namespace+"@"+siteName, tc.Config.HostLogin, false)
            if err != nil {
                fmt.Fprintln(tc.Stderr, err)
                return
            }
            defer nodeClient.Close()
            // run the command on one node:
            if len(nodeAddresses) > 1 {
                fmt.Printf("Running command on %v:\n", address)
            }
            nodeSession, err = newSession(nodeClient, nil, tc.Config.Env, tc.Stdin, tc.Stdout, tc.Stderr)
            if err != nil {
                log.Error(err)
                return
            }
            defer nodeSession.Close()
            if err = nodeSession.runCommand(ctx, command, tc.OnShellCreated, tc.Config.Interactive); err != nil {
                originErr := trace.Unwrap(err)
                exitErr, ok := originErr.(*ssh.ExitError)
                if ok {
                    tc.ExitStatus = exitErr.ExitStatus()
                } else {
                    // if an error occurs, but no exit status is passed back, GoSSH returns
                    // a generic error like this. in this case the error message is printed
                    // to stderr by the remote process so we have to quietly return 1:
                    if strings.Contains(originErr.Error(), "exited without exit status") {
                        tc.ExitStatus = 1
                    }
                }
            }
        }(address)
    }
    // wait for every goroutine; keep only the last error seen
    var lastError error
    for range nodeAddresses {
        if err := <-resultsC; err != nil {
            lastError = err
        }
    }
    return trace.Wrap(lastError)
}
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1374-L1388 | go | train | // runShell starts an interactive SSH session/shell.
// sessionID : when empty, creates a new shell. otherwise it tries to join the existing session. | func (tc *TeleportClient) runShell(nodeClient *NodeClient, sessToJoin *session.Session) error | // runShell starts an interactive SSH session/shell.
// sessionID : when empty, creates a new shell. otherwise it tries to join the existing session.
func (tc *TeleportClient) runShell(nodeClient *NodeClient, sessToJoin *session.Session) error | {
nodeSession, err := newSession(nodeClient, sessToJoin, tc.Env, tc.Stdin, tc.Stdout, tc.Stderr)
if err != nil {
return trace.Wrap(err)
}
if err = nodeSession.runShell(tc.OnShellCreated); err != nil {
return trace.Wrap(err)
}
if nodeSession.ExitMsg == "" {
fmt.Fprintln(tc.Stderr, "the connection was closed on the remote side on ", time.Now().Format(time.RFC822))
} else {
fmt.Fprintln(tc.Stderr, nodeSession.ExitMsg)
}
return nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1391-L1408 | go | train | // getProxyLogin determines which SSH principal to use when connecting to proxy. | func (tc *TeleportClient) getProxySSHPrincipal() string | // getProxyLogin determines which SSH principal to use when connecting to proxy.
func (tc *TeleportClient) getProxySSHPrincipal() string | {
proxyPrincipal := tc.Config.HostLogin
if tc.DefaultPrincipal != "" {
proxyPrincipal = tc.DefaultPrincipal
}
// see if we already have a signed key in the cache, we'll use that instead
if !tc.Config.SkipLocalAuth && tc.LocalAgent() != nil {
signers, err := tc.LocalAgent().Signers()
if err != nil || len(signers) == 0 {
return proxyPrincipal
}
cert, ok := signers[0].PublicKey().(*ssh.Certificate)
if ok && len(cert.ValidPrincipals) > 0 {
return cert.ValidPrincipals[0]
}
}
return proxyPrincipal
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1412-L1418 | go | train | // authMethods returns a list (slice) of all SSH auth methods this client
// can use to try to authenticate | func (tc *TeleportClient) authMethods() []ssh.AuthMethod | // authMethods returns a list (slice) of all SSH auth methods this client
// can use to try to authenticate
func (tc *TeleportClient) authMethods() []ssh.AuthMethod | {
m := append([]ssh.AuthMethod(nil), tc.Config.AuthMethods...)
if tc.LocalAgent() != nil {
m = append(m, tc.LocalAgent().AuthMethods()...)
}
return m
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1423-L1444 | go | train | // ConnectToProxy will dial to the proxy server and return a ProxyClient when
// successful. If the passed in context is canceled, this function will return
// a trace.ConnectionProblem right away. | func (tc *TeleportClient) ConnectToProxy(ctx context.Context) (*ProxyClient, error) | // ConnectToProxy will dial to the proxy server and return a ProxyClient when
// ConnectToProxy will dial to the proxy server and return a ProxyClient when
// successful. If the passed in context is canceled, this function will return
// a trace.ConnectionProblem right away.
//
// NOTE(review): if ctx is canceled first, the dial goroutine keeps running
// and any ProxyClient it eventually produces is never closed — confirm this
// leak is acceptable here.
func (tc *TeleportClient) ConnectToProxy(ctx context.Context) (*ProxyClient, error) {
    var err error
    var proxyClient *ProxyClient
    // Use connectContext and the cancel function to signal when a response is
    // returned from connectToProxy.
    connectContext, cancel := context.WithCancel(context.Background())
    go func() {
        defer cancel()
        proxyClient, err = tc.connectToProxy(ctx)
    }()
    select {
    // ConnectToProxy returned a result, return that back to the caller.
    case <-connectContext.Done():
        return proxyClient, trace.Wrap(err)
    // The passed in context timed out. This is often due to the network being
    // down and the user hitting Ctrl-C.
    case <-ctx.Done():
        return nil, trace.ConnectionProblem(ctx.Err(), "connection canceled")
    }
}
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1448-L1491 | go | train | // connectToProxy will dial to the proxy server and return a ProxyClient when
// successful. | func (tc *TeleportClient) connectToProxy(ctx context.Context) (*ProxyClient, error) | // connectToProxy will dial to the proxy server and return a ProxyClient when
// successful.
func (tc *TeleportClient) connectToProxy(ctx context.Context) (*ProxyClient, error) | {
var err error
proxyPrincipal := tc.getProxySSHPrincipal()
sshConfig := &ssh.ClientConfig{
User: proxyPrincipal,
HostKeyCallback: tc.HostKeyCallback,
}
// helper to create a ProxyClient struct
makeProxyClient := func(sshClient *ssh.Client, m ssh.AuthMethod) *ProxyClient {
return &ProxyClient{
teleportClient: tc,
Client: sshClient,
proxyAddress: tc.Config.SSHProxyAddr,
proxyPrincipal: proxyPrincipal,
hostKeyCallback: sshConfig.HostKeyCallback,
authMethod: m,
hostLogin: tc.Config.HostLogin,
siteName: tc.Config.SiteName,
clientAddr: tc.ClientAddr,
}
}
successMsg := fmt.Sprintf("Successful auth with proxy %v", tc.Config.SSHProxyAddr)
// try to authenticate using every non interactive auth method we have:
for i, m := range tc.authMethods() {
log.Infof("Connecting proxy=%v login='%v' method=%d", tc.Config.SSHProxyAddr, sshConfig.User, i)
var sshClient *ssh.Client
sshConfig.Auth = []ssh.AuthMethod{m}
sshClient, err = ssh.Dial("tcp", tc.Config.SSHProxyAddr, sshConfig)
if err != nil {
return nil, trace.Wrap(err)
}
log.Infof(successMsg)
return makeProxyClient(sshClient, m), nil
}
// we have exhausted all auth existing auth methods and local login
// is disabled in configuration, or the user refused connecting to untrusted hosts
if err == nil {
err = trace.BadParameter("failed to authenticate with proxy %v", tc.Config.SSHProxyAddr)
}
return nil, trace.Wrap(err)
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1495-L1502 | go | train | // Logout removes certificate and key for the currently logged in user from
// the filesystem and agent. | func (tc *TeleportClient) Logout() error | // Logout removes certificate and key for the currently logged in user from
// the filesystem and agent.
func (tc *TeleportClient) Logout() error | {
err := tc.localAgent.DeleteKey()
if err != nil {
return trace.Wrap(err)
}
return nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1506-L1512 | go | train | // LogoutAll removes all certificates for all users from the filesystem
// and agent. | func (tc *TeleportClient) LogoutAll() error | // LogoutAll removes all certificates for all users from the filesystem
// and agent.
func (tc *TeleportClient) LogoutAll() error | {
err := tc.localAgent.DeleteKeys()
if err != nil {
return trace.Wrap(err)
}
return nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1519-L1627 | go | train | // Login logs the user into a Teleport cluster by talking to a Teleport proxy.
//
// If 'activateKey' is true, saves the received session cert into the local
// keystore (and into the ssh-agent) for future use.
// | func (tc *TeleportClient) Login(ctx context.Context, activateKey bool) (*Key, error) | // Login logs the user into a Teleport cluster by talking to a Teleport proxy.
// Login logs the user into a Teleport cluster by talking to a Teleport proxy.
//
// The proxy is pinged first to discover the authentication type (local,
// OIDC, SAML or Github); a fresh keypair is generated and its public half is
// signed by the cluster CA via the appropriate login flow.
//
// If 'activateKey' is true, saves the received session cert into the local
// keystore (and into the ssh-agent) for future use.
//
func (tc *TeleportClient) Login(ctx context.Context, activateKey bool) (*Key, error) {
    // Ping the endpoint to see if it's up and find the type of authentication
    // supported.
    pr, err := tc.credClient.Ping(ctx, tc.AuthConnector)
    if err != nil {
        return nil, trace.Wrap(err)
    }
    // If version checking was requested and the server advertises a minimum version.
    if tc.CheckVersions && pr.MinClientVersion != "" {
        if err := utils.CheckVersions(teleport.Version, pr.MinClientVersion); err != nil {
            return nil, trace.Wrap(err)
        }
    }
    // preserve the original web proxy host: applyProxySettings below may
    // rewrite the proxy addresses from server-advertised values
    webProxyHost, _ := tc.WebProxyHostPort()
    if err := tc.applyProxySettings(pr.Proxy); err != nil {
        return nil, trace.Wrap(err)
    }
    // generate a new keypair. the public key will be signed via proxy if client's
    // password+OTP are valid
    key, err := NewKey()
    if err != nil {
        return nil, trace.Wrap(err)
    }
    var response *auth.SSHLoginResponse
    switch pr.Auth.Type {
    case teleport.Local:
        response, err = tc.localLogin(ctx, pr.Auth.SecondFactor, key.Pub)
        if err != nil {
            return nil, trace.Wrap(err)
        }
    case teleport.OIDC:
        response, err = tc.ssoLogin(ctx, pr.Auth.OIDC.Name, key.Pub, teleport.OIDC)
        if err != nil {
            return nil, trace.Wrap(err)
        }
        // in this case identity is returned by the proxy
        tc.Username = response.Username
        if tc.localAgent != nil {
            tc.localAgent.username = response.Username
        }
    case teleport.SAML:
        response, err = tc.ssoLogin(ctx, pr.Auth.SAML.Name, key.Pub, teleport.SAML)
        if err != nil {
            return nil, trace.Wrap(err)
        }
        // in this case identity is returned by the proxy
        tc.Username = response.Username
        if tc.localAgent != nil {
            tc.localAgent.username = response.Username
        }
    case teleport.Github:
        response, err = tc.ssoLogin(ctx, pr.Auth.Github.Name, key.Pub, teleport.Github)
        if err != nil {
            return nil, trace.Wrap(err)
        }
        // in this case identity is returned by the proxy
        tc.Username = response.Username
        if tc.localAgent != nil {
            tc.localAgent.username = response.Username
        }
    default:
        return nil, trace.BadParameter("unsupported authentication type: %q", pr.Auth.Type)
    }
    // extract the new certificate out of the response
    key.Cert = response.Cert
    key.TLSCert = response.TLSCert
    key.ProxyHost = webProxyHost
    if len(response.HostSigners) <= 0 {
        return nil, trace.BadParameter("bad response from the server: expected at least one certificate, got 0")
    }
    key.ClusterName = response.HostSigners[0].ClusterName
    if activateKey {
        // save the list of CAs client trusts to ~/.tsh/known_hosts
        err = tc.localAgent.AddHostSignersToCache(response.HostSigners)
        if err != nil {
            return nil, trace.Wrap(err)
        }
        // save the list of TLS CAs client trusts
        err = tc.localAgent.SaveCerts(response.HostSigners)
        if err != nil {
            return nil, trace.Wrap(err)
        }
        // save the cert to the local storage (~/.tsh usually):
        _, err = tc.localAgent.AddKey(key)
        if err != nil {
            return nil, trace.Wrap(err)
        }
        // Connect to the Auth Server of the main cluster
        // and fetch the known hosts for this cluster.
        if err := tc.UpdateTrustedCA(ctx, key.ClusterName); err != nil {
            return nil, trace.Wrap(err)
        }
    }
    return key, nil
}
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1631-L1650 | go | train | // GetTrustedCA returns a list of host certificate authorities
// trusted by the cluster client is authenticated with. | func (tc *TeleportClient) GetTrustedCA(ctx context.Context, clusterName string) ([]services.CertAuthority, error) | // GetTrustedCA returns a list of host certificate authorities
// trusted by the cluster client is authenticated with.
func (tc *TeleportClient) GetTrustedCA(ctx context.Context, clusterName string) ([]services.CertAuthority, error) | {
// Connect to the proxy.
if !tc.Config.ProxySpecified() {
return nil, trace.BadParameter("proxy server is not specified")
}
proxyClient, err := tc.ConnectToProxy(ctx)
if err != nil {
return nil, trace.Wrap(err)
}
defer proxyClient.Close()
// Get a client to the Auth Server.
clt, err := proxyClient.ClusterAccessPoint(ctx, clusterName, true)
if err != nil {
return nil, trace.Wrap(err)
}
// Get the list of host certificates that this cluster knows about.
return clt.GetCertAuthorities(services.HostCA, false)
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1654-L1676 | go | train | // UpdateTrustedCA connects to the Auth Server and fetches all host certificates
// and updates ~/.tsh/keys/proxy/certs.pem and ~/.tsh/known_hosts. | func (tc *TeleportClient) UpdateTrustedCA(ctx context.Context, clusterName string) error | // UpdateTrustedCA connects to the Auth Server and fetches all host certificates
// and updates ~/.tsh/keys/proxy/certs.pem and ~/.tsh/known_hosts.
func (tc *TeleportClient) UpdateTrustedCA(ctx context.Context, clusterName string) error | {
// Get the list of host certificates that this cluster knows about.
hostCerts, err := tc.GetTrustedCA(ctx, clusterName)
if err != nil {
return trace.Wrap(err)
}
trustedCerts := auth.AuthoritiesToTrustedCerts(hostCerts)
// Update the ~/.tsh/known_hosts file to include all the CA the cluster
// knows about.
err = tc.localAgent.AddHostSignersToCache(trustedCerts)
if err != nil {
return trace.Wrap(err)
}
// Update the CA pool with all the CA the cluster knows about.
err = tc.localAgent.SaveCerts(trustedCerts)
if err != nil {
return trace.Wrap(err)
}
return nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1681-L1738 | go | train | // applyProxySettings updates configuration changes based on the advertised
// proxy settings, user supplied values take precedence - will be preserved
// if set | func (tc *TeleportClient) applyProxySettings(proxySettings ProxySettings) error | // applyProxySettings updates configuration changes based on the advertised
// proxy settings, user supplied values take precedence - will be preserved
// if set
func (tc *TeleportClient) applyProxySettings(proxySettings ProxySettings) error | {
// Kubernetes proxy settings.
if proxySettings.Kube.Enabled && proxySettings.Kube.PublicAddr != "" && tc.KubeProxyAddr == "" {
_, err := utils.ParseAddr(proxySettings.Kube.PublicAddr)
if err != nil {
return trace.BadParameter(
"failed to parse value received from the server: %q, contact your administrator for help",
proxySettings.Kube.PublicAddr)
}
tc.KubeProxyAddr = proxySettings.Kube.PublicAddr
} else if proxySettings.Kube.Enabled && tc.KubeProxyAddr == "" {
webProxyHost, _ := tc.WebProxyHostPort()
tc.KubeProxyAddr = fmt.Sprintf("%s:%d", webProxyHost, defaults.KubeProxyListenPort)
}
// Read in settings for HTTP endpoint of the proxy.
if proxySettings.SSH.PublicAddr != "" {
addr, err := utils.ParseAddr(proxySettings.SSH.PublicAddr)
if err != nil {
return trace.BadParameter(
"failed to parse value received from the server: %q, contact your administrator for help",
proxySettings.SSH.PublicAddr)
}
tc.WebProxyAddr = net.JoinHostPort(addr.Host(), strconv.Itoa(addr.Port(defaults.HTTPListenPort)))
// Update local agent (that reads/writes to ~/.tsh) with the new address
// of the web proxy. This will control where the keys are stored on disk
// after login.
tc.localAgent.UpdateProxyHost(addr.Host())
}
// Read in settings for the SSH endpoint of the proxy.
//
// If listen_addr is set, take host from ProxyWebHost and port from what
// was set. This is to maintain backward compatibility when Teleport only
// supported public_addr.
if proxySettings.SSH.ListenAddr != "" {
addr, err := utils.ParseAddr(proxySettings.SSH.ListenAddr)
if err != nil {
return trace.BadParameter(
"failed to parse value received from the server: %q, contact your administrator for help",
proxySettings.SSH.ListenAddr)
}
webProxyHost, _ := tc.WebProxyHostPort()
tc.SSHProxyAddr = net.JoinHostPort(webProxyHost, strconv.Itoa(addr.Port(defaults.SSHProxyListenPort)))
}
// If ssh_public_addr is set, override settings from listen_addr.
if proxySettings.SSH.SSHPublicAddr != "" {
addr, err := utils.ParseAddr(proxySettings.SSH.SSHPublicAddr)
if err != nil {
return trace.BadParameter(
"failed to parse value received from the server: %q, contact your administrator for help",
proxySettings.SSH.ListenAddr)
}
tc.SSHProxyAddr = net.JoinHostPort(addr.Host(), strconv.Itoa(addr.Port(defaults.SSHProxyListenPort)))
}
return nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1763-L1779 | go | train | // Adds a new CA as trusted CA for this client, used in tests | func (tc *TeleportClient) AddTrustedCA(ca services.CertAuthority) error | // Adds a new CA as trusted CA for this client, used in tests
func (tc *TeleportClient) AddTrustedCA(ca services.CertAuthority) error | {
err := tc.LocalAgent().AddHostSignersToCache(auth.AuthoritiesToTrustedCerts([]services.CertAuthority{ca}))
if err != nil {
return trace.Wrap(err)
}
// only host CA has TLS certificates, user CA will overwrite trusted certs
// to empty file if called
if ca.GetType() == services.HostCA {
err = tc.LocalAgent().SaveCerts(auth.AuthoritiesToTrustedCerts([]services.CertAuthority{ca}))
if err != nil {
return trace.Wrap(err)
}
}
return nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1786-L1816 | go | train | // directLogin asks for a password + HOTP token, makes a request to CA via proxy | func (tc *TeleportClient) directLogin(ctx context.Context, secondFactorType string, pub []byte) (*auth.SSHLoginResponse, error) | // directLogin asks for a password + HOTP token, makes a request to CA via proxy
func (tc *TeleportClient) directLogin(ctx context.Context, secondFactorType string, pub []byte) (*auth.SSHLoginResponse, error) | {
var err error
var password string
var otpToken string
password, err = tc.AskPassword()
if err != nil {
return nil, trace.Wrap(err)
}
// only ask for a second factor if it's enabled
if secondFactorType != teleport.OFF {
otpToken, err = tc.AskOTP()
if err != nil {
return nil, trace.Wrap(err)
}
}
// ask the CA (via proxy) to sign our public key:
response, err := tc.credClient.SSHAgentLogin(
ctx,
tc.Config.Username,
password,
otpToken,
pub,
tc.KeyTTL,
tc.CertificateFormat)
return response, trace.Wrap(err)
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1819-L1832 | go | train | // samlLogin opens browser window and uses OIDC or SAML redirect cycle with browser | func (tc *TeleportClient) ssoLogin(ctx context.Context, connectorID string, pub []byte, protocol string) (*auth.SSHLoginResponse, error) | // samlLogin opens browser window and uses OIDC or SAML redirect cycle with browser
func (tc *TeleportClient) ssoLogin(ctx context.Context, connectorID string, pub []byte, protocol string) (*auth.SSHLoginResponse, error) | {
log.Debugf("samlLogin start")
// ask the CA (via proxy) to sign our public key:
response, err := tc.credClient.SSHAgentSSOLogin(SSHLogin{
Context: ctx,
ConnectorID: connectorID,
PubKey: pub,
TTL: tc.KeyTTL,
Protocol: protocol,
Compatibility: tc.CertificateFormat,
BindAddr: tc.BindAddr,
})
return response, trace.Wrap(err)
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1835-L1856 | go | train | // directLogin asks for a password and performs the challenge-response authentication | func (tc *TeleportClient) u2fLogin(ctx context.Context, pub []byte) (*auth.SSHLoginResponse, error) | // directLogin asks for a password and performs the challenge-response authentication
func (tc *TeleportClient) u2fLogin(ctx context.Context, pub []byte) (*auth.SSHLoginResponse, error) | {
// U2F login requires the official u2f-host executable
_, err := exec.LookPath("u2f-host")
if err != nil {
return nil, trace.Wrap(err)
}
password, err := tc.AskPassword()
if err != nil {
return nil, trace.Wrap(err)
}
response, err := tc.credClient.SSHAgentU2FLogin(
ctx,
tc.Config.Username,
password,
pub,
tc.KeyTTL,
tc.CertificateFormat)
return response, trace.Wrap(err)
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1859-L1868 | go | train | // SendEvent adds a events.EventFields to the channel. | func (tc *TeleportClient) SendEvent(ctx context.Context, e events.EventFields) error | // SendEvent adds a events.EventFields to the channel.
func (tc *TeleportClient) SendEvent(ctx context.Context, e events.EventFields) error | {
// Try and send the event to the eventsCh. If blocking, keep blocking until
// the passed in context in canceled.
select {
case tc.eventsCh <- e:
return nil
case <-ctx.Done():
return trace.Wrap(ctx.Err())
}
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1878-L1908 | go | train | // loopbackPool reads trusted CAs if it finds it in a predefined location
// and will work only if target proxy address is loopback | func loopbackPool(proxyAddr string) *x509.CertPool | // loopbackPool reads trusted CAs if it finds it in a predefined location
// and will work only if target proxy address is loopback
func loopbackPool(proxyAddr string) *x509.CertPool | {
if !utils.IsLoopback(proxyAddr) {
log.Debugf("not using loopback pool for remote proxy addr: %v", proxyAddr)
return nil
}
log.Debugf("attempting to use loopback pool for local proxy addr: %v", proxyAddr)
certPool := x509.NewCertPool()
certPath := filepath.Join(defaults.DataDir, defaults.SelfSignedCertPath)
pemByte, err := ioutil.ReadFile(certPath)
if err != nil {
log.Debugf("could not open any path in: %v", certPath)
return nil
}
for {
var block *pem.Block
block, pemByte = pem.Decode(pemByte)
if block == nil {
break
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
log.Debugf("could not parse cert in: %v, err: %v", certPath, err)
return nil
}
certPool.AddCert(cert)
}
log.Debugf("using local pool for loopback proxy: %v, err: %v", certPath, err)
return certPool
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1911-L1921 | go | train | // connectToSSHAgent connects to the local SSH agent and returns a agent.Agent. | func connectToSSHAgent() agent.Agent | // connectToSSHAgent connects to the local SSH agent and returns a agent.Agent.
func connectToSSHAgent() agent.Agent | {
socketPath := os.Getenv(teleport.SSHAuthSock)
conn, err := agentconn.Dial(socketPath)
if err != nil {
log.Errorf("[KEY AGENT] Unable to connect to SSH agent on socket: %q.", socketPath)
return nil
}
log.Infof("[KEY AGENT] Connected to the system agent: %q", socketPath)
return agent.NewClient(conn)
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1924-L1930 | go | train | // Username returns the current user's username | func Username() (string, error) | // Username returns the current user's username
func Username() (string, error) | {
u, err := user.Current()
if err != nil {
return "", trace.Wrap(err)
}
return u.Username, nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1933-L1941 | go | train | // AskOTP prompts the user to enter the OTP token. | func (tc *TeleportClient) AskOTP() (token string, err error) | // AskOTP prompts the user to enter the OTP token.
func (tc *TeleportClient) AskOTP() (token string, err error) | {
fmt.Printf("Enter your OTP token:\n")
token, err = lineFromConsole()
if err != nil {
fmt.Fprintln(tc.Stderr, err)
return "", trace.Wrap(err)
}
return token, nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1944-L1953 | go | train | // AskPassword prompts the user to enter the password | func (tc *TeleportClient) AskPassword() (pwd string, err error) | // AskPassword prompts the user to enter the password
func (tc *TeleportClient) AskPassword() (pwd string, err error) | {
fmt.Printf("Enter password for Teleport user %v:\n", tc.Config.Username)
pwd, err = passwordFromConsole()
if err != nil {
fmt.Fprintln(tc.Stderr, err)
return "", trace.Wrap(err)
}
return pwd, nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1956-L1982 | go | train | // passwordFromConsole reads from stdin without echoing typed characters to stdout | func passwordFromConsole() (string, error) | // passwordFromConsole reads from stdin without echoing typed characters to stdout
// passwordFromConsole reads from stdin without echoing typed characters to
// stdout. While the read is in progress, SIGINT (Ctrl+C) is intercepted so
// the terminal can be restored to its saved state before the process exits.
func passwordFromConsole() (string, error) {
    fd := syscall.Stdin
    // snapshot the terminal state so it can be restored on Ctrl+C
    state, err := terminal.GetState(int(fd))
    // intercept Ctr+C and restore terminal
    sigCh := make(chan os.Signal, 1)
    closeCh := make(chan int)
    if err != nil {
        // not a terminal (or state unreadable): read anyway, just without
        // the restore-on-interrupt safety net
        log.Warnf("failed reading terminal state: %v", err)
    } else {
        signal.Notify(sigCh, syscall.SIGINT)
        go func() {
            select {
            case <-sigCh:
                // put the terminal back (echo on) before dying
                terminal.Restore(int(fd), state)
                os.Exit(1)
            case <-closeCh:
                // normal completion: stop watching for SIGINT
            }
        }()
    }
    defer func() {
        close(closeCh)
    }()
    bytes, err := terminal.ReadPassword(int(fd))
    return string(bytes), err
}
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1985-L1988 | go | train | // lineFromConsole reads a line from stdin | func lineFromConsole() (string, error) | // lineFromConsole reads a line from stdin
// lineFromConsole reads a line from stdin
func lineFromConsole() (string, error) {
    line, _, err := bufio.NewReader(os.Stdin).ReadLine()
    return string(line), err
}
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L1992-L2032 | go | train | // ParseLabelSpec parses a string like 'name=value,"long name"="quoted value"` into a map like
// { "name" -> "value", "long name" -> "quoted value" } | func ParseLabelSpec(spec string) (map[string]string, error) | // ParseLabelSpec parses a string like 'name=value,"long name"="quoted value"` into a map like
// { "name" -> "value", "long name" -> "quoted value" }
// ParseLabelSpec parses a string like 'name=value,"long name"="quoted value"` into a map like
// { "name" -> "value", "long name" -> "quoted value" }
func ParseLabelSpec(spec string) (map[string]string, error) {
	var (
		tokens   []string
		inQuotes bool // true while between a pair of double quotes
		start    int  // byte offset where the current token begins
		eqCount  int  // number of '=' separators seen outside quotes
	)
	total := len(spec)
	// tokenize the label spec: split on '=', ',' and ';' outside quotes
	for idx, r := range spec {
		boundary := false
		// the final rune always terminates the current token
		if idx+utf8.RuneLen(r) == total {
			idx += utf8.RuneLen(r)
			boundary = true
		}
		switch r {
		case '"':
			inQuotes = !inQuotes
		case '=', ',', ';':
			if !inQuotes {
				boundary = true
				if r == '=' {
					eqCount++
				}
			}
		}
		if boundary && idx > start {
			tokens = append(tokens, strings.TrimSpace(strings.Trim(spec[start:idx], `"`)))
			start = idx + 1
		}
	}
	// simple validation of tokenization: must have an even number of tokens (because they're pairs)
	// and the number of such pairs must be equal the number of assignments
	if len(tokens)%2 != 0 || eqCount != len(tokens)/2 {
		return nil, fmt.Errorf("invalid label spec: '%s', should be 'key=value'", spec)
	}
	// break tokens in pairs and put into a map
	labels := make(map[string]string, len(tokens)/2)
	for i := 0; i+1 < len(tokens); i += 2 {
		labels[tokens[i]] = tokens[i+1]
	}
	return labels, nil
}
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L2036-L2053 | go | train | // Executes the given command on the client machine (localhost). If no command is given,
// executes shell | func runLocalCommand(command []string) error | // Executes the given command on the client machine (localhost). If no command is given,
// executes shell
func runLocalCommand(command []string) error | {
if len(command) == 0 {
user, err := user.Current()
if err != nil {
return trace.Wrap(err)
}
shell, err := shell.GetLoginShell(user.Username)
if err != nil {
return trace.Wrap(err)
}
command = []string{shell}
}
cmd := exec.Command(command[0], command[1:]...)
cmd.Stderr = os.Stderr
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
return cmd.Run()
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L2065-L2093 | go | train | // ParsePortForwardSpec parses parameter to -L flag, i.e. strings like "[ip]:80:remote.host:3000"
// The opposite of this function (spec generation) is ForwardedPorts.String() | func ParsePortForwardSpec(spec []string) (ports ForwardedPorts, err error) | // ParsePortForwardSpec parses parameter to -L flag, i.e. strings like "[ip]:80:remote.host:3000"
// The opposite of this function (spec generation) is ForwardedPorts.String()
func ParsePortForwardSpec(spec []string) (ports ForwardedPorts, err error) | {
if len(spec) == 0 {
return ports, nil
}
const errTemplate = "Invalid port forwarding spec: '%s'. Could be like `80:remote.host:80`"
ports = make([]ForwardedPort, len(spec), len(spec))
for i, str := range spec {
parts := strings.Split(str, ":")
if len(parts) < 3 || len(parts) > 4 {
return nil, fmt.Errorf(errTemplate, str)
}
if len(parts) == 3 {
parts = append([]string{"127.0.0.1"}, parts...)
}
p := &ports[i]
p.SrcIP = parts[0]
p.SrcPort, err = strconv.Atoi(parts[1])
if err != nil {
return nil, fmt.Errorf(errTemplate, str)
}
p.DestHost = parts[2]
p.DestPort, err = strconv.Atoi(parts[3])
if err != nil {
return nil, fmt.Errorf(errTemplate, str)
}
}
return ports, nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L2097-L2102 | go | train | // String returns the same string spec which can be parsed by
// ParseDynamicPortForwardSpec. | func (fp DynamicForwardedPorts) String() (retval []string) | // String returns the same string spec which can be parsed by
// ParseDynamicPortForwardSpec.
func (fp DynamicForwardedPorts) String() (retval []string) | {
for _, p := range fp {
retval = append(retval, p.ToString())
}
return retval
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L2107-L2133 | go | train | // ParseDynamicPortForwardSpec parses the dynamic port forwarding spec
// passed in the -D flag. The format of the dynamic port forwarding spec
// is [bind_address:]port. | func ParseDynamicPortForwardSpec(spec []string) (DynamicForwardedPorts, error) | // ParseDynamicPortForwardSpec parses the dynamic port forwarding spec
// passed in the -D flag. The format of the dynamic port forwarding spec
// is [bind_address:]port.
func ParseDynamicPortForwardSpec(spec []string) (DynamicForwardedPorts, error) | {
result := make(DynamicForwardedPorts, 0, len(spec))
for _, str := range spec {
host, port, err := net.SplitHostPort(str)
if err != nil {
return nil, trace.Wrap(err)
}
// If no host is provided, bind to localhost.
if host == "" {
host = defaults.Localhost
}
srcPort, err := strconv.Atoi(port)
if err != nil {
return nil, trace.Wrap(err)
}
result = append(result, DynamicForwardedPort{
SrcIP: host,
SrcPort: srcPort,
})
}
return result, nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/client/api.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/client/api.go#L2137-L2139 | go | train | // InsecureSkipHostKeyChecking is used when the user passes in
// "StrictHostKeyChecking yes". | func InsecureSkipHostKeyChecking(host string, remote net.Addr, key ssh.PublicKey) error | // InsecureSkipHostKeyChecking is used when the user passes in
// InsecureSkipHostKeyChecking is used when the user passes in
// "StrictHostKeyChecking no": it accepts any host key without validation.
func InsecureSkipHostKeyChecking(host string, remote net.Addr, key ssh.PublicKey) error | {
	return nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/reversetunnel/conn.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/reversetunnel/conn.go#L149-L156 | go | train | // OpenChannel will open a SSH channel to the remote side. | func (c *remoteConn) OpenChannel(name string, data []byte) (ssh.Channel, error) | // OpenChannel will open a SSH channel to the remote side.
func (c *remoteConn) OpenChannel(name string, data []byte) (ssh.Channel, error) | {
channel, _, err := c.sconn.OpenChannel(name, data)
if err != nil {
return nil, trace.Wrap(err)
}
return channel, nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/reversetunnel/conn.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/reversetunnel/conn.go#L159-L161 | go | train | // ChannelConn creates a net.Conn over a SSH channel. | func (c *remoteConn) ChannelConn(channel ssh.Channel) net.Conn | // ChannelConn creates a net.Conn over a SSH channel.
// ChannelConn creates a net.Conn over a SSH channel so callers can use
// standard net.Conn semantics on top of the tunnel.
func (c *remoteConn) ChannelConn(channel ssh.Channel) net.Conn | {
	return utils.NewChConn(c.sconn, channel)
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/reversetunnel/conn.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/reversetunnel/conn.go#L232-L258 | go | train | // sendDiscovery requests sends special "Discovery Requests" back to the
// connected agent. Discovery request consists of the proxies that are part
// of the cluster, but did not receive the connection from the agent. Agent
// will act on a discovery request attempting to establish connection to the
// proxies that were not discovered. | func (c *remoteConn) findAndSend() error | // sendDiscovery requests sends special "Discovery Requests" back to the
// connected agent. Discovery request consists of the proxies that are part
// of the cluster, but did not receive the connection from the agent. Agent
// will act on a discovery request attempting to establish connection to the
// proxies that were not discovered.
// findAndSend locates local proxies that have not yet been discovered by
// the connected agent and sends them in a discovery request over the tunnel.
func (c *remoteConn) findAndSend() error | {
	// Find all proxies that don't have a connection to a remote agent. If all
	// proxies have connections, return right away.
	disconnectedProxies, err := c.findDisconnectedProxies()
	if err != nil {
		return trace.Wrap(err)
	}
	if len(disconnectedProxies) == 0 {
		// nothing to do: every proxy already has an agent connection
		return nil
	}
	c.log.Debugf("Proxy %v sending %v discovery request with tunnel ID: %v and disconnected proxies: %v.",
		c.proxyName, string(c.tunnelType), c.tunnelID, Proxies(disconnectedProxies))
	req := discoveryRequest{
		TunnelID: c.tunnelID,
		Type:     string(c.tunnelType),
		Proxies:  disconnectedProxies,
	}
	err = c.sendDiscoveryRequests(req)
	if err != nil {
		return trace.Wrap(err)
	}
	return nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/reversetunnel/conn.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/reversetunnel/conn.go#L262-L295 | go | train | // findDisconnectedProxies finds proxies that do not have inbound reverse tunnel
// connections. | func (c *remoteConn) findDisconnectedProxies() ([]services.Server, error) | // findDisconnectedProxies finds proxies that do not have inbound reverse tunnel
// connections.
func (c *remoteConn) findDisconnectedProxies() ([]services.Server, error) | {
// Find all proxies that have connection from the remote domain.
conns, err := c.accessPoint.GetTunnelConnections(c.clusterName, services.SkipValidation())
if err != nil {
return nil, trace.Wrap(err)
}
connected := make(map[string]bool)
for _, conn := range conns {
if c.isOnline(conn) {
connected[conn.GetProxyName()] = true
}
}
// Build a list of local proxies that do not have a remote connection to them.
var missing []services.Server
proxies, err := c.accessPoint.GetProxies()
if err != nil {
return nil, trace.Wrap(err)
}
for i := range proxies {
proxy := proxies[i]
// A proxy should never add itself to the list of missing proxies.
if proxy.GetName() == c.proxyName {
continue
}
if !connected[proxy.GetName()] {
missing = append(missing, proxy)
}
}
return missing, nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/reversetunnel/conn.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/reversetunnel/conn.go#L298-L317 | go | train | // sendDiscoveryRequests sends a discovery request with missing proxies. | func (c *remoteConn) sendDiscoveryRequests(req discoveryRequest) error | // sendDiscoveryRequests sends a discovery request with missing proxies.
// sendDiscoveryRequests sends a discovery request with missing proxies.
func (c *remoteConn) sendDiscoveryRequests(req discoveryRequest) error | {
	discoveryCh, err := c.openDiscoveryChannel()
	if err != nil {
		return trace.Wrap(err)
	}
	// Marshal and send the request. If the connection failed, mark the
	// connection as invalid so it will be removed later.
	payload, err := marshalDiscoveryRequest(req)
	if err != nil {
		return trace.Wrap(err)
	}
	// wantReply is false: discovery requests are fire-and-forget
	_, err = discoveryCh.SendRequest(chanDiscoveryReq, false, payload)
	if err != nil {
		c.markInvalid(err)
		return trace.Wrap(err)
	}
	return nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/reversetunnel/conn.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/reversetunnel/conn.go#L341-L356 | go | train | // TunnelAuthDialer connects to the Auth Server through the reverse tunnel. | func TunnelAuthDialer(proxyAddr string, sshConfig *ssh.ClientConfig) auth.DialContext | // TunnelAuthDialer connects to the Auth Server through the reverse tunnel.
// TunnelAuthDialer connects to the Auth Server through the reverse tunnel.
func TunnelAuthDialer(proxyAddr string, sshConfig *ssh.ClientConfig) auth.DialContext | {
	return func(ctx context.Context, network string, addr string) (net.Conn, error) {
		// Connect to the reverse tunnel server.
		// NOTE(review): the ctx/network/addr arguments are ignored here;
		// the dial always targets proxyAddr over TCP — confirm callers
		// do not expect them to be honored.
		dialer := proxy.DialerFromEnvironment(proxyAddr)
		sconn, err := dialer.Dial("tcp", proxyAddr, sshConfig)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		// Ask the proxy to forward this connection to its auth server.
		conn, err := connectProxyTransport(sconn.Conn, RemoteAuthServer)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		return conn, nil
	}
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/reversetunnel/conn.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/reversetunnel/conn.go#L360-L387 | go | train | // connectProxyTransport opens a channel over the remote tunnel and connects
// to the requested host. | func connectProxyTransport(sconn ssh.Conn, addr string) (net.Conn, error) | // connectProxyTransport opens a channel over the remote tunnel and connects
// to the requested host.
func connectProxyTransport(sconn ssh.Conn, addr string) (net.Conn, error) | {
channel, _, err := sconn.OpenChannel(chanTransport, nil)
if err != nil {
return nil, trace.Wrap(err)
}
// Send a special SSH out-of-band request called "teleport-transport"
// the agent on the other side will create a new TCP/IP connection to
// 'addr' on its network and will start proxying that connection over
// this SSH channel.
ok, err := channel.SendRequest(chanTransportDialReq, true, []byte(addr))
if err != nil {
return nil, trace.Wrap(err)
}
if !ok {
defer channel.Close()
// Pull the error message from the tunnel client (remote cluster)
// passed to us via stderr.
errMessage, _ := ioutil.ReadAll(channel.Stderr())
if errMessage == nil {
errMessage = []byte("failed connecting to " + addr)
}
return nil, trace.Errorf(strings.TrimSpace(string(errMessage)))
}
return utils.NewChConn(sconn, channel), nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/reversetunnel/conn.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/reversetunnel/conn.go#L392-L528 | go | train | // proxyTransport runs either in the agent or reverse tunnel itself. It's
// used to establish connections from remote clusters into the main cluster
// or for remote nodes that have no direct network access to the cluster. | func proxyTransport(p *transportParams) | // proxyTransport runs either in the agent or reverse tunnel itself. It's
// used to establish connections from remote clusters into the main cluster
// or for remote nodes that have no direct network access to the cluster.
// proxyTransport runs either in the agent or reverse tunnel itself. It's
// used to establish connections from remote clusters into the main cluster
// or for remote nodes that have no direct network access to the cluster.
// It waits for a dial request on the channel, resolves the target (auth
// server, kube proxy, local node, or a literal address), dials it and then
// proxies bytes in both directions until either side closes.
func proxyTransport(p *transportParams) | {
	defer p.channel.Close()
	// Always push space into stderr to make sure the caller can always
	// safely call read (stderr) without blocking. This stderr is only used
	// to request proxying of TCP/IP via reverse tunnel.
	fmt.Fprint(p.channel.Stderr(), " ")
	// Wait for a request to come in from the other side telling the server
	// where to dial to.
	var req *ssh.Request
	select {
	case <-p.closeContext.Done():
		return
	case req = <-p.requestCh:
		if req == nil {
			return
		}
	case <-time.After(defaults.DefaultDialTimeout):
		p.log.Warnf("Transport request failed: timed out waiting for request.")
		return
	}
	server := string(req.Payload)
	var servers []string
	// Handle special non-resolvable addresses first.
	switch server {
	// Connect to an Auth Server.
	case RemoteAuthServer:
		authServers, err := p.authClient.GetAuthServers()
		if err != nil {
			p.log.Errorf("Transport request failed: unable to get list of Auth Servers: %v.", err)
			req.Reply(false, []byte("connection rejected: failed to connect to auth server"))
			return
		}
		if len(authServers) == 0 {
			p.log.Errorf("Transport request failed: no auth servers found.")
			req.Reply(false, []byte("connection rejected: failed to connect to auth server"))
			return
		}
		for _, as := range authServers {
			servers = append(servers, as.GetAddr())
		}
	// Connect to the Kubernetes proxy.
	case RemoteKubeProxy:
		if p.component == teleport.ComponentReverseTunnelServer {
			req.Reply(false, []byte("connection rejected: no remote kubernetes proxy"))
			return
		}
		// If Kubernetes is not configured, reject the connection.
		if p.kubeDialAddr.IsEmpty() {
			req.Reply(false, []byte("connection rejected: configure kubernetes proxy for this cluster."))
			return
		}
		servers = append(servers, p.kubeDialAddr.Addr)
	// LocalNode requests are for the single server running in the agent pool.
	case LocalNode:
		if p.component == teleport.ComponentReverseTunnelServer {
			req.Reply(false, []byte("connection rejected: no local node"))
			return
		}
		if p.server == nil {
			req.Reply(false, []byte("connection rejected: server missing"))
			return
		}
		if p.sconn == nil {
			req.Reply(false, []byte("connection rejected: server connection missing"))
			return
		}
		req.Reply(true, []byte("Connected."))
		// Hand connection off to the SSH server.
		p.server.HandleConnection(utils.NewChConn(p.sconn, p.channel))
		return
	default:
		servers = append(servers, server)
	}
	p.log.Debugf("Received out-of-band proxy transport request: %v", servers)
	// Loop over all servers and try and connect to one of them.
	var err error
	var conn net.Conn
	for _, s := range servers {
		conn, err = net.Dial("tcp", s)
		if err == nil {
			break
		}
		// Log the reason the connection failed.
		p.log.Debugf(trace.DebugReport(err))
	}
	// If all net.Dial attempts failed, write the last connection error to stderr
	// of the caller (via SSH channel) so the error will be propagated all the
	// way back to the client (tsh or ssh).
	if err != nil {
		fmt.Fprint(p.channel.Stderr(), err.Error())
		req.Reply(false, []byte(err.Error()))
		return
	}
	// Dial was successful.
	req.Reply(true, []byte("Connected."))
	p.log.Debugf("Successfully dialed to %v, start proxying.", server)
	// Shuttle bytes in both directions; each copier reports completion on
	// errorCh so we know when both halves are done.
	errorCh := make(chan error, 2)
	go func() {
		// Make sure that we close the client connection on a channel
		// close, otherwise the other goroutine would never know
		// as it will block on read from the connection.
		defer conn.Close()
		_, err := io.Copy(conn, p.channel)
		errorCh <- err
	}()
	go func() {
		_, err := io.Copy(p.channel, conn)
		errorCh <- err
	}()
	// Wait for both copier goroutines (or context cancellation).
	for i := 0; i < 2; i++ {
		select {
		case err := <-errorCh:
			if err != nil && err != io.EOF {
				p.log.Warnf("Proxy transport failed: %v %T.", trace.DebugReport(err), err)
			}
		case <-p.closeContext.Done():
			p.log.Warnf("Proxy transport failed: closing context.")
			return
		}
	}
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L179-L184 | go | train | // Close closes resources associated with connector | func (c *Connector) Close() error | // Close closes resources associated with connector
func (c *Connector) Close() error | {
if c.Client != nil {
return c.Close()
}
return nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L279-L288 | go | train | // getConnectors returns a copy of the identities registered for auth server | func (process *TeleportProcess) getConnectors() []*Connector | // getConnectors returns a copy of the identities registered for auth server
func (process *TeleportProcess) getConnectors() []*Connector | {
process.Lock()
defer process.Unlock()
out := make([]*Connector, 0, len(process.connectors))
for role := range process.connectors {
out = append(out, process.connectors[role])
}
return out
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L292-L297 | go | train | // addConnector adds connector to registered connectors list,
// it will overwrite the connector for the same role | func (process *TeleportProcess) addConnector(connector *Connector) | // addConnector adds connector to registered connectors list,
// it will overwrite the connector for the same role
func (process *TeleportProcess) addConnector(connector *Connector) | {
process.Lock()
defer process.Unlock()
process.connectors[connector.ClientIdentity.ID.Role] = connector
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L302-L345 | go | train | // GetIdentity returns the process identity (credentials to the auth server) for a given
// teleport Role. A teleport process can have any combination of 3 roles: auth, node, proxy
// and they have their own identities | func (process *TeleportProcess) GetIdentity(role teleport.Role) (i *auth.Identity, err error) | // GetIdentity returns the process identity (credentials to the auth server) for a given
// teleport Role. A teleport process can have any combination of 3 roles: auth, node, proxy
// and they have their own identities
func (process *TeleportProcess) GetIdentity(role teleport.Role) (i *auth.Identity, err error) | {
var found bool
process.Lock()
defer process.Unlock()
i, found = process.Identities[role]
if found {
return i, nil
}
i, err = process.storage.ReadIdentity(auth.IdentityCurrent, role)
id := auth.IdentityID{
Role: role,
HostUUID: process.Config.HostUUID,
NodeName: process.Config.Hostname,
}
if err != nil {
if !trace.IsNotFound(err) {
return nil, trace.Wrap(err)
}
if role == teleport.RoleAdmin {
// for admin identity use local auth server
// because admin identity is requested by auth server
// itself
principals, dnsNames, err := process.getAdditionalPrincipals(role)
if err != nil {
return nil, trace.Wrap(err)
}
i, err = auth.GenerateIdentity(process.localAuth, id, principals, dnsNames)
} else {
// try to locate static identity provided in the file
i, err = process.findStaticIdentity(id)
if err != nil {
return nil, trace.Wrap(err)
}
process.Infof("Found static identity %v in the config file, writing to disk.", &id)
if err = process.storage.WriteIdentity(auth.IdentityCurrent, *i); err != nil {
return nil, trace.Wrap(err)
}
}
}
process.Identities[role] = i
return i, nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L379-L406 | go | train | // Run starts teleport processes, waits for signals
// and handles internal process reloads. | func Run(ctx context.Context, cfg Config, newTeleport NewProcess) error | // Run starts teleport processes, waits for signals
// and handles internal process reloads.
// Run starts teleport processes, waits for signals
// and handles internal process reloads.
func Run(ctx context.Context, cfg Config, newTeleport NewProcess) error | {
	if newTeleport == nil {
		newTeleport = newTeleportProcess
	}
	// Hand a copy of the config to the new process so the original cfg
	// stays pristine for subsequent reloads below.
	copyCfg := cfg
	srv, err := newTeleport(&copyCfg)
	if err != nil {
		return trace.Wrap(err, "initialization failed")
	}
	if srv == nil {
		return trace.BadParameter("process has returned nil server")
	}
	if err := srv.Start(); err != nil {
		return trace.Wrap(err, "startup failed")
	}
	// Wait and reload until called exit.
	for {
		srv, err = waitAndReload(ctx, cfg, srv, newTeleport)
		if err != nil {
			// This error means that was a clean shutdown
			// and no reload is necessary.
			if err == ErrTeleportExited {
				return nil
			}
			return trace.Wrap(err)
		}
	}
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L486-L662 | go | train | // NewTeleport takes the daemon configuration, instantiates all required services
// and starts them under a supervisor, returning the supervisor object. | func NewTeleport(cfg *Config) (*TeleportProcess, error) | // NewTeleport takes the daemon configuration, instantiates all required services
// and starts them under a supervisor, returning the supervisor object.
func NewTeleport(cfg *Config) (*TeleportProcess, error) | {
// before we do anything reset the SIGINT handler back to the default
system.ResetInterruptSignalHandler()
if err := validateConfig(cfg); err != nil {
return nil, trace.Wrap(err, "configuration error")
}
// create the data directory if it's missing
_, err := os.Stat(cfg.DataDir)
if os.IsNotExist(err) {
err := os.MkdirAll(cfg.DataDir, os.ModeDir|0700)
if err != nil {
return nil, trace.ConvertSystemError(err)
}
}
if len(cfg.FileDescriptors) == 0 {
cfg.FileDescriptors, err = importFileDescriptors()
if err != nil {
return nil, trace.Wrap(err)
}
}
// if there's no host uuid initialized yet, try to read one from the
// one of the identities
cfg.HostUUID, err = utils.ReadHostUUID(cfg.DataDir)
if err != nil {
if !trace.IsNotFound(err) {
return nil, trace.Wrap(err)
}
if len(cfg.Identities) != 0 {
cfg.HostUUID = cfg.Identities[0].ID.HostUUID
log.Infof("Taking host UUID from first identity: %v.", cfg.HostUUID)
} else {
cfg.HostUUID = uuid.New()
log.Infof("Generating new host UUID: %v.", cfg.HostUUID)
}
if err := utils.WriteHostUUID(cfg.DataDir, cfg.HostUUID); err != nil {
return nil, trace.Wrap(err)
}
}
// if user started auth and another service (without providing the auth address for
// that service, the address of the in-process auth will be used
if cfg.Auth.Enabled && len(cfg.AuthServers) == 0 {
cfg.AuthServers = []utils.NetAddr{cfg.Auth.SSHAddr}
}
// if user did not provide auth domain name, use this host's name
if cfg.Auth.Enabled && cfg.Auth.ClusterName == nil {
cfg.Auth.ClusterName, err = services.NewClusterName(services.ClusterNameSpecV2{
ClusterName: cfg.Hostname,
})
if err != nil {
return nil, trace.Wrap(err)
}
}
processID := fmt.Sprintf("%v", nextProcessID())
supervisor := NewSupervisor(processID)
storage, err := auth.NewProcessStorage(supervisor.ExitContext(), filepath.Join(cfg.DataDir, teleport.ComponentProcess))
if err != nil {
return nil, trace.Wrap(err)
}
if cfg.Clock == nil {
cfg.Clock = clockwork.NewRealClock()
}
process := &TeleportProcess{
Clock: cfg.Clock,
Supervisor: supervisor,
Config: cfg,
Identities: make(map[teleport.Role]*auth.Identity),
connectors: make(map[teleport.Role]*Connector),
importedDescriptors: cfg.FileDescriptors,
storage: storage,
id: processID,
keyPairs: make(map[keyPairKey]KeyPair),
}
process.Entry = logrus.WithFields(logrus.Fields{
trace.Component: teleport.Component(teleport.ComponentProcess, process.id),
})
serviceStarted := false
if !cfg.DiagnosticAddr.IsEmpty() {
if err := process.initDiagnosticService(); err != nil {
return nil, trace.Wrap(err)
}
} else {
warnOnErr(process.closeImportedDescriptors(teleport.ComponentDiagnostic))
}
// Create a process wide key generator that will be shared. This is so the
// key generator can pre-generate keys and share these across services.
if cfg.Keygen == nil {
precomputeCount := native.PrecomputedNum
// in case if not auth or proxy services are enabled,
// there is no need to precompute any SSH keys in the pool
if !cfg.Auth.Enabled && !cfg.Proxy.Enabled {
precomputeCount = 0
}
var err error
cfg.Keygen, err = native.New(process.ExitContext(), native.PrecomputeKeys(precomputeCount))
if err != nil {
return nil, trace.Wrap(err)
}
}
// Produce global TeleportReadyEvent
// when all components have started
eventMapping := EventMapping{
Out: TeleportReadyEvent,
}
if cfg.Auth.Enabled {
eventMapping.In = append(eventMapping.In, AuthTLSReady)
}
if cfg.SSH.Enabled {
eventMapping.In = append(eventMapping.In, NodeSSHReady)
}
if cfg.Proxy.Enabled {
eventMapping.In = append(eventMapping.In, ProxySSHReady)
}
process.RegisterEventMapping(eventMapping)
if cfg.Auth.Enabled {
if err := process.initAuthService(); err != nil {
return nil, trace.Wrap(err)
}
serviceStarted = true
} else {
warnOnErr(process.closeImportedDescriptors(teleport.ComponentAuth))
}
if cfg.SSH.Enabled {
if err := process.initSSH(); err != nil {
return nil, err
}
serviceStarted = true
} else {
warnOnErr(process.closeImportedDescriptors(teleport.ComponentNode))
}
if cfg.Proxy.Enabled {
eventMapping.In = append(eventMapping.In, ProxySSHReady)
if err := process.initProxy(); err != nil {
return nil, err
}
serviceStarted = true
} else {
warnOnErr(process.closeImportedDescriptors(teleport.ComponentProxy))
}
process.RegisterFunc("common.rotate", process.periodicSyncRotationState)
if !serviceStarted {
return nil, trace.BadParameter("all services failed to start")
}
// create the new pid file only after started successfully
if cfg.PIDFile != "" {
f, err := os.OpenFile(cfg.PIDFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
if err != nil {
return nil, trace.ConvertSystemError(err)
}
fmt.Fprintf(f, "%v", os.Getpid())
defer f.Close()
}
// notify parent process that this process has started
go process.notifyParent()
return process, nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L666-L699 | go | train | // notifyParent notifies parent process that this process has started
// by writing to in-memory pipe used by communication channel. | func (process *TeleportProcess) notifyParent() | // notifyParent notifies parent process that this process has started
// by writing to in-memory pipe used by communication channel.
func (process *TeleportProcess) notifyParent() | {
signalPipe, err := process.importSignalPipe()
if err != nil {
if !trace.IsNotFound(err) {
process.Warningf("Failed to import signal pipe")
}
process.Debugf("No signal pipe to import, must be first Teleport process.")
return
}
defer signalPipe.Close()
ctx, cancel := context.WithTimeout(process.ExitContext(), signalPipeTimeout)
defer cancel()
eventC := make(chan Event, 1)
process.WaitForEvent(ctx, TeleportReadyEvent, eventC)
select {
case <-eventC:
process.Infof("New service has started successfully.")
case <-ctx.Done():
process.Errorf("Timeout waiting for a forked process to start: %v. Initiating self-shutdown.", ctx.Err())
if err := process.Close(); err != nil {
process.Warningf("Failed to shutdown process: %v.", err)
}
return
}
if err := process.writeToSignalPipe(signalPipe, fmt.Sprintf("Process %v has started.", os.Getpid())); err != nil {
process.Warningf("Failed to write to signal pipe: %v", err)
// despite the failure, it's ok to proceed,
// it could mean that the parent process has crashed and the pipe
// is no longer valid.
}
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L714-L730 | go | train | // adminCreds returns admin UID and GID settings based on the OS | func adminCreds() (*int, *int, error) | // adminCreds returns admin UID and GID settings based on the OS
func adminCreds() (*int, *int, error) | {
if runtime.GOOS != teleport.LinuxOS {
return nil, nil, nil
}
// if the user member of adm linux group,
// make audit log folder readable by admins
isAdmin, err := utils.IsGroupMember(teleport.LinuxAdminGID)
if err != nil {
return nil, nil, trace.Wrap(err)
}
if !isAdmin {
return nil, nil, nil
}
uid := os.Getuid()
gid := teleport.LinuxAdminGID
return &uid, &gid, nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L735-L775 | go | train | // initUploadHandler initializes upload handler based on the config settings,
// currently the only upload handler supported is S3
// the call can return trace.NotFOund if no upload handler is setup | func initUploadHandler(auditConfig services.AuditConfig) (events.UploadHandler, error) | // initUploadHandler initializes upload handler based on the config settings,
// currently the only upload handler supported is S3
// the call can return trace.NotFOund if no upload handler is setup
func initUploadHandler(auditConfig services.AuditConfig) (events.UploadHandler, error) | {
if auditConfig.AuditSessionsURI == "" {
return nil, trace.NotFound("no upload handler is setup")
}
uri, err := utils.ParseSessionsURI(auditConfig.AuditSessionsURI)
if err != nil {
return nil, trace.Wrap(err)
}
switch uri.Scheme {
case teleport.SchemeS3:
region := auditConfig.Region
if uriRegion := uri.Query().Get(teleport.Region); uriRegion != "" {
region = uriRegion
}
handler, err := s3sessions.NewHandler(s3sessions.Config{
Bucket: uri.Host,
Region: region,
Path: uri.Path,
})
if err != nil {
return nil, trace.Wrap(err)
}
return handler, nil
case teleport.SchemeFile:
if err := os.MkdirAll(uri.Path, teleport.SharedDirMode); err != nil {
return nil, trace.ConvertSystemError(err)
}
handler, err := filesessions.NewHandler(filesessions.Config{
Directory: uri.Path,
})
if err != nil {
return nil, trace.Wrap(err)
}
return handler, nil
default:
return nil, trace.BadParameter(
"unsupported scheme for audit_sesions_uri: %q, currently supported schemes are %q and %q",
uri.Scheme, teleport.SchemeS3, teleport.SchemeFile)
}
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L779-L843 | go | train | // initExternalLog initializes external storage, if the storage is not
// setup, returns nil | func initExternalLog(auditConfig services.AuditConfig) (events.IAuditLog, error) | // initExternalLog initializes external storage, if the storage is not
// setup, returns nil
func initExternalLog(auditConfig services.AuditConfig) (events.IAuditLog, error) | {
if auditConfig.AuditTableName != "" {
log.Warningf("Please note that 'audit_table_name' is deprecated and will be removed in several releases. Use audit_events_uri: '%v://%v' instead.", dynamo.GetName(), auditConfig.AuditTableName)
if len(auditConfig.AuditEventsURI) != 0 {
return nil, trace.BadParameter("Detected configuration specifying 'audit_table_name' and 'audit_events_uri' at the same time. Please migrate your config to use 'audit_events_uri' only.")
}
auditConfig.AuditEventsURI = []string{fmt.Sprintf("%v://%v", dynamo.GetName(), auditConfig.AuditTableName)}
}
if len(auditConfig.AuditEventsURI) > 0 && !auditConfig.ShouldUploadSessions() {
return nil, trace.BadParameter("please specify audit_sessions_uri when using external audit backends")
}
var hasNonFileLog bool
var loggers []events.IAuditLog
for _, eventsURI := range auditConfig.AuditEventsURI {
uri, err := utils.ParseSessionsURI(eventsURI)
if err != nil {
return nil, trace.Wrap(err)
}
switch uri.Scheme {
case dynamo.GetName():
hasNonFileLog = true
logger, err := dynamoevents.New(dynamoevents.Config{
Tablename: uri.Host,
Region: auditConfig.Region,
})
if err != nil {
return nil, trace.Wrap(err)
}
loggers = append(loggers, logger)
case teleport.SchemeFile:
if err := os.MkdirAll(uri.Path, teleport.SharedDirMode); err != nil {
return nil, trace.ConvertSystemError(err)
}
logger, err := events.NewFileLog(events.FileLogConfig{
Dir: uri.Path,
})
if err != nil {
return nil, trace.Wrap(err)
}
loggers = append(loggers, logger)
default:
return nil, trace.BadParameter(
"unsupported scheme for audit_events_uri: %q, currently supported schemes are %q and %q",
uri.Scheme, dynamo.GetName(), teleport.SchemeFile)
}
}
// only file external loggers are prohibited (they are not supposed
// to be used on their own, only in combo with external loggers)
// they also don't implement certain features, so they are going
// to be inefficient
switch len(loggers) {
case 0:
return nil, trace.NotFound("no external log is defined")
case 1:
if !hasNonFileLog {
return nil, trace.BadParameter("file:// log can not be used on it's own, can be only used in combination with external session logs, e.g. dynamodb://")
}
return loggers[0], nil
default:
if !hasNonFileLog {
return nil, trace.BadParameter("file:// log can not be used on it's own, can be only used in combination with external session logs, e.g. dynamodb://")
}
return events.NewMultiLog(loggers...), nil
}
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L846-L1146 | go | train | // initAuthService can be called to initialize auth server service | func (process *TeleportProcess) initAuthService() error | // initAuthService can be called to initialize auth server service
func (process *TeleportProcess) initAuthService() error | {
var err error
cfg := process.Config
// Initialize the storage back-ends for keys, events and records
b, err := process.initAuthStorage()
if err != nil {
return trace.Wrap(err)
}
process.backend = b
// create the audit log, which will be consuming (and recording) all events
// and recording all sessions.
if cfg.Auth.NoAudit {
// this is for teleconsole
process.auditLog = events.NewDiscardAuditLog()
warningMessage := "Warning: Teleport audit and session recording have been " +
"turned off. This is dangerous, you will not be able to view audit events " +
"or save and playback recorded sessions."
process.Warn(warningMessage)
} else {
// check if session recording has been disabled. note, we will continue
// logging audit events, we just won't record sessions.
recordSessions := true
if cfg.Auth.ClusterConfig.GetSessionRecording() == services.RecordOff {
recordSessions = false
warningMessage := "Warning: Teleport session recording have been turned off. " +
"This is dangerous, you will not be able to save and playback sessions."
process.Warn(warningMessage)
}
auditConfig := cfg.Auth.ClusterConfig.GetAuditConfig()
uploadHandler, err := initUploadHandler(auditConfig)
if err != nil {
if !trace.IsNotFound(err) {
return trace.Wrap(err)
}
}
externalLog, err := initExternalLog(auditConfig)
if err != nil {
if !trace.IsNotFound(err) {
return trace.Wrap(err)
}
}
auditServiceConfig := events.AuditLogConfig{
Context: process.ExitContext(),
DataDir: filepath.Join(cfg.DataDir, teleport.LogsDir),
RecordSessions: recordSessions,
ServerID: cfg.HostUUID,
UploadHandler: uploadHandler,
ExternalLog: externalLog,
}
auditServiceConfig.UID, auditServiceConfig.GID, err = adminCreds()
if err != nil {
return trace.Wrap(err)
}
process.auditLog, err = events.NewAuditLog(auditServiceConfig)
if err != nil {
return trace.Wrap(err)
}
}
// first, create the AuthServer
authServer, err := auth.Init(auth.InitConfig{
Backend: b,
Authority: cfg.Keygen,
ClusterConfiguration: cfg.ClusterConfiguration,
ClusterConfig: cfg.Auth.ClusterConfig,
ClusterName: cfg.Auth.ClusterName,
AuthServiceName: cfg.Hostname,
DataDir: cfg.DataDir,
HostUUID: cfg.HostUUID,
NodeName: cfg.Hostname,
Authorities: cfg.Auth.Authorities,
ReverseTunnels: cfg.ReverseTunnels,
Trust: cfg.Trust,
Presence: cfg.Presence,
Events: cfg.Events,
Provisioner: cfg.Provisioner,
Identity: cfg.Identity,
Access: cfg.Access,
StaticTokens: cfg.Auth.StaticTokens,
Roles: cfg.Auth.Roles,
AuthPreference: cfg.Auth.Preference,
OIDCConnectors: cfg.OIDCConnectors,
AuditLog: process.auditLog,
CipherSuites: cfg.CipherSuites,
})
if err != nil {
return trace.Wrap(err)
}
process.setLocalAuth(authServer)
connector, err := process.connectToAuthService(teleport.RoleAdmin)
if err != nil {
return trace.Wrap(err)
}
// second, create the API Server: it's actually a collection of API servers,
// each serving requests for a "role" which is assigned to every connected
// client based on their certificate (user, server, admin, etc)
sessionService, err := session.New(b)
if err != nil {
return trace.Wrap(err)
}
authorizer, err := auth.NewAuthorizer(authServer.Access, authServer.Identity, authServer.Trust)
if err != nil {
return trace.Wrap(err)
}
apiConf := &auth.APIConfig{
AuthServer: authServer,
SessionService: sessionService,
Authorizer: authorizer,
AuditLog: process.auditLog,
}
var authCache auth.AuthCache
if process.Config.CachePolicy.Enabled {
cache, err := process.newAccessCache(accessCacheConfig{
services: authServer.AuthServices,
setup: cache.ForAuth,
cacheName: []string{teleport.ComponentAuth},
inMemory: true,
events: true,
})
if err != nil {
return trace.Wrap(err)
}
authCache = cache
} else {
authCache = authServer.AuthServices
}
authServer.SetCache(authCache)
log := logrus.WithFields(logrus.Fields{
trace.Component: teleport.Component(teleport.ComponentAuth, process.id),
})
// Register TLS endpoint of the auth service
tlsConfig, err := connector.ServerIdentity.TLSConfig(cfg.CipherSuites)
if err != nil {
return trace.Wrap(err)
}
tlsServer, err := auth.NewTLSServer(auth.TLSServerConfig{
TLS: tlsConfig,
APIConfig: *apiConf,
LimiterConfig: cfg.Auth.Limiter,
AccessPoint: authCache,
Component: teleport.Component(teleport.ComponentAuth, process.id),
})
if err != nil {
return trace.Wrap(err)
}
// auth server listens on SSH and TLS, reusing the same socket
listener, err := process.importOrCreateListener(teleport.ComponentAuth, cfg.Auth.SSHAddr.Addr)
if err != nil {
log.Errorf("PID: %v Failed to bind to address %v: %v, exiting.", os.Getpid(), cfg.Auth.SSHAddr.Addr, err)
return trace.Wrap(err)
}
// clean up unused descriptors passed for proxy, but not used by it
warnOnErr(process.closeImportedDescriptors(teleport.ComponentAuth))
if cfg.Auth.EnableProxyProtocol {
log.Infof("Starting Auth service with PROXY protocol support.")
}
mux, err := multiplexer.New(multiplexer.Config{
EnableProxyProtocol: cfg.Auth.EnableProxyProtocol,
Listener: listener,
ID: teleport.Component(process.id),
})
if err != nil {
listener.Close()
return trace.Wrap(err)
}
go mux.Serve()
process.RegisterCriticalFunc("auth.tls", func() error {
utils.Consolef(cfg.Console, teleport.ComponentAuth, "Auth service is starting on %v.", cfg.Auth.SSHAddr.Addr)
// since tlsServer.Serve is a blocking call, we emit this even right before
// the service has started
process.BroadcastEvent(Event{Name: AuthTLSReady, Payload: nil})
err := tlsServer.Serve(mux.TLS())
if err != nil && err != http.ErrServerClosed {
log.Warningf("TLS server exited with error: %v.", err)
}
return nil
})
process.RegisterFunc("auth.heartbeat.broadcast", func() error {
// Heart beat auth server presence, this is not the best place for this
// logic, consolidate it into auth package later
connector, err := process.connectToAuthService(teleport.RoleAdmin)
if err != nil {
return trace.Wrap(err)
}
// External integrations rely on this event:
process.BroadcastEvent(Event{Name: AuthIdentityEvent, Payload: connector})
process.onExit("auth.broadcast", func(payload interface{}) {
connector.Close()
})
return nil
})
// figure out server public address
authAddr := cfg.Auth.SSHAddr.Addr
host, port, err := net.SplitHostPort(authAddr)
if err != nil {
return trace.Wrap(err)
}
// advertise-ip is explicitly set:
if process.Config.AdvertiseIP != "" {
ahost, aport, err := utils.ParseAdvertiseAddr(process.Config.AdvertiseIP)
if err != nil {
return trace.Wrap(err)
}
// if port is not set in the advertise addr, use the default one
if aport == "" {
aport = port
}
authAddr = fmt.Sprintf("%v:%v", ahost, aport)
} else {
// advertise-ip is not set, while the CA is listening on 0.0.0.0? lets try
// to guess the 'advertise ip' then:
if net.ParseIP(host).IsUnspecified() {
ip, err := utils.GuessHostIP()
if err != nil {
log.Warn(err)
} else {
authAddr = net.JoinHostPort(ip.String(), port)
}
}
log.Warnf("Configuration setting auth_service/advertise_ip is not set. guessing %v.", authAddr)
}
heartbeat, err := srv.NewHeartbeat(srv.HeartbeatConfig{
Mode: srv.HeartbeatModeAuth,
Context: process.ExitContext(),
Component: teleport.ComponentAuth,
Announcer: authServer,
GetServerInfo: func() (services.Server, error) {
srv := services.ServerV2{
Kind: services.KindAuthServer,
Version: services.V2,
Metadata: services.Metadata{
Namespace: defaults.Namespace,
Name: process.Config.HostUUID,
},
Spec: services.ServerSpecV2{
Addr: authAddr,
Hostname: process.Config.Hostname,
},
}
state, err := process.storage.GetState(teleport.RoleAdmin)
if err != nil {
if !trace.IsNotFound(err) {
log.Warningf("Failed to get rotation state: %v.", err)
return nil, trace.Wrap(err)
}
} else {
srv.Spec.Rotation = state.Spec.Rotation
}
srv.SetTTL(process, defaults.ServerAnnounceTTL)
return &srv, nil
},
KeepAlivePeriod: defaults.ServerKeepAliveTTL,
AnnouncePeriod: defaults.ServerAnnounceTTL/2 + utils.RandomDuration(defaults.ServerAnnounceTTL/10),
CheckPeriod: defaults.HeartbeatCheckPeriod,
ServerTTL: defaults.ServerAnnounceTTL,
})
if err != nil {
return trace.Wrap(err)
}
process.RegisterFunc("auth.heartbeat", heartbeat.Run)
// execute this when process is asked to exit:
process.onExit("auth.shutdown", func(payload interface{}) {
// The listeners have to be closed here, because if shutdown
// was called before the start of the http server,
// the http server would have not started tracking the listeners
// and http.Shutdown will do nothing.
if mux != nil {
warnOnErr(mux.Close())
}
if listener != nil {
warnOnErr(listener.Close())
}
if payload == nil {
log.Info("Shutting down immediately.")
warnOnErr(tlsServer.Close())
} else {
log.Info("Shutting down gracefully.")
ctx := payloadContext(payload)
warnOnErr(tlsServer.Shutdown(ctx))
}
log.Info("Exited.")
})
return nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L1160-L1170 | go | train | // onExit allows individual services to register a callback function which will be
// called when Teleport Process is asked to exit. Usually services terminate themselves
// when the callback is called | func (process *TeleportProcess) onExit(serviceName string, callback func(interface{})) | // onExit allows individual services to register a callback function which will be
// called when Teleport Process is asked to exit. Usually services terminate themselves
// when the callback is called
func (process *TeleportProcess) onExit(serviceName string, callback func(interface{})) | {
process.RegisterFunc(serviceName, func() error {
eventC := make(chan Event)
process.WaitForEvent(context.TODO(), TeleportExitEvent, eventC)
select {
case event := <-eventC:
callback(event.Payload)
}
return nil
})
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L1209-L1264 | go | train | // newAccessCache returns new local cache access point | func (process *TeleportProcess) newAccessCache(cfg accessCacheConfig) (*cache.Cache, error) | // newAccessCache returns new local cache access point
func (process *TeleportProcess) newAccessCache(cfg accessCacheConfig) (*cache.Cache, error) | {
if err := cfg.CheckAndSetDefaults(); err != nil {
return nil, trace.Wrap(err)
}
var cacheBackend backend.Backend
if cfg.inMemory {
mem, err := memory.New(memory.Config{
Context: process.ExitContext(),
EventsOff: !cfg.events,
Mirror: true,
})
if err != nil {
return nil, trace.Wrap(err)
}
cacheBackend = mem
} else {
path := filepath.Join(append([]string{process.Config.DataDir, "cache"}, cfg.cacheName...)...)
if err := os.MkdirAll(path, teleport.SharedDirMode); err != nil {
return nil, trace.ConvertSystemError(err)
}
liteBackend, err := lite.NewWithConfig(process.ExitContext(),
lite.Config{
Path: path,
EventsOff: !cfg.events,
Memory: false,
Mirror: true,
PollStreamPeriod: 100 * time.Millisecond,
})
if err != nil {
return nil, trace.Wrap(err)
}
cacheBackend = liteBackend
}
reporter, err := backend.NewReporter(backend.ReporterConfig{
Component: teleport.ComponentCache,
Backend: cacheBackend,
TrackTopRequests: process.Config.Debug,
})
if err != nil {
return nil, trace.Wrap(err)
}
return cache.New(cfg.setup(cache.Config{
Context: process.ExitContext(),
Backend: reporter,
Events: cfg.services,
ClusterConfig: cfg.services,
Provisioner: cfg.services,
Trust: cfg.services,
Users: cfg.services,
Access: cfg.services,
Presence: cfg.services,
Component: teleport.Component(append(cfg.cacheName, process.id, teleport.ComponentCache)...),
MetricComponent: teleport.Component(append(cfg.cacheName, teleport.ComponentCache)...),
}))
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L1268-L1278 | go | train | // setupCachePolicy sets up cache policy based on teleport configuration,
// it is a wrapper function, that sets up configuration | func (process *TeleportProcess) setupCachePolicy(in cache.SetupConfigFn) cache.SetupConfigFn | // setupCachePolicy sets up cache policy based on teleport configuration,
// it is a wrapper function, that sets up configuration
func (process *TeleportProcess) setupCachePolicy(in cache.SetupConfigFn) cache.SetupConfigFn | {
return func(c cache.Config) cache.Config {
config := in(c)
config.PreferRecent = cache.PreferRecent{
Enabled: process.Config.CachePolicy.Enabled,
NeverExpires: process.Config.CachePolicy.NeverExpires,
MaxTTL: process.Config.CachePolicy.TTL,
}
return config
}
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L1281-L1283 | go | train | // newAccessPointCache returns new instance of access point configured for proxy | func (process *TeleportProcess) newLocalCacheForProxy(clt auth.ClientI, cacheName []string) (auth.AccessPoint, error) | // newAccessPointCache returns new instance of access point configured for proxy
func (process *TeleportProcess) newLocalCacheForProxy(clt auth.ClientI, cacheName []string) (auth.AccessPoint, error) | {
return process.newLocalCache(clt, cache.ForProxy, cacheName)
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L1286-L1300 | go | train | // newAccessPointCache returns new instance of access point | func (process *TeleportProcess) newLocalCache(clt auth.ClientI, setupConfig cache.SetupConfigFn, cacheName []string) (auth.AccessPoint, error) | // newAccessPointCache returns new instance of access point
func (process *TeleportProcess) newLocalCache(clt auth.ClientI, setupConfig cache.SetupConfigFn, cacheName []string) (auth.AccessPoint, error) | {
// if caching is disabled, return access point
if !process.Config.CachePolicy.Enabled {
return clt, nil
}
cache, err := process.newAccessCache(accessCacheConfig{
services: clt,
setup: process.setupCachePolicy(setupConfig),
cacheName: cacheName,
})
if err != nil {
return nil, trace.Wrap(err)
}
return auth.NewWrapper(clt, cache), nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L1318-L1484 | go | train | // initSSH initializes the "node" role, i.e. a simple SSH server connected to the auth server. | func (process *TeleportProcess) initSSH() error | // initSSH initializes the "node" role, i.e. a simple SSH server connected to the auth server.
func (process *TeleportProcess) initSSH() error | {
process.registerWithAuthServer(teleport.RoleNode, SSHIdentityEvent)
eventsC := make(chan Event)
process.WaitForEvent(process.ExitContext(), SSHIdentityEvent, eventsC)
log := logrus.WithFields(logrus.Fields{
trace.Component: teleport.Component(teleport.ComponentNode, process.id),
})
var conn *Connector
var s *regular.Server
var agentPool *reversetunnel.AgentPool
process.RegisterCriticalFunc("ssh.node", func() error {
var ok bool
var event Event
select {
case event = <-eventsC:
log.Debugf("Received event %q.", event.Name)
case <-process.ExitContext().Done():
log.Debugf("Process is exiting.")
return nil
}
conn, ok = (event.Payload).(*Connector)
if !ok {
return trace.BadParameter("unsupported connector type: %T", event.Payload)
}
cfg := process.Config
limiter, err := limiter.NewLimiter(cfg.SSH.Limiter)
if err != nil {
return trace.Wrap(err)
}
authClient, err := process.newLocalCache(conn.Client, cache.ForNode, []string{teleport.ComponentNode})
if err != nil {
return trace.Wrap(err)
}
// make sure the namespace exists
namespace := services.ProcessNamespace(cfg.SSH.Namespace)
_, err = authClient.GetNamespace(namespace)
if err != nil {
if trace.IsNotFound(err) {
return trace.NotFound(
"namespace %v is not found, ask your system administrator to create this namespace so you can register nodes there.", namespace)
}
return trace.Wrap(err)
}
s, err = regular.New(cfg.SSH.Addr,
cfg.Hostname,
[]ssh.Signer{conn.ServerIdentity.KeySigner},
authClient,
cfg.DataDir,
cfg.AdvertiseIP,
process.proxyPublicAddr(),
regular.SetLimiter(limiter),
regular.SetShell(cfg.SSH.Shell),
regular.SetAuditLog(conn.Client),
regular.SetSessionServer(conn.Client),
regular.SetLabels(cfg.SSH.Labels, cfg.SSH.CmdLabels),
regular.SetNamespace(namespace),
regular.SetPermitUserEnvironment(cfg.SSH.PermitUserEnvironment),
regular.SetCiphers(cfg.Ciphers),
regular.SetKEXAlgorithms(cfg.KEXAlgorithms),
regular.SetMACAlgorithms(cfg.MACAlgorithms),
regular.SetPAMConfig(cfg.SSH.PAM),
regular.SetRotationGetter(process.getRotation),
regular.SetUseTunnel(conn.UseTunnel),
)
if err != nil {
return trace.Wrap(err)
}
// init uploader service for recording SSH node, if proxy is not
// enabled on this node, because proxy stars uploader service as well
if !cfg.Proxy.Enabled {
if err := process.initUploaderService(authClient, conn.Client); err != nil {
return trace.Wrap(err)
}
}
if !conn.UseTunnel {
listener, err := process.importOrCreateListener(teleport.ComponentNode, cfg.SSH.Addr.Addr)
if err != nil {
return trace.Wrap(err)
}
// clean up unused descriptors passed for proxy, but not used by it
warnOnErr(process.closeImportedDescriptors(teleport.ComponentNode))
log.Infof("Service is starting on %v %v.", cfg.SSH.Addr.Addr, process.Config.CachePolicy)
utils.Consolef(cfg.Console, teleport.ComponentNode, "Service is starting on %v.", cfg.SSH.Addr.Addr)
// Start the SSH server. This kicks off updating labels, starting the
// heartbeat, and accepting connections.
go s.Serve(listener)
// Broadcast that the node has started.
process.BroadcastEvent(Event{Name: NodeSSHReady, Payload: nil})
} else {
// Start the SSH server. This kicks off updating labels and starting the
// heartbeat.
s.Start()
// Start upserting reverse tunnel in a loop while the process is running.
go process.upsertTunnelForever(conn)
// Create and start an agent pool.
agentPool, err = reversetunnel.NewAgentPool(reversetunnel.AgentPoolConfig{
Component: teleport.ComponentNode,
HostUUID: conn.ServerIdentity.ID.HostUUID,
Client: conn.Client,
AccessPoint: conn.Client,
HostSigners: []ssh.Signer{conn.ServerIdentity.KeySigner},
Cluster: conn.ServerIdentity.Cert.Extensions[utils.CertExtensionAuthority],
Server: s,
})
if err != nil {
return trace.Wrap(err)
}
err = agentPool.Start()
if err != nil {
return trace.Wrap(err)
}
log.Infof("Service is starting in tunnel mode.")
// Broadcast that the node has started.
process.BroadcastEvent(Event{Name: NodeSSHReady, Payload: nil})
}
// Block and wait while the node is running.
s.Wait()
if conn.UseTunnel {
agentPool.Wait()
}
log.Infof("Exited.")
return nil
})
// Execute this when process is asked to exit.
process.onExit("ssh.shutdown", func(payload interface{}) {
if payload == nil {
log.Infof("Shutting down immediately.")
if s != nil {
warnOnErr(s.Close())
}
} else {
log.Infof("Shutting down gracefully.")
if s != nil {
warnOnErr(s.Shutdown(payloadContext(payload)))
}
}
if conn.UseTunnel {
agentPool.Stop()
}
log.Infof("Exited.")
})
return nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L1521-L1538 | go | train | // registerWithAuthServer uses one time provisioning token obtained earlier
// from the server to get a pair of SSH keys signed by Auth server host
// certificate authority | func (process *TeleportProcess) registerWithAuthServer(role teleport.Role, eventName string) | // registerWithAuthServer uses one time provisioning token obtained earlier
// from the server to get a pair of SSH keys signed by Auth server host
// certificate authority
func (process *TeleportProcess) registerWithAuthServer(role teleport.Role, eventName string) | {
serviceName := strings.ToLower(role.String())
process.RegisterCriticalFunc(fmt.Sprintf("register.%v", serviceName), func() error {
connector, err := process.reconnectToAuthService(role)
if err != nil {
return trace.Wrap(err)
}
process.onExit(fmt.Sprintf("auth.client.%v", serviceName), func(interface{}) {
process.Debugf("Closed client for %v.", role)
err := connector.Client.Close()
if err != nil {
process.Debugf("Failed to close client: %v", err)
}
})
process.BroadcastEvent(Event{Name: eventName, Payload: connector})
return nil
})
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L1599-L1694 | go | train | // initDiagnosticService starts diagnostic service currently serving healthz
// and prometheus endpoints | func (process *TeleportProcess) initDiagnosticService() error | // initDiagnosticService starts diagnostic service currently serving healthz
// and prometheus endpoints
func (process *TeleportProcess) initDiagnosticService() error | {
mux := http.NewServeMux()
mux.Handle("/metrics", prometheus.Handler())
if process.Config.Debug {
log.Infof("Adding diagnostic debugging handlers. To connect with profiler, use `go tool pprof %v`.", process.Config.DiagnosticAddr.Addr)
mux.HandleFunc("/debug/pprof/", pprof.Index)
mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
}
mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
roundtrip.ReplyJSON(w, http.StatusOK, map[string]interface{}{"status": "ok"})
})
log := logrus.WithFields(logrus.Fields{
trace.Component: teleport.Component(teleport.ComponentDiagnostic, process.id),
})
// Create a state machine that will process and update the internal state of
// Teleport based off Events. Use this state machine to return return the
// status from the /readyz endpoint.
ps := newProcessState(process)
process.RegisterFunc("readyz.monitor", func() error {
// Start loop to monitor for events that are used to update Teleport state.
eventCh := make(chan Event, 1024)
process.WaitForEvent(process.ExitContext(), TeleportReadyEvent, eventCh)
process.WaitForEvent(process.ExitContext(), TeleportDegradedEvent, eventCh)
process.WaitForEvent(process.ExitContext(), TeleportOKEvent, eventCh)
for {
select {
case e := <-eventCh:
ps.Process(e)
case <-process.ExitContext().Done():
log.Debugf("Teleport is exiting, returning.")
return nil
}
}
})
mux.HandleFunc("/readyz", func(w http.ResponseWriter, r *http.Request) {
switch ps.GetState() {
// 503
case stateDegraded:
roundtrip.ReplyJSON(w, http.StatusServiceUnavailable, map[string]interface{}{
"status": "teleport is in a degraded state, check logs for details",
})
// 400
case stateRecovering:
roundtrip.ReplyJSON(w, http.StatusBadRequest, map[string]interface{}{
"status": "teleport is recovering from a degraded state, check logs for details",
})
// 200
case stateOK:
roundtrip.ReplyJSON(w, http.StatusOK, map[string]interface{}{
"status": "ok",
})
}
})
listener, err := process.importOrCreateListener(teleport.ComponentDiagnostic, process.Config.DiagnosticAddr.Addr)
if err != nil {
return trace.Wrap(err)
}
warnOnErr(process.closeImportedDescriptors(teleport.ComponentDiagnostic))
server := &http.Server{
Handler: mux,
}
log.Infof("Starting diagnostic service on %v.", process.Config.DiagnosticAddr.Addr)
process.RegisterFunc("diagnostic.service", func() error {
err := server.Serve(listener)
if err != nil && err != http.ErrServerClosed {
log.Warningf("Diagnostic server exited with error: %v.", err)
}
return nil
})
process.onExit("diagnostic.shutdown", func(payload interface{}) {
if payload == nil {
log.Infof("Shutting down immediately.")
warnOnErr(server.Close())
} else {
log.Infof("Shutting down gracefully.")
ctx := payloadContext(payload)
warnOnErr(server.Shutdown(ctx))
}
log.Infof("Exited.")
})
return nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L1698-L1748 | go | train | // getAdditionalPrincipals returns a list of additional principals to add
// to role's service certificates. | func (process *TeleportProcess) getAdditionalPrincipals(role teleport.Role) ([]string, []string, error) | // getAdditionalPrincipals returns a list of additional principals to add
// to role's service certificates.
func (process *TeleportProcess) getAdditionalPrincipals(role teleport.Role) ([]string, []string, error) | {
var principals []string
var dnsNames []string
if process.Config.Hostname != "" {
principals = append(principals, process.Config.Hostname)
}
var addrs []utils.NetAddr
switch role {
case teleport.RoleProxy:
addrs = append(process.Config.Proxy.PublicAddrs, utils.NetAddr{Addr: reversetunnel.RemoteKubeProxy})
addrs = append(addrs, process.Config.Proxy.SSHPublicAddrs...)
addrs = append(addrs, process.Config.Proxy.TunnelPublicAddrs...)
addrs = append(addrs, process.Config.Proxy.Kube.PublicAddrs...)
// Automatically add wildcards for every proxy public address for k8s SNI routing
if process.Config.Proxy.Kube.Enabled {
for _, publicAddr := range utils.JoinAddrSlices(process.Config.Proxy.PublicAddrs, process.Config.Proxy.Kube.PublicAddrs) {
host, err := utils.Host(publicAddr.Addr)
if err != nil {
return nil, nil, trace.Wrap(err)
}
if ip := net.ParseIP(host); ip == nil {
dnsNames = append(dnsNames, "*."+host)
}
}
}
case teleport.RoleAuth, teleport.RoleAdmin:
addrs = process.Config.Auth.PublicAddrs
case teleport.RoleNode:
addrs = process.Config.SSH.PublicAddrs
// If advertise IP is set, add it to the list of principals. Otherwise
// add in the default (0.0.0.0) which will be replaced by the Auth Server
// when a host certificate is issued.
if process.Config.AdvertiseIP != "" {
advertiseIP, err := utils.ParseAddr(process.Config.AdvertiseIP)
if err != nil {
return nil, nil, trace.Wrap(err)
}
addrs = append(addrs, *advertiseIP)
} else {
addrs = append(addrs, process.Config.SSH.Addr)
}
}
for _, addr := range addrs {
host, err := utils.Host(addr.Addr)
if err != nil {
return nil, nil, trace.Wrap(err)
}
principals = append(principals, host)
}
return principals, dnsNames, nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L1755-L1790 | go | train | // initProxy gets called if teleport runs with 'proxy' role enabled.
// this means it will do two things:
// 1. serve a web UI
// 2. proxy SSH connections to nodes running with 'node' role
// 3. take care of reverse tunnels | func (process *TeleportProcess) initProxy() error | // initProxy gets called if teleport runs with 'proxy' role enabled.
// this means it will do two things:
// 1. serve a web UI
// 2. proxy SSH connections to nodes running with 'node' role
// 3. take care of reverse tunnels
func (process *TeleportProcess) initProxy() error | {
// if no TLS key was provided for the web UI, generate a self signed cert
if process.Config.Proxy.TLSKey == "" && !process.Config.Proxy.DisableTLS && !process.Config.Proxy.DisableWebService {
err := initSelfSignedHTTPSCert(process.Config)
if err != nil {
return trace.Wrap(err)
}
}
process.registerWithAuthServer(teleport.RoleProxy, ProxyIdentityEvent)
process.RegisterCriticalFunc("proxy.init", func() error {
eventsC := make(chan Event)
process.WaitForEvent(process.ExitContext(), ProxyIdentityEvent, eventsC)
var event Event
select {
case event = <-eventsC:
process.Debugf("Received event %q.", event.Name)
case <-process.ExitContext().Done():
process.Debugf("Process is exiting.")
return nil
}
conn, ok := (event.Payload).(*Connector)
if !ok {
return trace.BadParameter("unsupported connector type: %T", event.Payload)
}
err := process.initProxyEndpoint(conn)
if err != nil {
return trace.Wrap(err)
}
return nil
})
return nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L1815-L1899 | go | train | // setupProxyListeners sets up web proxy listeners based on the configuration | func (process *TeleportProcess) setupProxyListeners() (*proxyListeners, error) | // setupProxyListeners sets up web proxy listeners based on the configuration
func (process *TeleportProcess) setupProxyListeners() (*proxyListeners, error) | {
cfg := process.Config
process.Debugf("Setup Proxy: Web Proxy Address: %v, Reverse Tunnel Proxy Address: %v", cfg.Proxy.WebAddr.Addr, cfg.Proxy.ReverseTunnelListenAddr.Addr)
var err error
var listeners proxyListeners
if cfg.Proxy.Kube.Enabled {
process.Debugf("Setup Proxy: turning on Kubernetes proxy.")
listener, err := process.importOrCreateListener(teleport.Component(teleport.ComponentProxy, "kube"), cfg.Proxy.Kube.ListenAddr.Addr)
if err != nil {
return nil, trace.Wrap(err)
}
listeners.kube = listener
}
switch {
case cfg.Proxy.DisableWebService && cfg.Proxy.DisableReverseTunnel:
process.Debugf("Setup Proxy: Reverse tunnel proxy and web proxy are disabled.")
return &listeners, nil
case cfg.Proxy.ReverseTunnelListenAddr.Equals(cfg.Proxy.WebAddr) && !cfg.Proxy.DisableTLS:
process.Debugf("Setup Proxy: Reverse tunnel proxy and web proxy listen on the same port, multiplexing is on.")
listener, err := process.importOrCreateListener(teleport.Component(teleport.ComponentProxy, "tunnel", "web"), cfg.Proxy.WebAddr.Addr)
if err != nil {
return nil, trace.Wrap(err)
}
listeners.mux, err = multiplexer.New(multiplexer.Config{
EnableProxyProtocol: cfg.Proxy.EnableProxyProtocol,
Listener: listener,
DisableTLS: cfg.Proxy.DisableWebService,
DisableSSH: cfg.Proxy.DisableReverseTunnel,
ID: teleport.Component(teleport.ComponentProxy, "tunnel", "web", process.id),
})
if err != nil {
listener.Close()
return nil, trace.Wrap(err)
}
listeners.web = listeners.mux.TLS()
listeners.reverseTunnel = listeners.mux.SSH()
go listeners.mux.Serve()
return &listeners, nil
case cfg.Proxy.EnableProxyProtocol && !cfg.Proxy.DisableWebService && !cfg.Proxy.DisableTLS:
process.Debugf("Setup Proxy: Proxy protocol is enabled for web service, multiplexing is on.")
listener, err := process.importOrCreateListener(teleport.Component(teleport.ComponentProxy, "web"), cfg.Proxy.WebAddr.Addr)
if err != nil {
return nil, trace.Wrap(err)
}
listeners.mux, err = multiplexer.New(multiplexer.Config{
EnableProxyProtocol: cfg.Proxy.EnableProxyProtocol,
Listener: listener,
DisableTLS: false,
DisableSSH: true,
ID: teleport.Component(teleport.ComponentProxy, "web", process.id),
})
if err != nil {
listener.Close()
return nil, trace.Wrap(err)
}
listeners.web = listeners.mux.TLS()
listeners.reverseTunnel, err = process.importOrCreateListener(teleport.Component(teleport.ComponentProxy, "tunnel"), cfg.Proxy.ReverseTunnelListenAddr.Addr)
if err != nil {
listener.Close()
listeners.Close()
return nil, trace.Wrap(err)
}
go listeners.mux.Serve()
return &listeners, nil
default:
process.Debugf("Proxy reverse tunnel are listening on the separate ports.")
if !cfg.Proxy.DisableReverseTunnel {
listeners.reverseTunnel, err = process.importOrCreateListener(teleport.Component(teleport.ComponentProxy, "tunnel"), cfg.Proxy.ReverseTunnelListenAddr.Addr)
if err != nil {
listeners.Close()
return nil, trace.Wrap(err)
}
}
if !cfg.Proxy.DisableWebService {
listeners.web, err = process.importOrCreateListener(teleport.Component(teleport.ComponentProxy, "web"), cfg.Proxy.WebAddr.Addr)
if err != nil {
listeners.Close()
return nil, trace.Wrap(err)
}
}
return &listeners, nil
}
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L2225-L2281 | go | train | // initAuthStorage initializes the storage backend for the auth service. | func (process *TeleportProcess) initAuthStorage() (bk backend.Backend, err error) | // initAuthStorage initializes the storage backend for the auth service.
func (process *TeleportProcess) initAuthStorage() (bk backend.Backend, err error) | {
bc := &process.Config.Auth.StorageConfig
process.Debugf("Using %v backend.", bc.Type)
switch bc.Type {
case lite.GetName():
bk, err = lite.New(context.TODO(), bc.Params)
// legacy bolt backend, import all data into SQLite and return
// SQLite data
case boltbk.GetName():
litebk, err := lite.New(context.TODO(), bc.Params)
if err != nil {
return nil, trace.Wrap(err)
}
err = legacy.Import(context.TODO(), litebk, func() (legacy.Exporter, error) {
return boltbk.New(legacy.Params(bc.Params))
})
if err != nil {
return nil, trace.Wrap(err)
}
bk = litebk
// legacy filesystem backend, import all data into SQLite and return
// SQLite data
case dir.GetName():
litebk, err := lite.New(context.TODO(), bc.Params)
if err != nil {
return nil, trace.Wrap(err)
}
err = legacy.Import(context.TODO(), litebk, func() (legacy.Exporter, error) {
return dir.New(legacy.Params(bc.Params))
})
if err != nil {
return nil, trace.Wrap(err)
}
bk = litebk
// DynamoDB backend:
case dynamo.GetName():
bk, err = dynamo.New(context.TODO(), bc.Params)
// etcd backend:
case etcdbk.GetName():
bk, err = etcdbk.New(context.TODO(), bc.Params)
default:
err = trace.BadParameter("unsupported secrets storage type: %q", bc.Type)
}
if err != nil {
return nil, trace.Wrap(err)
}
reporter, err := backend.NewReporter(backend.ReporterConfig{
Component: teleport.ComponentBackend,
Backend: backend.NewSanitizer(bk),
TrackTopRequests: process.Config.Debug,
})
if err != nil {
return nil, trace.Wrap(err)
}
process.setReporter(reporter)
return reporter, nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L2296-L2306 | go | train | // WaitWithContext waits until all internal services stop. | func (process *TeleportProcess) WaitWithContext(ctx context.Context) | // WaitWithContext waits until all internal services stop.
func (process *TeleportProcess) WaitWithContext(ctx context.Context) | {
local, cancel := context.WithCancel(ctx)
go func() {
defer cancel()
process.Supervisor.Wait()
}()
select {
case <-local.Done():
return
}
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L2310-L2326 | go | train | // StartShutdown launches non-blocking graceful shutdown process that signals
// completion, returns context that will be closed once the shutdown is done | func (process *TeleportProcess) StartShutdown(ctx context.Context) context.Context | // StartShutdown launches non-blocking graceful shutdown process that signals
// completion, returns context that will be closed once the shutdown is done
func (process *TeleportProcess) StartShutdown(ctx context.Context) context.Context | {
process.BroadcastEvent(Event{Name: TeleportExitEvent, Payload: ctx})
localCtx, cancel := context.WithCancel(ctx)
go func() {
defer cancel()
process.Supervisor.Wait()
process.Debugf("All supervisor functions are completed.")
localAuth := process.getLocalAuth()
if localAuth != nil {
if err := process.localAuth.Close(); err != nil {
process.Warningf("Failed closing auth server: %v.", err)
}
}
}()
go process.printShutdownStatus(localCtx)
return localCtx
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L2330-L2337 | go | train | // Shutdown launches graceful shutdown process and waits
// for it to complete | func (process *TeleportProcess) Shutdown(ctx context.Context) | // Shutdown launches graceful shutdown process and waits
// for it to complete
func (process *TeleportProcess) Shutdown(ctx context.Context) | {
localCtx := process.StartShutdown(ctx)
// wait until parent context closes
select {
case <-localCtx.Done():
process.Debugf("Process completed.")
}
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L2340-L2356 | go | train | // Close broadcasts close signals and exits immediately | func (process *TeleportProcess) Close() error | // Close broadcasts close signals and exits immediately
func (process *TeleportProcess) Close() error | {
process.BroadcastEvent(Event{Name: TeleportExitEvent})
process.Config.Keygen.Close()
var errors []error
localAuth := process.getLocalAuth()
if localAuth != nil {
errors = append(errors, process.localAuth.Close())
}
if process.storage != nil {
errors = append(errors, process.storage.Close())
}
return trace.NewAggregate(errors...)
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/service/service.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/service/service.go#L2401-L2432 | go | train | // initSelfSignedHTTPSCert generates and self-signs a TLS key+cert pair for https connection
// to the proxy server. | func initSelfSignedHTTPSCert(cfg *Config) (err error) | // initSelfSignedHTTPSCert generates and self-signs a TLS key+cert pair for https connection
// to the proxy server.
func initSelfSignedHTTPSCert(cfg *Config) (err error) | {
log.Warningf("No TLS Keys provided, using self signed certificate.")
keyPath := filepath.Join(cfg.DataDir, defaults.SelfSignedKeyPath)
certPath := filepath.Join(cfg.DataDir, defaults.SelfSignedCertPath)
cfg.Proxy.TLSKey = keyPath
cfg.Proxy.TLSCert = certPath
// return the existing pair if they have already been generated:
_, err = tls.LoadX509KeyPair(certPath, keyPath)
if err == nil {
return nil
}
if !os.IsNotExist(err) {
return trace.Wrap(err, "unrecognized error reading certs")
}
log.Warningf("Generating self signed key and cert to %v %v.", keyPath, certPath)
creds, err := utils.GenerateSelfSignedCert([]string{cfg.Hostname, "localhost"})
if err != nil {
return trace.Wrap(err)
}
if err := ioutil.WriteFile(keyPath, creds.PrivateKey, 0600); err != nil {
return trace.Wrap(err, "error writing key PEM")
}
if err := ioutil.WriteFile(certPath, creds.Cert, 0600); err != nil {
return trace.Wrap(err, "error writing key PEM")
}
return nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/kube/utils/utils.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/kube/utils/utils.go#L16-L36 | go | train | // GetKubeClient returns instance of client to the kubernetes cluster
// using in-cluster configuration if available and falling back to
// configuration file under configPath otherwise | func GetKubeClient(configPath string) (client *kubernetes.Clientset, config *rest.Config, err error) | // GetKubeClient returns instance of client to the kubernetes cluster
// using in-cluster configuration if available and falling back to
// configuration file under configPath otherwise
func GetKubeClient(configPath string) (client *kubernetes.Clientset, config *rest.Config, err error) | {
// if path to kubeconfig was provided, init config from it
if configPath != "" {
config, err = clientcmd.BuildConfigFromFlags("", configPath)
if err != nil {
return nil, nil, trace.Wrap(err)
}
} else {
// otherwise attempt to init as if connecting from cluster
config, err = rest.InClusterConfig()
if err != nil {
return nil, nil, trace.Wrap(err)
}
}
client, err = kubernetes.NewForConfig(config)
if err != nil {
return nil, nil, trace.Wrap(err)
}
return client, config, nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/kube/utils/utils.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/kube/utils/utils.go#L40-L46 | go | train | // GetKubeConfig returns kubernetes configuration
// from configPath file or, by default reads in-cluster configuration | func GetKubeConfig(configPath string) (*rest.Config, error) | // GetKubeConfig returns kubernetes configuration
// from configPath file or, by default reads in-cluster configuration
func GetKubeConfig(configPath string) (*rest.Config, error) | {
// if path to kubeconfig was provided, init config from it
if configPath != "" {
return clientcmd.BuildConfigFromFlags("", configPath)
}
return rest.InClusterConfig()
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/services/provisioning.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/provisioning.go#L33-L39 | go | train | // MustCreateProvisionToken returns a new valid provision token
// or panics, used in testes | func MustCreateProvisionToken(token string, roles teleport.Roles, expires time.Time) ProvisionToken | // MustCreateProvisionToken returns a new valid provision token
// or panics, used in testes
func MustCreateProvisionToken(token string, roles teleport.Roles, expires time.Time) ProvisionToken | {
t, err := NewProvisionToken(token, roles, expires)
if err != nil {
panic(err)
}
return t
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/services/provisioning.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/provisioning.go#L42-L59 | go | train | // NewProvisionToken returns a new instance of provision token resource | func NewProvisionToken(token string, roles teleport.Roles, expires time.Time) (ProvisionToken, error) | // NewProvisionToken returns a new instance of provision token resource
func NewProvisionToken(token string, roles teleport.Roles, expires time.Time) (ProvisionToken, error) | {
t := &ProvisionTokenV2{
Kind: KindToken,
Version: V2,
Metadata: Metadata{
Name: token,
Expires: &expires,
Namespace: defaults.Namespace,
},
Spec: ProvisionTokenSpecV2{
Roles: roles,
},
}
if err := t.CheckAndSetDefaults(); err != nil {
return nil, trace.Wrap(err)
}
return t, nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/services/provisioning.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/provisioning.go#L97-L106 | go | train | // ProvisionTokensToV1 converts provision tokens to V1 list | func ProvisionTokensToV1(in []ProvisionToken) []ProvisionTokenV1 | // ProvisionTokensToV1 converts provision tokens to V1 list
func ProvisionTokensToV1(in []ProvisionToken) []ProvisionTokenV1 | {
if in == nil {
return nil
}
out := make([]ProvisionTokenV1, len(in))
for i := range in {
out[i] = *in[i].V1()
}
return out
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/services/provisioning.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/provisioning.go#L109-L118 | go | train | // ProvisionTokensFromV1 converts V1 provision tokens to resource list | func ProvisionTokensFromV1(in []ProvisionTokenV1) []ProvisionToken | // ProvisionTokensFromV1 converts V1 provision tokens to resource list
func ProvisionTokensFromV1(in []ProvisionTokenV1) []ProvisionToken | {
if in == nil {
return nil
}
out := make([]ProvisionToken, len(in))
for i := range in {
out[i] = in[i].V2()
}
return out
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/services/provisioning.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/provisioning.go#L121-L134 | go | train | // CheckAndSetDefaults checks and set default values for any missing fields. | func (p *ProvisionTokenV2) CheckAndSetDefaults() error | // CheckAndSetDefaults checks and set default values for any missing fields.
func (p *ProvisionTokenV2) CheckAndSetDefaults() error | {
p.Kind = KindToken
err := p.Metadata.CheckAndSetDefaults()
if err != nil {
return trace.Wrap(err)
}
if len(p.Spec.Roles) == 0 {
return trace.BadParameter("provisioning token is missing roles")
}
if err := teleport.Roles(p.Spec.Roles).Check(); err != nil {
return trace.Wrap(err)
}
return nil
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/services/provisioning.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/provisioning.go#L149-L151 | go | train | // SetRoles sets teleport roles | func (p *ProvisionTokenV2) SetRoles(r teleport.Roles) | // SetRoles sets teleport roles
func (p *ProvisionTokenV2) SetRoles(r teleport.Roles) | {
p.Spec.Roles = r
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/services/provisioning.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/provisioning.go#L184-L190 | go | train | // V1 returns V1 version of the resource | func (p *ProvisionTokenV2) V1() *ProvisionTokenV1 | // V1 returns V1 version of the resource
func (p *ProvisionTokenV2) V1() *ProvisionTokenV1 | {
return &ProvisionTokenV1{
Roles: p.Spec.Roles,
Expires: p.Metadata.Expiry(),
Token: p.Metadata.Name,
}
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/services/provisioning.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/provisioning.go#L198-L200 | go | train | // SetExpiry sets expiry time for the object | func (p *ProvisionTokenV2) SetExpiry(expires time.Time) | // SetExpiry sets expiry time for the object
func (p *ProvisionTokenV2) SetExpiry(expires time.Time) | {
p.Metadata.SetExpiry(expires)
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/services/provisioning.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/provisioning.go#L208-L210 | go | train | // SetTTL sets Expires header using realtime clock | func (p *ProvisionTokenV2) SetTTL(clock clockwork.Clock, ttl time.Duration) | // SetTTL sets Expires header using realtime clock
func (p *ProvisionTokenV2) SetTTL(clock clockwork.Clock, ttl time.Duration) | {
p.Metadata.SetTTL(clock, ttl)
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/services/provisioning.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/provisioning.go#L223-L229 | go | train | // String returns the human readable representation of a provisioning token. | func (p ProvisionTokenV2) String() string | // String returns the human readable representation of a provisioning token.
func (p ProvisionTokenV2) String() string | {
expires := "never"
if !p.Expiry().IsZero() {
expires = p.Expiry().String()
}
return fmt.Sprintf("ProvisionToken(Roles=%v, Expires=%v)", p.Spec.Roles, expires)
} |
gravitational/teleport | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | lib/services/provisioning.go | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/provisioning.go#L237-L253 | go | train | // V2 returns V2 version of the resource | func (p *ProvisionTokenV1) V2() *ProvisionTokenV2 | // V2 returns V2 version of the resource
func (p *ProvisionTokenV1) V2() *ProvisionTokenV2 | {
t := &ProvisionTokenV2{
Kind: KindToken,
Version: V2,
Metadata: Metadata{
Name: p.Token,
Namespace: defaults.Namespace,
},
Spec: ProvisionTokenSpecV2{
Roles: p.Roles,
},
}
if !p.Expires.IsZero() {
t.SetExpiry(p.Expires)
}
return t
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.