file_path stringlengths 3 280 | file_language stringclasses 66 values | content stringlengths 1 1.04M | repo_name stringlengths 5 92 | repo_stars int64 0 154k | repo_description stringlengths 0 402 | repo_primary_language stringclasses 108 values | developer_username stringlengths 1 25 | developer_name stringlengths 0 30 | developer_company stringlengths 0 82 |
|---|---|---|---|---|---|---|---|---|---|
src/vec.rs | Rust | //! Vector-like collection for indexed keys.
use std::{
ops::{Index, IndexMut},
slice::GetDisjointMutError,
};
use index_vec::Idx;
use crate::{
IndexedDomain, IndexedValue, ToIndex,
pointer::{ArcFamily, PointerFamily, RcFamily, RefFamily},
};
/// A fixed-sized vector with one value for each key in the domain.
pub struct IndexVec<'a, K: IndexedValue + 'a, V, P: PointerFamily<'a>> {
    // Dense storage: exactly one slot per index of `domain`, in index order.
    vec: Vec<V>,
    // Shared handle to the domain that defines the valid index range.
    pub(crate) domain: P::Pointer<IndexedDomain<K>>,
}
impl<'a, K, V, P> IndexVec<'a, K, V, P>
where
    K: IndexedValue + 'a,
    P: PointerFamily<'a>,
    V: Clone,
{
    /// Constructs a new vector where each index maps to the initial element `elem`.
    pub fn from_elem(elem: V, domain: &P::Pointer<IndexedDomain<K>>) -> Self {
        Self {
            // One clone of `elem` per index in the domain.
            vec: vec![elem; domain.len()],
            domain: domain.clone(),
        }
    }
}
impl<'a, K, V, P> IndexVec<'a, K, V, P>
where
    K: IndexedValue + 'a,
    P: PointerFamily<'a>,
{
    /// Constructs a new vector where each index maps to the output of `f(index)`.
    pub fn from_fn(f: impl FnMut(K::Index) -> V, domain: &P::Pointer<IndexedDomain<K>>) -> Self {
        // One value per domain index, generated in index order.
        let vec = domain.indices().map(f).collect();
        IndexVec {
            vec,
            domain: domain.clone(),
        }
    }

    /// Returns an immutable reference to a value for a given index.
    pub fn get<M>(&self, idx: impl ToIndex<K, M>) -> &V {
        let idx = idx.to_index(&self.domain);
        debug_assert!(self.domain.contains_index(idx));
        // SAFETY: `vec` was built with exactly `domain.len()` elements, so any
        // index contained in `domain` is in bounds. Note the containment check
        // is a `debug_assert!` only: passing an index from a different (larger)
        // domain in a release build is undefined behavior.
        unsafe { self.vec.get_unchecked(idx.index()) }
    }

    /// Returns a mutable reference to a value for a given index.
    pub fn get_mut<M>(&mut self, idx: impl ToIndex<K, M>) -> &mut V {
        let idx = idx.to_index(&self.domain);
        debug_assert!(self.domain.contains_index(idx));
        // SAFETY: same invariant as `get` — `vec.len() == domain.len()`.
        unsafe { self.vec.get_unchecked_mut(idx.index()) }
    }

    /// Returns an iterator over immutable references to the values.
    pub fn iter(&self) -> impl DoubleEndedIterator<Item = &V> + ExactSizeIterator<Item = &V> {
        self.vec.iter()
    }

    /// Returns an iterator over mutable references to the values.
    pub fn iter_mut(
        &mut self,
    ) -> impl DoubleEndedIterator<Item = &mut V> + ExactSizeIterator<Item = &mut V> {
        self.vec.iter_mut()
    }

    /// Returns the underlying vector as a slice.
    pub fn as_slice(&self) -> &[V] {
        &self.vec
    }

    /// Returns the underlying vector as a mutable slice.
    pub fn as_slice_mut(&mut self) -> &mut [V] {
        &mut self.vec
    }

    /// Returns multiple mutable references to disjoint indices.
    ///
    /// # Errors
    /// Returns [`GetDisjointMutError`] if not disjoint or in-bounds.
    pub fn get_disjoint_mut<const N: usize>(
        &mut self,
        indices: [K::Index; N],
    ) -> Result<[&mut V; N], GetDisjointMutError> {
        // Unlike `get`/`get_mut`, this delegates to the bounds-checked
        // `slice::get_disjoint_mut`, so invalid indices return an error
        // instead of being UB.
        self.vec.get_disjoint_mut(indices.map(Idx::index))
    }
}
impl<'a, K, V, P> Clone for IndexVec<'a, K, V, P>
where
    K: IndexedValue + 'a,
    P: PointerFamily<'a>,
    V: Clone,
{
    fn clone(&self) -> Self {
        IndexVec {
            vec: self.vec.clone(),
            domain: self.domain.clone(),
        }
    }

    /// Clones `source` into `self`, reusing `self.vec`'s allocation when possible.
    fn clone_from(&mut self, source: &Self) {
        self.vec.clone_from(&source.vec);
        // Fix: the domain must be copied as well — previously it was left
        // untouched, so `clone_from` was not equivalent to
        // `*self = source.clone()` when the two vectors were built over
        // different domains (leaving `vec` and `domain` inconsistent).
        self.domain = source.domain.clone();
    }
}
// NOTE(review): equality compares only the values, not the domains; two
// vectors over different domains with equal values compare equal — confirm
// this is the intended semantics.
impl<'a, K, V, P> PartialEq for IndexVec<'a, K, V, P>
where
    K: IndexedValue + 'a,
    P: PointerFamily<'a>,
    V: PartialEq,
{
    fn eq(&self, other: &Self) -> bool {
        self.vec == other.vec
    }
}
// `Eq` is sound because equality is delegated entirely to `Vec<V>`, which is
// `Eq` whenever `V: Eq`.
impl<'a, K, V, P> Eq for IndexVec<'a, K, V, P>
where
    K: IndexedValue + 'a,
    P: PointerFamily<'a>,
    V: Eq,
{
}
impl<'a, K, V, P> Index<K::Index> for IndexVec<'a, K, V, P>
where
    K: IndexedValue + 'a,
    P: PointerFamily<'a>,
{
    type Output = V;

    /// Delegates to [`IndexVec::get`]; bounds are only checked in debug builds.
    fn index(&self, index: K::Index) -> &Self::Output {
        self.get(index)
    }
}
impl<'a, K, V, P> IndexMut<K::Index> for IndexVec<'a, K, V, P>
where
    K: IndexedValue + 'a,
    P: PointerFamily<'a>,
{
    /// Delegates to [`IndexVec::get_mut`]; bounds are only checked in debug builds.
    fn index_mut(&mut self, index: K::Index) -> &mut Self::Output {
        self.get_mut(index)
    }
}
// Convenience aliases pairing `IndexVec` with each supported pointer family.
/// [`IndexVec`] specialized to the [`RcFamily`].
pub type RcIndexVec<K, V> = IndexVec<'static, K, V, RcFamily>;

/// [`IndexVec`] specialized to the [`ArcFamily`].
pub type ArcIndexVec<K, V> = IndexVec<'static, K, V, ArcFamily>;

/// [`IndexVec`] specialized to the [`RefFamily`].
pub type RefIndexVec<'a, K, V> = IndexVec<'a, K, V, RefFamily<'a>>;
| willcrichton/indexical | 56 | Human-friendly indexed collections | Rust | willcrichton | Will Crichton | Brown University |
crates/miniserve/src/lib.rs | Rust | #![warn(clippy::pedantic)]
use std::{
collections::HashMap,
io::{self},
net::{TcpListener, TcpStream},
sync::Arc,
thread,
};
/// Re-export for library clients.
pub use http;
/// Implementation details for HTTP.
mod protocol;
/// A request from a client, either a GET or a POST with a body.
#[derive(Debug, Clone)]
pub enum Request {
Get,
Post(String),
}
/// Content to give to a client, either HTML or JSON.
#[derive(Debug, Clone)]
pub enum Content {
Html(String),
Json(String),
}
/// Response to give to a client, either content or a status code for a failure (e.g. 404).
pub type Response = Result<Content, http::StatusCode>;
/// Trait alias for functions that can handle requests and return responses.
pub trait Handler: Fn(Request) -> Response + Send + Sync + 'static {}
impl<F> Handler for F where F: Fn(Request) -> Response + Send + Sync + 'static {}
/// The main server data structure.
#[derive(Default)]
pub struct Server {
/// Map from a route path (e.g., "/foo") to a handler function for that route.
routes: HashMap<String, Box<dyn Handler>>,
}
impl Server {
/// Creates a server with no routes.
#[must_use]
pub fn new() -> Self {
Server {
routes: HashMap::new(),
}
}
/// Adds a new route to the server.
#[must_use]
pub fn route<H: Handler>(mut self, route: impl Into<String>, handler: H) -> Self {
self.routes.insert(route.into(), Box::new(handler));
self
}
/// Runs the server by listening for connections and returning responses.
///
/// This function should never return.
///
/// # Panics
///
/// Panics if `127.0.0.1:3000` is not available.
pub fn run(self) {
let listener =
TcpListener::bind("127.0.0.1:3000").expect("Failed to connect to 127.0.0.1:3000");
let this = Arc::new(self);
for stream in listener.incoming().flatten() {
let this_ref = Arc::clone(&this);
thread::spawn(move || {
let _ = this_ref.handle(&stream);
});
}
}
fn handle(&self, stream: &TcpStream) -> io::Result<()> {
protocol::handle(stream, |route, request| {
self.routes.get(route).map(move |handler| handler(request))
})
}
}
| willcrichton/rqst-async | 0 | Rust | willcrichton | Will Crichton | Brown University | |
crates/miniserve/src/protocol.rs | Rust | //! Implementation details for HTTP.
//!
//! You should not need to deal with this module.
use std::{
io::{self, BufRead, BufReader, BufWriter, Write},
net::{Shutdown, TcpStream},
};
use http::StatusCode;
/// Serializes an `http::Response` into raw HTTP/1.1 wire bytes: status line,
/// headers, blank line, then the body.
pub fn stringify_response(response: http::Response<Vec<u8>>) -> Vec<u8> {
    let (parts, body) = response.into_parts();
    // Body length plus headroom for the status line and headers.
    let mut buf = Vec::with_capacity(body.len() + 256);
    buf.extend_from_slice(b"HTTP/1.1 ");
    buf.extend(parts.status.as_str().as_bytes());
    if let Some(reason) = parts.status.canonical_reason() {
        buf.extend_from_slice(b" ");
        buf.extend(reason.as_bytes());
    }
    buf.extend_from_slice(b"\r\n");
    for (name, value) in parts.headers {
        // `HeaderMap` iteration yields `None` for repeated values of the same
        // header name; only the first occurrence carries the name.
        // NOTE(review): the `None` case emits the value with no field name,
        // which is not a valid header line — confirm multi-valued headers are
        // never produced by this server.
        if let Some(name) = name {
            buf.extend(name.as_str().as_bytes());
            buf.extend_from_slice(b": ");
        }
        buf.extend(value.as_bytes());
        buf.extend_from_slice(b"\r\n");
    }
    // Blank line terminates the header section.
    buf.extend_from_slice(b"\r\n");
    buf.extend(body);
    buf
}
/// Attempts to parse a raw HTTP request from `src`.
///
/// Returns `Ok(None)` when more bytes are needed, `Ok(Some(req))` on success,
/// and `Err(response)` carrying a ready-to-send 400 response on parse failure.
#[allow(clippy::result_large_err)]
fn parse_request(src: &[u8]) -> Result<Option<http::Request<Vec<u8>>>, http::Response<Vec<u8>>> {
    // Up to 64 request headers are supported.
    let mut headers = [httparse::EMPTY_HEADER; 64];
    let mut parsed_req = httparse::Request::new(&mut headers);
    let Ok(status) = parsed_req.parse(src) else {
        return Err(make_response(
            StatusCode::BAD_REQUEST,
            "Failed to parse request",
        ));
    };
    let amt = match status {
        // `amt` is the number of bytes consumed by the request head.
        httparse::Status::Complete(amt) => amt,
        // Incomplete head: tell the caller to read more bytes.
        httparse::Status::Partial => return Ok(None),
    };
    // `method`/`path` are always `Some` once parsing is `Complete`.
    let Ok(method) = http::Method::try_from(parsed_req.method.unwrap()) else {
        return Err(make_response(
            StatusCode::BAD_REQUEST,
            "Failed to parse request",
        ));
    };
    // NOTE(review): everything after the head is treated as the body and
    // Content-Length is ignored, so a body split across reads may be
    // truncated — confirm this is acceptable for this toy server.
    let data = &src[amt..];
    let mut builder = http::Request::builder()
        .method(method)
        .version(http::Version::HTTP_11)
        .uri(parsed_req.path.unwrap());
    for header in parsed_req.headers {
        builder = builder.header(header.name, header.value);
    }
    Ok(Some(builder.body(data.to_vec()).unwrap()))
}
/// Builds a response with the given status whose body is the plain-text
/// explanation.
fn make_response(status: http::StatusCode, explanation: &str) -> http::Response<Vec<u8>> {
    let body = explanation.as_bytes().to_vec();
    http::Response::builder()
        .status(status)
        .body(body)
        .unwrap()
}
/// Translates a parsed HTTP request into a response by dispatching to the
/// user-provided route `callback`.
fn generate_response(
    req: http::Request<Vec<u8>>,
    callback: impl Fn(&str, crate::Request) -> Option<crate::Response>,
) -> http::Response<Vec<u8>> {
    let (parts, body) = req.into_parts();
    // Only GET and POST are supported.
    let request = match parts.method {
        http::Method::GET => crate::Request::Get,
        // NOTE(review): `unwrap()` panics on a non-UTF-8 POST body — confirm
        // a 400 response would not be more appropriate here.
        http::Method::POST => crate::Request::Post(String::from_utf8(body).unwrap()),
        _ => return make_response(StatusCode::METHOD_NOT_ALLOWED, "Not implemented"),
    };
    // `None` from the callback means no route matched the path.
    let Some(response_res) = callback(parts.uri.path(), request) else {
        return make_response(StatusCode::NOT_FOUND, "No valid route");
    };
    match response_res {
        Ok(content) => {
            // Content-Type follows the handler's content variant.
            let (body, ty) = match content {
                crate::Content::Html(body) => (body, "text/html"),
                crate::Content::Json(body) => (body, "application/json"),
            };
            http::Response::builder()
                .header("Content-Type", ty)
                .header("Content-Length", body.len())
                .body(body.into_bytes())
                .unwrap()
        }
        Err(status) => make_response(status, "Handler failed"),
    }
}
/// Serves one client connection: repeatedly reads a request, dispatches it to
/// `callback`, and writes back the serialized response until the peer closes.
pub fn handle(
    stream: &TcpStream,
    callback: impl Fn(&str, crate::Request) -> Option<crate::Response>,
) -> io::Result<()> {
    let mut reader = BufReader::new(stream.try_clone()?);
    let mut writer = BufWriter::new(stream.try_clone()?);
    loop {
        let req = loop {
            // Peek at the buffered bytes without consuming them yet.
            let buf = reader.fill_buf()?;
            if buf.is_empty() {
                // EOF: the client closed its side; shut down and stop.
                stream.shutdown(Shutdown::Both)?;
                return Ok(());
            }
            match parse_request(buf) {
                // Partial request: loop to accumulate more bytes.
                // NOTE(review): `fill_buf` returns the same buffer while it is
                // non-empty, so a partial request that never completes within
                // the buffer capacity would spin here — confirm acceptable.
                Ok(None) => {}
                Ok(Some(req)) => {
                    // NOTE(review): the whole buffer is consumed, not just the
                    // parsed request, so a pipelined second request in the same
                    // read would be discarded — confirm acceptable.
                    let amt = buf.len();
                    reader.consume(amt);
                    break Ok(req);
                }
                Err(resp) => {
                    let amt = buf.len();
                    reader.consume(amt);
                    break Err(resp);
                }
            }
        };
        // Either the handler-generated response or the parse-error response.
        let resp = match req {
            Ok(req) => generate_response(req, &callback),
            Err(resp) => resp,
        };
        let buf = stringify_response(resp);
        writer.write_all(&buf)?;
        writer.flush()?;
    }
}
| willcrichton/rqst-async | 0 | Rust | willcrichton | Will Crichton | Brown University | |
crates/server/index.html | HTML | <!DOCTYPE html>
<html>
<head>
<title>ChatABC</title>
<style>
#spinner.active {
width: 0.75em;
height: 0.75em;
border-top: 2px solid #555;
border-right: 2px solid transparent;
border-radius: 50%;
display: inline-block;
box-sizing: border-box;
animation: rotation 1s linear infinite;
margin-left: 10px;
}
@keyframes rotation {
0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
}
</style>
</head>
<body>
<h1>ChatABC</h1>
<form id="input">
<input type="text" />
<input type="submit" />
<span id="spinner"></span>
</form>
<div id="messages"></div>
<script>
let chat = { messages: [] };
let msgContainer = document.getElementById("messages");
let textEl = document.querySelector("form input[type=text]");
let spinnerEl = document.getElementById("spinner");
// POSTs the full chat state to /chat and returns the server's updated chat
// object. Disables the text input and shows the spinner while in flight.
async function fetchChat(chat) {
  spinnerEl.classList.add("active");
  textEl.setAttribute("disabled", "");
  try {
    let response = await fetch("/chat", {
      method: "post",
      headers: {"Content-Type": "application/json"},
      body: JSON.stringify(chat)
    });
    if (!response.ok) throw new Error(response.statusText);
    return await response.json();
  } finally {
    // Always restore the UI, even when the request fails.
    spinnerEl.classList.remove("active");
    textEl.removeAttribute("disabled");
  }
}
// Replaces the chat state and re-renders the message list.
// Fix: messages are inserted with textContent instead of interpolating raw
// strings into innerHTML, so message text (typed by the user / echoed by the
// server) cannot inject markup or scripts (XSS). Plain-text messages render
// identically to before.
function updateChat(newChat) {
  chat = newChat;
  msgContainer.innerHTML = '';
  for (let msg of chat.messages) {
    let div = document.createElement("div");
    div.textContent = msg;
    msgContainer.appendChild(div);
  }
}
// Pushes the typed message into the local state, clears the input, and syncs
// with the server.
function onSubmit(event) {
  event.preventDefault();
  chat.messages.push(textEl.value);
  textEl.value = "";
  // NOTE(review): a rejected fetch leaves the locally-pushed message
  // unrendered and the promise rejection unhandled — confirm acceptable.
  fetchChat(chat).then(updateChat);
}
// Wires up the form submit handler on page load.
function main() {
  document.getElementById("input").addEventListener("submit", onSubmit);
}
main();
</script>
</body>
</html> | willcrichton/rqst-async | 0 | Rust | willcrichton | Will Crichton | Brown University | |
crates/server/src/main.rs | Rust | use miniserve::{http::StatusCode, Content, Request, Response};
use serde::{Deserialize, Serialize};
/// Serves the bundled chat UI page, compiled in at build time.
fn index(_req: Request) -> Response {
    let page = String::from(include_str!("../index.html"));
    Ok(Content::Html(page))
}
/// JSON payload exchanged with the client: `{"messages": [...]}`.
#[derive(Serialize, Deserialize)]
struct Messages {
    // The full chat transcript; the client appends the newest entry last.
    messages: Vec<String>,
}
/// Handles `POST /chat`: parses the transcript, appends a canned reply, and
/// returns the updated transcript as JSON.
fn chat(req: Request) -> Response {
    let Request::Post(body) = req else {
        return Err(StatusCode::METHOD_NOT_ALLOWED);
    };
    // Fix: malformed client JSON is a client error, not a server failure, so
    // respond 400 Bad Request instead of 500 Internal Server Error.
    let Ok(mut messages) = serde_json::from_str::<Messages>(&body) else {
        return Err(StatusCode::BAD_REQUEST);
    };
    messages
        .messages
        .push("And how does that make you feel?".into());
    // Serialization of a Vec<String> cannot fail, hence the unwrap.
    Ok(Content::Json(serde_json::to_string(&messages).unwrap()))
}
/// Starts the chat server with the index page and the chat endpoint.
fn main() {
    miniserve::Server::new()
        .route("/", index)
        .route("/chat", chat)
        .run()
}
| willcrichton/rqst-async | 0 | Rust | willcrichton | Will Crichton | Brown University | |
index.html | HTML | <title>Tic Tac Toe</title>
<meta charset="UTF-8" />
<style>
body,
td,
input {
font-size: 24px;
line-height: 1.5;
text-align: center;
font-family: system-ui, sans-serif;
}
td {
height: 3rem;
width: 3rem;
text-align: center;
}
table {
margin: 5rem auto;
}
@media (prefers-color-scheme: dark) {
body,
input {
background: #222;
color: #ccc;
}
}
</style>
<h1>Tic Tac Toe</h1>
<table>
<tr>
<td><input type="checkbox" /></td>
<td><input type="checkbox" /></td>
<td><input type="checkbox" /></td>
</tr>
<tr>
<td><input type="checkbox" /></td>
<td><input type="checkbox" /></td>
<td><input type="checkbox" /></td>
</tr>
<tr>
<td><input type="checkbox" /></td>
<td><input type="checkbox" /></td>
<td><input type="checkbox" /></td>
</tr>
</table>
<h2 hidden>Game over</h2>
<script>
// Initialization
// Cache every cell, every checkbox, and the eight winning line groupings so
// win detection can iterate over precomputed element arrays.
const boxes = Array.from(document.querySelectorAll("td"));
const inputs = Array.from(document.querySelectorAll("[type=checkbox]"));
const rows = [1, 2, 3].map((n) =>
  Array.from(document.querySelectorAll(`tr:nth-child(${n}) td`))
);
const columns = [1, 2, 3].map((n) =>
  Array.from(document.querySelectorAll(`td:nth-child(${n})`))
);
// The two diagonals as (row, column) coordinate pairs, resolved to cells.
const diagonals = [
  [
    [1, 1],
    [2, 2],
    [3, 3],
  ],
  [
    [3, 1],
    [2, 2],
    [1, 3],
  ],
].map((set) =>
  set.map(([x, y]) =>
    document.querySelector(`tr:nth-child(${x}) td:nth-child(${y})`)
  )
);
const gameOver = document.querySelector("h2");
// Markers written into a cell once it is claimed.
const HUMAN = "🧑💻";
const ROBOT = "🤖";

// Decide who goes first
if (Math.random() > 0.5) {
  runRobotTurn();
}

// Handle when a user clicks an input
for (const input of inputs) {
  input.addEventListener("click", handleClickInput);
}
// The user chose a box, check for win, robot turn, check for win
function handleClickInput(evt) {
  // Replace the clicked checkbox's cell content with the human marker.
  evt.target.closest("td").innerHTML = HUMAN;
  if (hasWin()) {
    return endGame();
  }
  // NOTE(review): the robot turn runs even when the board is already full,
  // and a drawn game is never detected — confirm this is intended.
  runRobotTurn();
  if (hasWin()) {
    return endGame();
  }
}
// Choose a random available box
function runRobotTurn() {
  // A box is still available while it contains its checkbox input; take the
  // first available one from a shuffled copy of the cells. If the board is
  // full, do nothing.
  const openBox = shuffle(boxes).find((box) => box.querySelector("input"));
  if (openBox) {
    openBox.innerHTML = ROBOT;
  }
}
// Determine win
function hasWin() {
  // Collect all eight winning lines and check each for three matching marks.
  const lines = [...rows, ...columns, ...diagonals];
  return lines.some(hasAllSame);
}
// Determine if every cell matches
// A cell only counts once its checkbox has been replaced by a marker, so a
// line of untouched (empty) cells does not register as a win.
function hasAllSame(arr) {
  return arr.every(
    (td) => !td.querySelector("input") && td.innerHTML === arr[0].innerHTML
  );
}
// Display game over, cancel events
function endGame() {
  gameOver.hidden = false;
  for (const input of inputs) {
    input.removeEventListener("click", handleClickInput);
    // Removing the inputs also makes every remaining cell count as "taken"
    // for hasAllSame.
    input.remove();
  }
}
// Shuffle a copy of the input array (Fisher–Yates); the input is not mutated.
function shuffle(array) {
  array = array.slice(0);
  for (let i = array.length - 1; i > 0; i--) {
    // Fix: the random index must span 0..=i inclusive. The previous
    // `Math.random() * i` could never pick j === i, which biases the shuffle
    // (element i was always displaced).
    const j = Math.floor(Math.random() * (i + 1));
    const temp = array[i];
    array[i] = array[j];
    array[j] = temp;
  }
  return array;
}
</script>
| willcrichton/skills-review-pull-requests | 0 | My clone repository | HTML | willcrichton | Will Crichton | Brown University |
client.go | Go | package main
import (
"context"
"github.com/google/go-github/v50/github"
)
// packageType is the GitHub package type handled by this proxy.
var packageType = "container"

// GitHubClient describes a (partial) GitHub REST API client.
type GitHubClient interface {
	// ListPackages lists the packages of `user` (or of the authenticated user
	// when `user` is empty).
	ListPackages(ctx context.Context, user string, opts *github.PackageListOptions) ([]*github.Package, *github.Response, error)
	// PackageGetAllVersions lists the versions of a given package.
	PackageGetAllVersions(ctx context.Context, user, packageType, packageName string, opts *github.PackageListOptions) ([]*github.PackageVersion, *github.Response, error)
}
| willdurand/container-registry-proxy | 15 | 📖 A proxy that makes the GitHub Container Registry compatible with the Docker Registry HTTP API V2 specification. | Go | willdurand | William Durand | mozilla |
errors.go | Go | package main
// Error codes used in the `code` field of an API error.
const (
	ERROR_UNKNOWN = "UNKNOWN"
)

// apiError is a single error entry, following the Docker Registry HTTP API V2
// error format.
type apiError struct {
	Code    string `json:"code"`
	Message string `json:"message"`
	Detail  string `json:"detail"`
}

// apiErrors is the top-level error envelope: {"errors": [...]}.
type apiErrors struct {
	Errors []apiError `json:"errors"`
}
// makeError builds an error envelope containing a single error with the given
// code and message (and an empty detail).
func makeError(code, message string) apiErrors {
	e := apiError{Code: code, Message: message}
	return apiErrors{Errors: []apiError{e}}
}
| willdurand/container-registry-proxy | 15 | 📖 A proxy that makes the GitHub Container Registry compatible with the Docker Registry HTTP API V2 specification. | Go | willdurand | William Durand | mozilla |
main.go | Go | package main
import (
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"net/http/httputil"
"net/url"
"os"
"time"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/google/go-github/v50/github"
)
const (
defaultHost = "127.0.0.1"
defaultPort = "10000"
defaultUpstreamURL = "https://ghcr.io"
)
// containerProxy implements a subset of the Docker Registry HTTP API V2 on
// top of the GitHub Packages REST API.
type containerProxy struct {
	// ghClient is the GitHub REST API client used to list packages/versions.
	ghClient GitHubClient
	// owner optionally scopes package listing to a specific user; empty means
	// "the authenticated user".
	owner string
}
// NewProxy returns an instance of container proxy, which implements the Docker
// Registry HTTP API V2.
func NewProxy(addr string, ghClient GitHubClient, rawUpstreamURL, owner string) *http.Server {
	proxy := containerProxy{
		ghClient: ghClient,
		owner:    owner,
	}
	if owner != "" {
		log.Printf("packages owner set to: %s", owner)
	}

	// Create an upstream (reverse) proxy to handle the requests not supported by
	// the container proxy.
	upstreamURL, err := url.Parse(rawUpstreamURL)
	if err != nil {
		// NOTE(review): log.Fatal exits the whole process on a malformed
		// upstream URL; callers cannot recover — confirm this is intended in
		// a constructor.
		log.Fatal(err)
	}
	upstreamProxy := &httputil.ReverseProxy{
		Rewrite: func(r *httputil.ProxyRequest) {
			r.SetURL(upstreamURL)
		},
	}

	router := chi.NewRouter()
	// Set a timeout value on the request context (ctx), that will signal through
	// ctx.Done() that the request has timed out and further processing should be
	// stopped.
	router.Use(middleware.Timeout(30 * time.Second))
	router.Get("/v2/_catalog", proxy.Catalog)
	router.Get("/v2/{owner}/{name}/tags/list", proxy.TagsList)
	// Every route we do not implement ourselves is forwarded to the upstream
	// registry unchanged.
	router.NotFound(func(w http.ResponseWriter, r *http.Request) {
		log.Printf("%s %s -> %s", r.Method, r.URL, upstreamURL)
		upstreamProxy.ServeHTTP(w, r)
	})

	return &http.Server{
		Addr:    addr,
		Handler: router,
	}
}
// Catalog returns the list of repositories available in the Container Registry.
func (p *containerProxy) Catalog(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")

	// Fetch the list of container packages the current (authenticated) user has
	// access to, unless `GITHUB_PACKAGES_OWNER` is set. In which case, we fetch
	// the list of container packages for this specified owner.
	opts := &github.PackageListOptions{PackageType: &packageType}
	packages, _, err := p.ghClient.ListPackages(r.Context(), p.owner, opts)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		errors := makeError(ERROR_UNKNOWN, fmt.Sprintf("ListPackages: %s", err))
		json.NewEncoder(w).Encode(&errors)
		return
	}

	// `Repositories` is initialized non-nil so an empty catalog serializes as
	// [] and not null.
	catalog := struct {
		Repositories []string `json:"repositories"`
	}{
		Repositories: []string{},
	}
	for _, pack := range packages {
		// Fix: also guard against a nil `Owner`; previously `pack.Owner.Login`
		// was dereferenced before checking `Owner` itself, panicking on a
		// package with no owner set.
		if pack.Name == nil || pack.Owner == nil || pack.Owner.Login == nil {
			continue
		}
		catalog.Repositories = append(
			catalog.Repositories,
			fmt.Sprintf("%s/%s", *pack.Owner.Login, *pack.Name),
		)
	}
	json.NewEncoder(w).Encode(catalog)
}
// TagsList returns the list of tags for a given repository.
func (p *containerProxy) TagsList(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")

	owner := chi.URLParam(r, "owner")
	name := chi.URLParam(r, "name")

	// NOTE(review): `opts` is nil, so only the API's first page of versions is
	// returned; repositories with many versions may be truncated — confirm.
	versions, _, err := p.ghClient.PackageGetAllVersions(r.Context(), owner, packageType, name, nil)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		errors := makeError(ERROR_UNKNOWN, fmt.Sprintf("PackageGetAllVersions: %s", err))
		json.NewEncoder(w).Encode(errors)
		return
	}

	// `Tags` is initialized non-nil so an empty list serializes as [] and not
	// null.
	list := struct {
		Name string   `json:"name"`
		Tags []string `json:"tags"`
	}{
		Name: fmt.Sprintf("%s/%s", owner, name),
		Tags: []string{},
	}
	for _, version := range versions {
		// Skip versions that carry no container metadata.
		if version.Metadata == nil || version.Metadata.Container == nil {
			continue
		}
		list.Tags = append(
			list.Tags,
			version.Metadata.Container.Tags...,
		)
	}
	json.NewEncoder(w).Encode(list)
}
// main reads the configuration from environment variables (HOST, PORT,
// UPSTREAM_URL, GITHUB_TOKEN, GITHUB_PACKAGES_OWNER) and runs the proxy.
func main() {
	host := os.Getenv("HOST")
	if host == "" {
		host = defaultHost
	}
	port := os.Getenv("PORT")
	if port == "" {
		port = defaultPort
	}
	rawUpstreamURL := os.Getenv("UPSTREAM_URL")
	if rawUpstreamURL == "" {
		rawUpstreamURL = defaultUpstreamURL
	}

	// Create a GitHub client to call the REST API.
	ctx := context.Background()
	client := github.NewTokenClient(ctx, os.Getenv("GITHUB_TOKEN"))

	addr := fmt.Sprintf("%s:%s", host, port)

	// We intentionally allow empty string to be the default value here, since an
	// empty owner means the GitHub API will default to the authenticated user.
	owner := os.Getenv("GITHUB_PACKAGES_OWNER")

	proxy := NewProxy(addr, client.Users, rawUpstreamURL, owner)

	log.Printf("starting container registry proxy on %s", addr)
	log.Fatal(proxy.ListenAndServe())
}
| willdurand/container-registry-proxy | 15 | 📖 A proxy that makes the GitHub Container Registry compatible with the Docker Registry HTTP API V2 specification. | Go | willdurand | William Durand | mozilla |
main_test.go | Go | package main
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/google/go-github/v50/github"
)
// githubClientMock is a canned GitHubClient implementation used by the tests.
type githubClientMock struct {
	// Packages is returned by ListPackages.
	Packages []*github.Package
	// PackageVersions is returned by PackageGetAllVersions.
	PackageVersions []*github.PackageVersion
	// Err, when non-nil, is returned by both methods.
	Err error
}
// ListPackages returns the canned packages; it rejects any non-empty user to
// assert that the proxy defaults to the authenticated user.
func (c *githubClientMock) ListPackages(ctx context.Context, user string, opts *github.PackageListOptions) ([]*github.Package, *github.Response, error) {
	if user != "" {
		return nil, nil, fmt.Errorf("invalid user: %s", user)
	}
	return c.Packages, nil, c.Err
}
// PackageGetAllVersions returns the canned versions regardless of arguments.
func (c *githubClientMock) PackageGetAllVersions(ctx context.Context, user, packageType, packageName string, opts *github.PackageListOptions) ([]*github.PackageVersion, *github.Response, error) {
	return c.PackageVersions, nil, c.Err
}
// TestCatalog exercises GET /v2/_catalog against a table of mocked GitHub
// API results.
func TestCatalog(t *testing.T) {
	owner := &github.User{Login: github.String("some-owner")}

	for _, tc := range []struct {
		client             githubClientMock
		expectedStatusCode int
		expectedContent    string
	}{
		// No packages: the catalog is empty but still a JSON array.
		{
			client:             githubClientMock{},
			expectedStatusCode: 200,
			expectedContent:    `{"repositories":[]}`,
		},
		{
			client: githubClientMock{
				Packages: []*github.Package{
					{
						Name:  github.String("some-package"),
						Owner: owner,
					},
				},
			},
			expectedStatusCode: 200,
			expectedContent:    `{"repositories":["some-owner/some-package"]}`,
		},
		{
			client: githubClientMock{
				Packages: []*github.Package{
					{
						Name:  github.String("package-1"),
						Owner: owner,
					},
					{
						Name:  github.String("package-2"),
						Owner: owner,
					},
				},
			},
			expectedStatusCode: 200,
			expectedContent:    `{"repositories":["some-owner/package-1","some-owner/package-2"]}`,
		},
		// Packages with missing fields are skipped, not treated as errors.
		{
			client: githubClientMock{
				Packages: []*github.Package{
					{
						// No Name here
						Owner: owner,
					},
				},
			},
			expectedStatusCode: 200,
			expectedContent:    `{"repositories":[]}`,
		},
		{
			client: githubClientMock{
				Packages: []*github.Package{
					{
						Name:  github.String("some-package"),
						Owner: &github.User{},
					},
				},
			},
			expectedStatusCode: 200,
			expectedContent:    `{"repositories":[]}`,
		},
		// A GitHub API error surfaces as a 400 with an error envelope.
		{
			client: githubClientMock{
				Err: fmt.Errorf("an error"),
			},
			expectedStatusCode: 400,
			expectedContent:    `{"errors":[{"code":"UNKNOWN","message":"ListPackages: an error","detail":""}]}`,
		},
	} {
		proxy := NewProxy(
			"127.0.0.1:10000",
			&tc.client,
			"http://127.0.0.1/upstream",
			"", // owner
		)

		req, _ := http.NewRequest("GET", "/v2/_catalog", nil)
		res := httptest.NewRecorder()
		proxy.Handler.ServeHTTP(res, req)

		if res.Code != tc.expectedStatusCode {
			t.Fatalf("expected: %d, got: %d", tc.expectedStatusCode, res.Code)
		}
		if strings.TrimSpace(res.Body.String()) != tc.expectedContent {
			t.Fatalf("expected: %s, got: %s", tc.expectedContent, res.Body.String())
		}
	}
}
// TestTagsList exercises GET /v2/{owner}/{name}/tags/list against a table of
// mocked GitHub API results.
func TestTagsList(t *testing.T) {
	for _, tc := range []struct {
		client             githubClientMock
		owner              string
		name               string
		expectedStatusCode int
		expectedContent    string
	}{
		// No versions: the tag list is empty but still a JSON array.
		{
			client:             githubClientMock{},
			owner:              "some-owner",
			name:               "some-package",
			expectedStatusCode: 200,
			expectedContent:    `{"name":"some-owner/some-package","tags":[]}`,
		},
		{
			client: githubClientMock{
				PackageVersions: []*github.PackageVersion{
					{
						Metadata: &github.PackageMetadata{
							Container: &github.PackageContainerMetadata{
								Tags: []string{"tag-1", "tag-2"},
							},
						},
					},
				},
			},
			owner:              "some-owner",
			name:               "some-package",
			expectedStatusCode: 200,
			expectedContent:    `{"name":"some-owner/some-package","tags":["tag-1","tag-2"]}`,
		},
		{
			client: githubClientMock{
				PackageVersions: []*github.PackageVersion{},
			},
			owner:              "some-owner",
			name:               "some-package",
			expectedStatusCode: 200,
			expectedContent:    `{"name":"some-owner/some-package","tags":[]}`,
		},
		// Versions without container metadata are skipped, not errors.
		{
			client: githubClientMock{
				PackageVersions: []*github.PackageVersion{
					{},
				},
			},
			owner:              "some-owner",
			name:               "some-package",
			expectedStatusCode: 200,
			expectedContent:    `{"name":"some-owner/some-package","tags":[]}`,
		},
		{
			client: githubClientMock{
				PackageVersions: []*github.PackageVersion{
					{
						Metadata: &github.PackageMetadata{},
					},
				},
			},
			owner:              "some-owner",
			name:               "some-package",
			expectedStatusCode: 200,
			expectedContent:    `{"name":"some-owner/some-package","tags":[]}`,
		},
		// A GitHub API error surfaces as a 400 with an error envelope.
		{
			client: githubClientMock{
				Err: fmt.Errorf("an error"),
			},
			expectedStatusCode: 400,
			expectedContent:    `{"errors":[{"code":"UNKNOWN","message":"PackageGetAllVersions: an error","detail":""}]}`,
		},
	} {
		proxy := NewProxy(
			"127.0.0.1:10000",
			&tc.client,
			"http://127.0.0.1/upstream",
			"", // owner
		)

		req, _ := http.NewRequest("GET", fmt.Sprintf("/v2/%s/%s/tags/list", tc.owner, tc.name), nil)
		res := httptest.NewRecorder()
		proxy.Handler.ServeHTTP(res, req)

		if res.Code != tc.expectedStatusCode {
			t.Fatalf("expected: %d, got: %d", tc.expectedStatusCode, res.Code)
		}
		if strings.TrimSpace(res.Body.String()) != tc.expectedContent {
			t.Fatalf("expected: %s, got: %s", tc.expectedContent, res.Body.String())
		}
	}
}
// TestCallUpstreamServer verifies that unknown routes are forwarded to the
// upstream (reverse-proxied) server unchanged.
func TestCallUpstreamServer(t *testing.T) {
	upstreamResponse := "upstream server called"
	upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "%s", upstreamResponse)
	}))
	defer upstream.Close()

	proxy := NewProxy(
		"127.0.0.1:10000",
		&githubClientMock{},
		upstream.URL,
		"", // owner
	)

	// This path matches none of the proxy's own routes.
	req, _ := http.NewRequest("GET", "/some/other/path", nil)
	res := httptest.NewRecorder()
	proxy.Handler.ServeHTTP(res, req)

	if res.Code != 200 {
		t.Fatalf("expected: %d, got: %d", 200, res.Code)
	}
	if strings.TrimSpace(res.Body.String()) != upstreamResponse {
		t.Fatalf("expected: %s, got: %s", upstreamResponse, res.Body.String())
	}
}
| willdurand/container-registry-proxy | 15 | 📖 A proxy that makes the GitHub Container Registry compatible with the Docker Registry HTTP API V2 specification. | Go | willdurand | William Durand | mozilla |
cmd/microvm/create.go | Go | package main
import (
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/microvm"
)
// init registers the `create` subcommand, which prepares a new container from
// an OCI bundle without starting it.
func init() {
	createCmd := &cobra.Command{
		Use:   "create <id>",
		Short: "Create a container",
		Run: cli.HandleErrors(func(cmd *cobra.Command, args []string) error {
			rootDir, _ := cmd.Flags().GetString("root")
			bundle, _ := cmd.Flags().GetString("bundle")
			pidFile, _ := cmd.Flags().GetString("pid-file")
			consoleSocket, _ := cmd.Flags().GetString("console-socket")
			debug, _ := cmd.Flags().GetBool("debug")

			opts := microvm.CreateOpts{
				PidFile:       pidFile,
				ConsoleSocket: consoleSocket,
				Debug:         debug,
			}

			return microvm.Create(rootDir, args[0], bundle, opts)
		}),
		Args: cobra.ExactArgs(1),
	}
	createCmd.PersistentFlags().StringP("bundle", "b", "", "path to the root of the bundle directory")
	createCmd.MarkFlagRequired("bundle")
	createCmd.Flags().String("pid-file", "", "specify the file to write the process id to")
	createCmd.Flags().String("console-socket", "", "console unix socket used to pass a PTY descriptor")

	rootCmd.AddCommand(createCmd)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/microvm/delete.go | Go | package main
import (
"os"
"path/filepath"
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
)
// init registers the `delete` subcommand, which removes a container's state
// directory.
func init() {
	deleteCmd := &cobra.Command{
		Use:     "delete <id>",
		Aliases: []string{"del", "rm"},
		Short:   "Delete a container",
		Run: cli.HandleErrors(func(cmd *cobra.Command, args []string) error {
			rootDir, _ := cmd.Flags().GetString("root")
			// NOTE(review): the state directory is removed unconditionally —
			// there is no check that the container is stopped — confirm
			// intended.
			baseDir := filepath.Join(rootDir, args[0])
			return os.RemoveAll(baseDir)
		}),
		Args: cobra.ExactArgs(1),
	}
	// Accepted for CLI compatibility but has no effect.
	deleteCmd.Flags().BoolP("force", "f", false, "UNSUPPORTED FLAG")

	rootCmd.AddCommand(deleteCmd)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/microvm/kill.go | Go | package main
import (
"syscall"
"github.com/docker/docker/pkg/signal"
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/microvm"
)
// init registers the `kill` subcommand, which sends a signal to a container.
func init() {
	killCmd := &cobra.Command{
		Use:   "kill <id> [<signal>]",
		Short: "Send a signal to a container",
		Run: cli.HandleErrors(func(cmd *cobra.Command, args []string) error {
			rootDir, _ := cmd.Flags().GetString("root")
			// Default to SIGTERM; NOTE(review): an unparsable signal name is
			// silently ignored and the default is used — confirm intended.
			sig := syscall.SIGTERM
			if len(args) > 1 {
				if s, err := signal.ParseSignal(args[1]); err == nil {
					sig = s
				}
			}
			return microvm.Kill(rootDir, args[0], sig)
		}),
		Args: cobra.MinimumNArgs(1),
	}
	// Accepted for CLI compatibility but has no effect.
	killCmd.Flags().Bool("all", false, "UNSUPPORTED FLAG")

	rootCmd.AddCommand(killCmd)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/microvm/main.go | Go | package main
import "github.com/willdurand/containers/internal/cli"
const (
	// programName is the CLI name used by the root command.
	programName string = "microvm"
)

// rootCmd represents the base command when called without any subcommands.
var rootCmd = cli.NewRootCommand(
	programName,
	"An experimental runtime backed by micro VMs",
)

func init() {
	// Accepted for OCI runtime CLI compatibility; the flag has no effect.
	rootCmd.PersistentFlags().Bool("systemd-cgroup", false, "UNSUPPORTED FLAG")
}

func main() {
	cli.Execute(rootCmd)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/microvm/redirect_stdio.go | Go | package main
import (
"bufio"
"fmt"
"io"
"os"
"sync"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/microvm/container"
)
// init registers the hidden `redirect-stdio` subcommand, which bridges this
// process's stdin/stdout with the VM's named pipes.
func init() {
	redirectStdioCmd := &cobra.Command{
		Use:    "redirect-stdio <id>",
		Short:  "redirect the microvm standard IOs",
		Hidden: true,
		Run: cli.HandleErrors(func(cmd *cobra.Command, args []string) error {
			rootDir, _ := cmd.Flags().GetString("root")
			debug, _ := cmd.Flags().GetBool("debug")

			container, err := container.LoadWithBundleConfig(rootDir, args[0])
			if err != nil {
				return err
			}

			// So... We need to wait until the VM is "ready" to send STDIN data,
			// otherwise STDIN might be ECHO'ed on STDOUT. I am not too sure why this
			// happens (maybe that's how the Linux console is configured?) so I
			// introduced a workaround...
			//
			// The `init(1)` process will disable ECHO and print a special message
			// for us here. When we receive it, we can copy the data.
			vmReady := make(chan interface{})

			pipeOut, err := os.OpenFile(container.PipePathOut(), os.O_RDONLY, 0o600)
			if err != nil {
				return err
			}
			defer pipeOut.Close()

			// Drain VM output line by line until the readiness marker appears;
			// in debug mode, echo those boot lines to our own stdout.
			s := bufio.NewScanner(pipeOut)
			for s.Scan() {
				if debug {
					fmt.Println(s.Text())
				}
				// Should be kept in sync with `extras/microvm/init.c`.
				if s.Text() == "init: ready" {
					break
				}
			}

			var wg sync.WaitGroup
			// Copy the VM's output pipe to our stdout.
			wg.Add(1)
			go func() {
				defer wg.Done()
				close(vmReady)
				if _, err := io.Copy(os.Stdout, pipeOut); err != nil {
					logrus.WithError(err).Error("copy: pipe out")
				}
			}()
			// Copy our stdin to the VM's input pipe, but only once the VM is
			// ready (see the workaround above).
			wg.Add(1)
			go func() {
				defer wg.Done()
				pipeIn, err := os.OpenFile(container.PipePathIn(), os.O_WRONLY, 0o600)
				if err != nil {
					logrus.WithError(err).Error("open: pipe in")
					return
				}
				defer pipeIn.Close()
				<-vmReady
				if _, err := io.Copy(pipeIn, os.Stdin); err != nil {
					logrus.WithError(err).Error("copy: pipe in")
				}
			}()
			wg.Wait()

			return nil
		}),
	}

	rootCmd.AddCommand(redirectStdioCmd)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/microvm/start.go | Go | package main
import (
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/microvm"
)
func init() {
startCmd := &cobra.Command{
Use: "start <id>",
Short: "Start a container",
Run: cli.HandleErrors(func(cmd *cobra.Command, args []string) error {
rootDir, _ := cmd.Flags().GetString("root")
return microvm.Start(rootDir, args[0])
}),
Args: cobra.ExactArgs(1),
}
rootCmd.AddCommand(startCmd)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/microvm/state.go | Go | package main
import (
"os"
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/microvm"
)
func init() {
stateCmd := &cobra.Command{
Use: "state <id>",
Short: "Query the state of a container",
Run: cli.HandleErrors(func(cmd *cobra.Command, args []string) error {
rootDir, _ := cmd.Flags().GetString("root")
return microvm.State(rootDir, args[0], os.Stdout)
}),
Args: cobra.ExactArgs(1),
}
rootCmd.AddCommand(stateCmd)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yacr/create.go | Go | package main
import (
"fmt"
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/yacr"
)
func init() {
cmd := &cobra.Command{
Use: "create <id>",
Short: "Create a container",
Run: cli.HandleErrors(create),
Args: cobra.ExactArgs(1),
}
cmd.PersistentFlags().StringP("bundle", "b", "", "path to the root of the bundle directory")
cmd.MarkFlagRequired("bundle")
cmd.Flags().String("pid-file", "", "specify the file to write the process id to")
cmd.Flags().String("console-socket", "", "console unix socket used to pass a PTY descriptor")
cmd.Flags().Bool("no-pivot", false, "do not use pivot root to jail process inside rootfs")
rootCmd.AddCommand(cmd)
containerCmd := &cobra.Command{
Use: "container <id>",
Run: cli.HandleErrors(createContainer),
Hidden: true,
Args: cobra.ExactArgs(1),
}
containerCmd.Flags().Bool("no-pivot", false, "do not use pivot root to jail process inside rootfs")
cmd.AddCommand(containerCmd)
}
func create(cmd *cobra.Command, args []string) error {
rootDir, _ := cmd.Flags().GetString("root")
bundle, _ := cmd.Flags().GetString("bundle")
pidFile, _ := cmd.Flags().GetString("pid-file")
consoleSocket, _ := cmd.Flags().GetString("console-socket")
noPivot, _ := cmd.Flags().GetBool("no-pivot")
logFile, _ := cmd.Flags().GetString("log")
logFormat, _ := cmd.Flags().GetString("log-format")
debug, _ := cmd.Flags().GetBool("debug")
opts := yacr.CreateOpts{
ID: args[0],
Bundle: bundle,
PidFile: pidFile,
ConsoleSocket: consoleSocket,
NoPivot: noPivot,
LogFile: logFile,
LogFormat: logFormat,
Debug: debug,
}
if err := yacr.Create(rootDir, opts); err != nil {
return fmt.Errorf("create: %w", err)
}
return nil
}
func createContainer(cmd *cobra.Command, args []string) error {
rootDir, _ := cmd.Flags().GetString("root")
bundle, _ := cmd.Flags().GetString("bundle")
noPivot, _ := cmd.Flags().GetBool("no-pivot")
opts := yacr.CreateOpts{
ID: args[0],
Bundle: bundle,
NoPivot: noPivot,
}
if err := yacr.CreateContainer(rootDir, opts); err != nil {
return fmt.Errorf("create container: %w", err)
}
return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yacr/delete.go | Go | package main
import (
"fmt"
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/yacr"
)
func init() {
cmd := &cobra.Command{
Use: "delete <id>",
Aliases: []string{"del", "rm"},
Short: "Delete a container",
Run: cli.HandleErrors(delete),
Args: cobra.ExactArgs(1),
}
cmd.Flags().BoolP("force", "f", false, "force delete a container")
rootCmd.AddCommand(cmd)
}
func delete(cmd *cobra.Command, args []string) error {
rootDir, _ := cmd.Flags().GetString("root")
force, _ := cmd.Flags().GetBool("force")
if err := yacr.Delete(rootDir, args[0], force); err != nil {
return fmt.Errorf("delete: %w", err)
}
return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yacr/kill.go | Go | package main
import (
"fmt"
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/yacr"
)
func init() {
cmd := &cobra.Command{
Use: "kill <id> [<signal>]",
Short: "Send a signal to a container",
Run: cli.HandleErrors(kill),
Args: cobra.MinimumNArgs(1),
}
cmd.Flags().Bool("all", false, "UNSUPPORTED FLAG")
rootCmd.AddCommand(cmd)
}
func kill(cmd *cobra.Command, args []string) error {
rootDir, _ := cmd.Flags().GetString("root")
if err := yacr.Kill(rootDir, args); err != nil {
return fmt.Errorf("kill: %w", err)
}
return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yacr/list.go | Go | package main
import (
"fmt"
"os"
"text/tabwriter"
"time"
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/yacr"
)
func init() {
cmd := &cobra.Command{
Use: "list",
Aliases: []string{"ls"},
Short: "List containers",
Run: cli.HandleErrors(list),
Args: cobra.NoArgs,
}
rootCmd.AddCommand(cmd)
}
func list(cmd *cobra.Command, args []string) error {
rootDir, _ := cmd.Flags().GetString("root")
list, err := yacr.List(rootDir)
if err != nil {
return fmt.Errorf("list: %w", err)
}
w := tabwriter.NewWriter(os.Stdout, 12, 1, 3, ' ', 0)
fmt.Fprint(w, "ID\tSTATUS\tCREATED\tPID\tBUNDLE\n")
for _, container := range list {
fmt.Fprintf(
w, "%s\t%s\t%s\t%d\t%s\n",
container.ID,
container.Status,
container.CreatedAt.Format(time.RFC3339),
container.PID,
container.BundlePath,
)
}
return w.Flush()
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yacr/main.go | Go | // Yet Another Container Runtime
package main
import "github.com/willdurand/containers/internal/cli"
const (
programName string = "yacr"
)
// rootCmd represents the base command when called without any subcommands.
var rootCmd = cli.NewRootCommand(
programName,
"Yet another (unsafe) container runtime",
)
func init() {
rootCmd.PersistentFlags().Bool("systemd-cgroup", false, "UNSUPPORTED FLAG")
}
func main() {
cli.Execute(rootCmd)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yacr/spec.go | Go | package main
import (
"encoding/json"
"os"
"path/filepath"
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/runtime"
)
func init() {
cmd := &cobra.Command{
Use: "spec",
Short: "Create a new specification file for a bundle",
Run: cli.HandleErrors(spec),
Args: cobra.NoArgs,
}
cmd.Flags().StringP("bundle", "b", "", "path to the root of the bundle directory")
cmd.Flags().Bool("rootless", false, "generate a configuration for a rootless container")
rootCmd.AddCommand(cmd)
}
func spec(cmd *cobra.Command, args []string) error {
bundle, _ := cmd.Flags().GetString("bundle")
rootless, _ := cmd.Flags().GetBool("rootless")
configFile, err := os.Create(filepath.Join(bundle, "config.json"))
if err != nil {
return err
}
defer configFile.Close()
encoder := json.NewEncoder(configFile)
encoder.SetIndent("", " ")
rootfs, err := filepath.Abs(filepath.Join(bundle, "rootfs"))
if err != nil {
return err
}
spec, err := runtime.BaseSpec(rootfs, rootless)
if err != nil {
return err
}
return encoder.Encode(spec)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yacr/start.go | Go | package main
import (
"fmt"
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/yacr"
)
func init() {
cmd := &cobra.Command{
Use: "start <id>",
Short: "Start a container",
Run: cli.HandleErrors(start),
Args: cobra.ExactArgs(1),
}
rootCmd.AddCommand(cmd)
}
func start(cmd *cobra.Command, args []string) error {
rootDir, _ := cmd.Flags().GetString("root")
if err := yacr.Start(rootDir, args[0]); err != nil {
return fmt.Errorf("start: %w", err)
}
return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yacr/state.go | Go | package main
import (
"fmt"
"os"
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/yacr"
)
func init() {
cmd := &cobra.Command{
Use: "state <id>",
Short: "Query the state of a container",
Run: cli.HandleErrors(state),
Args: cobra.ExactArgs(1),
}
rootCmd.AddCommand(cmd)
}
func state(cmd *cobra.Command, args []string) error {
rootDir, _ := cmd.Flags().GetString("root")
if err := yacr.State(rootDir, args[0], os.Stdout); err != nil {
return fmt.Errorf("state: %w", err)
}
return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yacs/main.go | Go | package main
import (
"fmt"
"github.com/sevlyar/go-daemon"
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/yacs"
)
func main() {
rootCmd := cli.NewRootCommand("yacs", "Yet another container shim")
rootCmd.Run = cli.HandleErrors(run)
rootCmd.Args = cobra.NoArgs
rootCmd.Flags().String("base-dir", "", `path to the base directory (default "<rootDir>/<containerId>"`)
rootCmd.Flags().StringP("bundle", "b", "", "path to the root of the bundle directory")
rootCmd.MarkFlagRequired("bundle")
rootCmd.Flags().String("container-id", "", "container id")
rootCmd.MarkFlagRequired("container-id")
rootCmd.Flags().String("container-log-file", "", `path to the container log file (default "container.log")`)
rootCmd.Flags().String("exit-command", "", "path to the exit command executed when the container has exited")
rootCmd.Flags().StringArray("exit-command-arg", []string{}, "argument to pass to the execute command")
rootCmd.Flags().String("runtime", "yacr", "container runtime to use")
rootCmd.Flags().String("stdio-dir", "", "the directory to use when creating the stdio named pipes")
cli.Execute(rootCmd)
}
func run(cmd *cobra.Command, args []string) error {
// The code below (until `ctx.Reborn()`) is shared between a "parent" and a
// "child" process. Both initialize Yacs but most of the logic lives in the
// "child" process.
shim, err := yacs.NewShimFromFlags(cmd.Flags())
if err != nil {
return err
}
ctx := &daemon.Context{
PidFileName: shim.PidFilePath(),
PidFilePerm: 0o644,
}
child, err := ctx.Reborn()
if err != nil {
return fmt.Errorf("failed to create daemon: %w", err)
}
// This block is the "parent" process (in a fork/exec model). We wait until
//receive a message from the "child" process.
if child != nil {
if err := shim.Err(); err != nil {
return err
}
// When the shim has started successfully, we print the unix socket
// address so that another program can interact with the shim.
fmt.Println(shim.SocketPath())
return nil
}
// This is the "child" process.
defer ctx.Release()
return shim.Run()
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yaman/container/attach.go | Go | package container
import (
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/yaman"
)
func init() {
cmd := &cobra.Command{
Use: "attach <container>",
Short: "Attach standard input, output, and error streams to a running container",
Run: cli.HandleErrors(attach),
Args: cobra.ExactArgs(1),
ValidArgsFunction: completeContainerIds,
}
cmd.Flags().Bool("no-stdin", false, "do not attach stdin")
containerCommand.AddCommand(cmd)
}
func attach(cmd *cobra.Command, args []string) error {
rootDir, _ := cmd.Flags().GetString("root")
noStdin, _ := cmd.Flags().GetBool("no-stdin")
opts := yaman.AttachOpts{
Stdin: !noStdin,
Stdout: true,
Stderr: true,
}
return yaman.Attach(rootDir, args[0], opts)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yaman/container/cleanup.go | Go | package container
import (
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/yaman"
)
func init() {
cmd := &cobra.Command{
Use: "cleanup <container>",
Short: "Clean-up a container",
Hidden: true,
Run: cli.HandleErrors(cleanUp),
Args: cobra.ExactArgs(1),
}
containerCommand.AddCommand(cmd)
}
func cleanUp(cmd *cobra.Command, args []string) error {
rootDir, _ := cmd.Flags().GetString("root")
return yaman.CleanUp(rootDir, args[0])
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yaman/container/container.go | Go | package container
import (
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/yaman"
)
var containerCommand = &cobra.Command{
Use: "container",
Aliases: []string{"c"},
Short: "Manage containers",
}
func Register(rootCmd *cobra.Command) {
rootCmd.AddCommand(containerCommand)
}
func completeContainerIds(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
if len(args) != 0 {
return nil, cobra.ShellCompDirectiveNoFileComp
}
return completeManyContainerIds(cmd, args, toComplete)
}
func completeManyContainerIds(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
rootDir, _ := cmd.Flags().GetString("root")
return yaman.GetContainerIds(rootDir, toComplete), cobra.ShellCompDirectiveNoFileComp
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yaman/container/create.go | Go | package container
import (
"encoding/json"
"fmt"
"os"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/yaman"
"github.com/willdurand/containers/internal/yaman/container"
"github.com/willdurand/containers/internal/yaman/registry"
"github.com/willdurand/containers/internal/yaman/shim"
)
func init() {
cmd := &cobra.Command{
Use: "create <image> [<command> [<args>...]]",
Short: "Create a new container",
Run: cli.HandleErrors(create),
Args: cobra.MinimumNArgs(1),
}
addCreateFlagsToCommand(cmd)
containerCommand.AddCommand(cmd)
}
func addCreateFlagsToCommand(cmd *cobra.Command) {
cmd.Flags().String("entrypoint", "", "overwrite the default entrypoint set by the image")
cmd.Flags().String("hostname", "", "set the container hostname")
cmd.Flags().BoolP("interactive", "i", false, "keep stdin open")
cmd.Flags().BoolP("publish-all", "P", false, "publish all exposed ports to random ports")
cmd.Flags().String("pull", string(registry.PullMissing), `pull image before running ("always"|"missing"|"never")`)
cmd.Flags().Bool("rm", false, "automatically remove the container when it exits")
cmd.Flags().String("runtime", "", "runtime to use for this container")
cmd.Flags().BoolP("tty", "t", false, "allocate a pseudo-tty")
}
func makeContainerOptsFromCommand(cmd *cobra.Command, command []string) container.ContainerOpts {
var entrypoint []string
entrypointStr, _ := cmd.Flags().GetString("entrypoint")
if entrypointStr != "" {
if err := json.Unmarshal([]byte(entrypointStr), &entrypoint); err != nil {
logrus.WithError(err).Debug("failed to parse entrypoint as JSON")
entrypoint = []string{entrypointStr}
}
}
hostname, _ := cmd.Flags().GetString("hostname")
interactive, _ := cmd.Flags().GetBool("interactive")
publishAll, _ := cmd.Flags().GetBool("publish-all")
rm, _ := cmd.Flags().GetBool("rm")
tty, _ := cmd.Flags().GetBool("tty")
return container.ContainerOpts{
Command: command,
Entrypoint: entrypoint,
Remove: rm,
Hostname: hostname,
Interactive: interactive,
Tty: tty,
Detach: false,
PublishAll: publishAll,
}
}
func create(cmd *cobra.Command, args []string) error {
rootDir, _ := cmd.Flags().GetString("root")
// registry/pull options
pull, _ := cmd.Flags().GetString("pull")
pullPolicy, err := registry.ParsePullPolicy(pull)
if err != nil {
return err
}
pullOpts := registry.PullOpts{
Policy: pullPolicy,
Output: os.Stderr,
}
// container options
containerOpts := makeContainerOptsFromCommand(cmd, args[1:])
// shim options
shimOpts := shim.ShimOpts{}
if runtime, _ := cmd.Flags().GetString("runtime"); runtime != "" {
shimOpts.Runtime = runtime
}
_, container, err := yaman.Create(rootDir, args[0], pullOpts, containerOpts, shimOpts)
if err != nil {
return err
}
fmt.Fprintln(os.Stdout, container.ID)
return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yaman/container/delete.go | Go | package container
import (
"fmt"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/yaman"
)
func init() {
cmd := &cobra.Command{
Use: "delete <container> [<container>...]",
Aliases: []string{"del", "rm", "remove"},
Short: "Delete one or more containers",
PreRunE: func(cmd *cobra.Command, args []string) error {
all, _ := cmd.Flags().GetBool("all")
if !all && len(args) < 1 {
return fmt.Errorf("requires at least 1 arg(s), only received %d", len(args))
}
return nil
},
Run: cli.HandleErrors(delete),
Args: cobra.MinimumNArgs(0),
ValidArgsFunction: completeManyContainerIds,
}
cmd.Flags().BoolP("all", "a", false, "delete all stopped containers")
containerCommand.AddCommand(cmd)
}
func delete(cmd *cobra.Command, args []string) error {
rootDir, _ := cmd.Flags().GetString("root")
all, _ := cmd.Flags().GetBool("all")
if all {
args = yaman.GetContainerIds(rootDir, "")
}
for _, id := range args {
if err := yaman.Delete(rootDir, id); err != nil {
logrus.WithFields(logrus.Fields{
"id": id,
"error": err,
}).Debug("failed to delete container")
cli.PrintUserError(err)
}
}
return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yaman/container/hook.go | Go | package container
import (
"os"
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/yaman"
)
func init() {
cmd := &cobra.Command{
Use: "hook <hook>",
Short: "Hidden command called by the OCI runtime",
Hidden: true,
Run: cli.HandleErrors(hook),
Args: cobra.ExactArgs(1),
}
containerCommand.AddCommand(cmd)
}
func hook(cmd *cobra.Command, args []string) error {
rootDir, _ := cmd.Flags().GetString("root")
return yaman.ProcessHook(rootDir, args[0], os.Stdin)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yaman/container/inspect.go | Go | package container
import (
"encoding/json"
"os"
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/yaman"
)
func init() {
cmd := &cobra.Command{
Use: "inspect <container>",
Short: "Return low-level information on the container as JSON",
Run: cli.HandleErrors(inspect),
Args: cobra.ExactArgs(1),
ValidArgsFunction: completeContainerIds,
}
containerCommand.AddCommand(cmd)
}
func inspect(cmd *cobra.Command, args []string) error {
rootDir, _ := cmd.Flags().GetString("root")
container, err := yaman.Inspect(rootDir, args[0])
if err != nil {
return err
}
encoder := json.NewEncoder(os.Stdout)
encoder.SetIndent("", " ")
if err := encoder.Encode(container); err != nil {
return err
}
return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yaman/container/list.go | Go | package container
import (
"fmt"
"os"
"text/tabwriter"
"time"
"github.com/docker/go-units"
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/yaman"
)
func init() {
cmd := &cobra.Command{
Use: "list",
Short: "List containers",
Aliases: []string{"ls"},
Run: cli.HandleErrors(list),
Args: cobra.NoArgs,
}
cmd.Flags().BoolP("all", "a", false, "show all containers and not just those running")
containerCommand.AddCommand(cmd)
}
func list(cmd *cobra.Command, args []string) error {
rootDir, _ := cmd.Flags().GetString("root")
all, _ := cmd.Flags().GetBool("all")
list, err := yaman.ListContainers(rootDir, all)
if err != nil {
return err
}
w := tabwriter.NewWriter(os.Stdout, 12, 1, 3, ' ', 0)
fmt.Fprint(w, "CONTAINER ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tPORTS\n")
for _, container := range list {
exposedPorts := fmt.Sprint(container.ExposedPorts)
exposedPorts = exposedPorts[1 : len(exposedPorts)-1]
fmt.Fprintf(
w, "%s\t%s\t%s\t%s ago\t%s\t%s\n",
container.ID,
container.Image,
container.Command,
units.HumanDuration(time.Since(container.Created)),
container.Status,
exposedPorts,
)
}
return w.Flush()
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yaman/container/logs.go | Go | package container
import (
"os"
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/yaman"
)
func init() {
cmd := &cobra.Command{
Use: "logs <container>",
Short: "Fetch the logs of a container",
Run: cli.HandleErrors(logs),
Args: cobra.ExactArgs(1),
ValidArgsFunction: completeContainerIds,
}
cmd.Flags().BoolP("timestamps", "t", false, "show timestamps")
containerCommand.AddCommand(cmd)
}
func logs(cmd *cobra.Command, args []string) error {
rootDir, _ := cmd.Flags().GetString("root")
timestamps, _ := cmd.Flags().GetBool("timestamps")
opts := yaman.CopyLogsOpts{
Timestamps: timestamps,
Stdout: os.Stdout,
Stderr: os.Stderr,
}
return yaman.CopyLogs(rootDir, args[0], opts)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yaman/container/restart.go | Go | package container
import (
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/yaman"
)
func init() {
cmd := &cobra.Command{
Use: "restart <container>",
Short: "Restart a container",
Run: cli.HandleErrors(restart),
Args: cobra.ExactArgs(1),
ValidArgsFunction: completeContainerIds,
}
containerCommand.AddCommand(cmd)
}
func restart(cmd *cobra.Command, args []string) error {
rootDir, _ := cmd.Flags().GetString("root")
return yaman.Restart(rootDir, args[0])
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yaman/container/run.go | Go | package container
import (
"fmt"
"os"
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/yaman"
"github.com/willdurand/containers/internal/yaman/registry"
"github.com/willdurand/containers/internal/yaman/shim"
)
func init() {
cmd := &cobra.Command{
Use: "run <image> [<command> [<args>...]]",
Short: "Run a command in a new container",
Run: cli.HandleErrors(run),
Args: cobra.MinimumNArgs(1),
}
cmd.Flags().BoolP("detach", "d", false, "run container in background and print container ID")
addCreateFlagsToCommand(cmd)
containerCommand.AddCommand(cmd)
}
func run(cmd *cobra.Command, args []string) error {
rootDir, _ := cmd.Flags().GetString("root")
// registry/pull options
pull, _ := cmd.Flags().GetString("pull")
pullPolicy, err := registry.ParsePullPolicy(pull)
if err != nil {
return err
}
pullOpts := registry.PullOpts{
Policy: pullPolicy,
Output: os.Stderr,
}
// container options
containerOpts := makeContainerOptsFromCommand(cmd, args[1:])
detach, _ := cmd.Flags().GetBool("detach")
if detach {
containerOpts.Detach = true
}
// shim options
shimOpts := shim.ShimOpts{}
if runtime, _ := cmd.Flags().GetString("runtime"); runtime != "" {
shimOpts.Runtime = runtime
}
result, err := yaman.Run(rootDir, args[0], pullOpts, containerOpts, shimOpts)
if err != nil {
// If we do not have an `ExitCodeError` already, set the exit code to
// `126` to indicate a problem coming from Yaman.
switch err.(type) {
case cli.ExitCodeError:
return err
default:
return cli.ExitCodeError{Message: err.Error(), ExitCode: 126}
}
}
// In detached mode, we print the container ID to the standard output and we
// are done. The container should be running as long as it is supposed to
// (e.g., if the command exits after completion, the container might be
// exited but if the command is a daemon, the container should still be
// alive).
if detach {
fmt.Fprintln(os.Stdout, result.ContainerID)
return nil
}
os.Exit(result.ExitStatus)
return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yaman/container/start.go | Go | package container
import (
"os"
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/yaman"
)
func init() {
cmd := &cobra.Command{
Use: "start <container>",
Short: "Start a container",
Run: cli.HandleErrors(start),
Args: cobra.ExactArgs(1),
ValidArgsFunction: completeContainerIds,
}
cmd.Flags().BoolP("attach", "a", false, "attach stdio streams")
cmd.Flags().BoolP("interactive", "i", false, "keep stdin open")
containerCommand.AddCommand(cmd)
}
func start(cmd *cobra.Command, args []string) error {
rootDir, _ := cmd.Flags().GetString("root")
attach, _ := cmd.Flags().GetBool("attach")
interactive, _ := cmd.Flags().GetBool("interactive")
opts := yaman.StartOpts{
Attach: attach,
Interactive: interactive,
}
result, err := yaman.Start(rootDir, args[0], opts)
if err != nil {
return err
}
os.Exit(result.ExitStatus)
return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yaman/container/stop.go | Go | package container
import (
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/yaman"
)
func init() {
cmd := &cobra.Command{
Use: "stop <container> [<container>...]",
Short: "Stop one or more containers",
Run: cli.HandleErrors(stop),
Args: cobra.MinimumNArgs(1),
ValidArgsFunction: completeContainerIds,
}
containerCommand.AddCommand(cmd)
}
func stop(cmd *cobra.Command, args []string) error {
rootDir, _ := cmd.Flags().GetString("root")
for _, id := range args {
if err := yaman.Stop(rootDir, id); err != nil {
logrus.WithFields(logrus.Fields{
"id": id,
"error": err,
}).Debug("failed to delete container")
cli.PrintUserError(err)
}
}
return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yaman/image/image.go | Go | package image
import "github.com/spf13/cobra"
var imageCommand = &cobra.Command{
Use: "image",
Aliases: []string{"i"},
Short: "Manage images",
}
func Register(rootCmd *cobra.Command) {
rootCmd.AddCommand(imageCommand)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yaman/image/list.go | Go | package image
import (
"fmt"
"os"
"text/tabwriter"
"time"
"github.com/docker/go-units"
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/yaman"
)
func init() {
cmd := &cobra.Command{
Use: "list",
Aliases: []string{"ls"},
Short: "List images",
Run: cli.HandleErrors(list),
Args: cobra.NoArgs,
}
imageCommand.AddCommand(cmd)
}
func list(cmd *cobra.Command, args []string) error {
rootDir, _ := cmd.Flags().GetString("root")
list, err := yaman.ListImages(rootDir)
if err != nil {
return err
}
w := tabwriter.NewWriter(os.Stdout, 12, 1, 3, ' ', 0)
fmt.Fprint(w, "NAME\tTAG\tCREATED\tPULLED\tREGISTRY\n")
for _, img := range list {
fmt.Fprintf(
w, "%s\t%s\t%s\t%s ago\t%s\n",
img.Name,
img.Version,
img.Created.Format(time.RFC3339),
units.HumanDuration(time.Since(img.Pulled)),
img.Registry,
)
}
return w.Flush()
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yaman/image/pull.go | Go | package image
import (
"os"
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/cli"
"github.com/willdurand/containers/internal/yaman/image"
"github.com/willdurand/containers/internal/yaman/registry"
)
func init() {
cmd := &cobra.Command{
Use: "pull <image>",
Short: "Pull an image from a registry",
Run: cli.HandleErrors(pull),
Args: cobra.ExactArgs(1),
}
imageCommand.AddCommand(cmd)
}
func pull(cmd *cobra.Command, args []string) error {
rootDir, _ := cmd.Flags().GetString("root")
img, err := image.New(rootDir, args[0])
if err != nil {
return err
}
opts := registry.PullOpts{
Policy: registry.PullAlways,
Output: os.Stdout,
}
if err := registry.Pull(img, opts); err != nil {
return err
}
return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
cmd/yaman/main.go | Go | // Yet Another (Container) MANager.
package main
import (
"github.com/willdurand/containers/cmd/yaman/container"
"github.com/willdurand/containers/cmd/yaman/image"
"github.com/willdurand/containers/internal/cli"
)
const (
programName string = "yaman"
)
// rootCmd represents the root command.
var rootCmd = cli.NewRootCommand(
programName,
"Yet another daemonless container manager",
)
func init() {
image.Register(rootCmd)
container.Register(rootCmd)
}
func main() {
cli.Execute(rootCmd)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
extras/docker/hello-world/hello.c | C | #include <unistd.h>
/* Greeting printed by the hello-world demo image. NOTE: this exact text is
 * user-facing output and must not be altered lightly. */
const char message[] =
    "\n"
    "Hello from @willdurand!\n"
    "\n"
    "This message shows that your installation appears to be working correctly\n"
    "(but that might be a lie because this is bleeding edge technology).\n"
    "\n"
    "To generate this message, Yaman took the following steps:\n"
    " 1. Yaman pulled the \"willdurand/hello-world\" image from the Docker Hub.\n"
    " 2. Yaman created a new container from that image which runs the executable\n"
    "    that produces the output you are currently reading. Under the hood,\n"
    "    a \"shim\" named Yacs has been executed. This is the tool responsible\n"
    "    for monitoring the container (which was created by a third tool: Yacr,\n"
    "    an \"OCI runtime\").\n"
    " 3. Yaman connected to the container output (via the shim), which sent it\n"
    "    to your terminal. Amazing, right?\n"
    "\n"
    "To try something more ambitious, you can run an Alpine container with:\n"
    " $ sudo yaman c run -it docker.io/library/alpine sh\n"
    "\n"
    "That's basically it because this is a learning project :D\n"
    "\n"
    "For more examples and ideas, visit:\n"
    " https://github.com/willdurand/containers\n"
    "\n";
/* Writes the greeting to stdout (fd 1); sizeof - 1 drops the trailing NUL.
 * The return value of write() is deliberately ignored for this demo. */
int main() {
  write(1, message, sizeof(message) - 1);
  return 0;
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
extras/microvm/init.c | C | #include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mount.h>
#include <sys/reboot.h>
#include <sys/stat.h>
#include <termios.h>
#include <unistd.h>
static const char *MV_ENV_VARS[] = {"MV_INIT", "MV_HOSTNAME", "MV_DEBUG",
"MV_TTY", NULL};
static const char *BIN_SH = "/bin/sh";
/* pr_debug prints a printf-style message prefixed with "init: " when the
 * MV_DEBUG environment variable is set to "1"; otherwise it is a no-op. */
static void pr_debug(const char *fmt, ...) {
  /* getenv() returns NULL when MV_DEBUG is unset; the previous code passed
   * that NULL straight to strcmp(), which is undefined behavior. */
  const char *debug = getenv("MV_DEBUG");
  if (debug == NULL || strcmp(debug, "1") != 0) {
    return;
  }

  printf("init: ");

  va_list arg;
  va_start(arg, fmt);
  vprintf(fmt, arg);
  va_end(arg);

  printf("\n");
}
/* cleanup_env removes every microvm-specific MV_* variable from the
 * environment before control is handed to the user-specified init process. */
static void cleanup_env() {
  for (const char **name = MV_ENV_VARS; *name != NULL; name++) {
    unsetenv(*name);
  }
}
/* PID 1 inside the microvm: mounts the pseudo-filesystems, configures the
 * hostname and console, then exec()s the user-specified init program.
 * Configuration is received via MV_* environment variables (passed on the
 * kernel command line). Each mkdir() tolerates EEXIST so a pre-populated
 * rootfs keeps working. */
int main(int argc, char *argv[]) {
  if (mkdir("/proc", 0555) != 0 && errno != EEXIST) {
    perror("mkdir: /proc");
    return 1;
  }
  if (mount("proc", "/proc", "proc", 0, NULL) != 0) {
    perror("mount: /proc");
    return 1;
  }
  if (mkdir("/dev", 0755) != 0 && errno != EEXIST) {
    /* Bug fix: this error message previously said "/dev/pts". */
    perror("mkdir: /dev");
    return 1;
  }
  if (mkdir("/dev/pts", 0620) != 0 && errno != EEXIST) {
    perror("mkdir: /dev/pts");
    return 1;
  }
  if (mount("devpts", "/dev/pts", "devpts", MS_NOSUID | MS_NOEXEC, NULL) != 0) {
    perror("mount: /dev/pts");
    return 1;
  }
  if (mkdir("/dev/shm", 0777) != 0 && errno != EEXIST) {
    perror("mkdir: /dev/shm");
    return 1;
  }
  if (mount("shm", "/dev/shm", "tmpfs", MS_NOSUID | MS_NOEXEC | MS_NODEV,
            NULL) != 0) {
    perror("mount: /dev/shm");
    return 1;
  }

  char *hostname = getenv("MV_HOSTNAME");
  if (hostname) {
    pr_debug("sethostname: %s", hostname);
    sethostname(hostname, strlen(hostname));
  }

  /* Fall back to a shell when no init program was requested. */
  char *init = getenv("MV_INIT");
  if (!init) {
    init = (char *)BIN_SH;
  }
  argv[0] = init;

  pr_debug("execvp: argc=%d argv0=%s", argc, argv[0]);

  /* Bug fix: guard against MV_TTY being unset — getenv() may return NULL,
   * which must not be passed to strcmp(). */
  const char *tty_var = getenv("MV_TTY");
  if (tty_var != NULL && strcmp(tty_var, "1") == 0) {
    /* Become session leader and adopt the virtio console as the
     * controlling terminal with fds 0/1/2 attached to it. */
    setsid();

    int fd = open("/dev/hvc0", O_RDWR);
    if (fd < 0) {
      perror("open: /dev/hvc0");
      return 1;
    }
    if (!isatty(fd)) {
      perror("isatty: /dev/hvc0");
      return 1;
    }

    dup2(fd, 0);
    dup2(fd, 1);
    dup2(fd, 2);
    while (fd > 2) {
      close(fd--);
    }

    // This should fix the following error:
    //
    //   /bin/sh: can't access tty; job control turned off
    //
    ioctl(0, TIOCSCTTY, 1);
  } else {
    // Disable ECHO
    struct termios term;
    tcgetattr(0, &term);
    term.c_lflag &= ~ECHO;
    tcsetattr(0, 0, &term);

    printf("init: ready\n");
  }

  cleanup_env();
  return execvp(argv[0], argv);
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/cli/cli.go | Go | // Package cli contains the common features used to build CLI applications.
package cli
import (
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
"strings"
"github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/writer"
"github.com/spf13/cobra"
"github.com/willdurand/containers/internal/version"
)
// NewRootCommand creates a new root (base) command for a program. The caller is
// responsible for assigning other properties when needed.
// NewRootCommand creates a new root (base) command for a program. The caller is
// responsible for assigning other properties when needed.
//
// Every program built on this helper shares the same persistent flags:
// --root, --log, --log-level, --log-format and --debug.
func NewRootCommand(programName, shortDescription string) *cobra.Command {
	rootCmd := &cobra.Command{
		Use:     programName,
		Short:   shortDescription,
		Version: version.Version(),
		CompletionOptions: cobra.CompletionOptions{
			DisableDefaultCmd:   false,
			HiddenDefaultCmd:    true,
			DisableDescriptions: true,
		},
		PersistentPreRunE: makeRootPreRunE(programName),
	}

	rootCmd.PersistentFlags().String("root", getDefaultRootDir(programName), "root directory")
	rootCmd.PersistentFlags().String("log", "", `path to the log file (default "/dev/stderr")`)
	rootCmd.PersistentFlags().String("log-level", "warn", "set the logging level")
	// Typo fix in the user-facing help text: "loging" -> "logging".
	rootCmd.PersistentFlags().String("log-format", "", `set the logging format (default "text")`)
	rootCmd.PersistentFlags().Bool("debug", false, "enable debug logging")

	return rootCmd
}
// Execute runs the given (root) command and terminates the process with a
// non-zero exit code when the command returns an error.
func Execute(cmd *cobra.Command) {
	if err := cmd.Execute(); err != nil {
		exit(err)
	}
}
// HandleErrors adapts a fallible cobra handler into a plain `Run` function:
// when the wrapped handler fails, the error is logged, shown to the user
// (unless logs already go to stderr) and the process exits.
func HandleErrors(f func(cmd *cobra.Command, args []string) error) func(cmd *cobra.Command, args []string) {
	return func(cmd *cobra.Command, args []string) {
		err := f(cmd, args)
		if err == nil {
			return
		}

		logrus.Error(err)
		// Avoid printing the same error twice when the logger already
		// writes to stderr.
		if !logToStderr() {
			PrintUserError(err)
		}
		exit(err)
	}
}
// PrintUserError writes a user-facing error message to stderr.
func PrintUserError(err error) {
	fmt.Fprintln(os.Stderr, "Error:", err)
}
// exit terminates the process, using the error's embedded exit code when it
// is an ExitCodeError and 125 otherwise.
func exit(err error) {
	code := 125
	if exitCodeError, ok := err.(ExitCodeError); ok {
		code = exitCodeError.ExitCode
	}
	os.Exit(code)
}
// logToStderr returns true when the logger is configured to write to stderr,
// false otherwise.
// logToStderr returns true when the logger is configured to write to stderr,
// false otherwise.
func logToStderr() bool {
	if f, ok := logrus.StandardLogger().Out.(*os.File); ok {
		return f.Fd() == os.Stderr.Fd()
	}
	return false
}
// makeRootPreRunE creates a `PersistentPreRunE()` function that should be used
// on root commands to configure the logger and the program's root directory.
// makeRootPreRunE creates a `PersistentPreRunE()` function that should be used
// on root commands to configure the logger and the program's root directory.
func makeRootPreRunE(programName string) func(cmd *cobra.Command, args []string) error {
	return func(cmd *cobra.Command, args []string) error {
		// Ensure the root directory exists before any sub-command runs.
		rootDir, _ := cmd.Flags().GetString("root")
		if err := makeRootDir(rootDir); err != nil {
			return err
		}
		// "--log-format": only "text" (the logrus default) and "json" are
		// supported; anything else is a hard error.
		switch logFormat, _ := cmd.Flags().GetString("log-format"); logFormat {
		case "", "text":
			// do nothing
		case "json":
			logrus.SetFormatter(&logrus.JSONFormatter{})
		default:
			return fmt.Errorf("unsupported log format '%s'", logFormat)
		}
		// "--log": redirect log output to a file (append mode).
		if logFile, _ := cmd.Flags().GetString("log"); logFile != "" {
			out, err := os.OpenFile(logFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
			if err != nil {
				return err
			}
			logrus.SetOutput(out)
		}
		// An invalid "--log-level" silently falls back to "warn" instead of
		// failing the command.
		logLevel, _ := cmd.Flags().GetString("log-level")
		level, err := logrus.ParseLevel(logLevel)
		if err != nil {
			level = logrus.WarnLevel
		}
		logrus.SetLevel(level)
		// "--debug": force the debug level and duplicate all log entries
		// into <root>/debug.log via a logrus hook.
		if debug, _ := cmd.Flags().GetBool("debug"); debug {
			out, err := os.OpenFile(filepath.Join(rootDir, "debug.log"), os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
			if err != nil {
				return err
			}
			logrus.SetLevel(logrus.DebugLevel)
			logrus.AddHook(&writer.Hook{
				Writer:    out,
				LogLevels: logrus.AllLevels,
			})
			logrus.WithFields(logrus.Fields{
				"program": programName,
				"args":    os.Args,
			}).Debug("invoking command")
		}
		return nil
	}
}
// makeRootDir creates the root directory (mode 0700) when it does not exist
// yet. When the directory lives under $XDG_RUNTIME_DIR, the sticky bit is set
// so that it survives the periodic clean-ups described below.
func makeRootDir(rootDir string) error {
	if _, err := os.Stat(rootDir); errors.Is(err, fs.ErrNotExist) {
		if err := os.MkdirAll(rootDir, 0o700); err != nil {
			return err
		}
		xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR")
		if xdgRuntimeDir != "" && strings.HasPrefix(rootDir, xdgRuntimeDir) {
			// $XDG_RUNTIME_DIR defines the base directory relative to which
			// user-specific non-essential runtime files and other file objects
			// (such as sockets, named pipes, ...) should be stored. The
			// directory MUST be owned by the user, and he MUST be the only one
			// having read and write access to it. Its Unix access mode MUST be
			// 0700. [...] Files in this directory MAY be subjected to periodic
			// clean-up. To ensure that your files are not removed, they should
			// have their access time timestamp modified at least once every 6
			// hours of monotonic time or the 'sticky' bit should be set on the
			// file.
			err := os.Chmod(rootDir, os.FileMode(0o700)|os.ModeSticky)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
func getDefaultRootDir(programName string) string {
rootDir := filepath.Join("/run", programName)
xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR")
if xdgRuntimeDir != "" && os.Getuid() != 0 {
rootDir = filepath.Join(xdgRuntimeDir, programName)
}
envVar := fmt.Sprintf("%s_ROOT_DIR", strings.ToUpper(programName))
if dir := os.Getenv(envVar); dir != "" {
rootDir = dir
}
return rootDir
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/cli/error.go | Go | package cli
// ExitCodeError is an error that carries the status code the process should
// terminate with (see cli.exit()).
type ExitCodeError struct {
	// Message is the human-readable description of the error.
	Message string
	// ExitCode is the status code passed to os.Exit().
	ExitCode int
}
// Error implements the error interface.
func (e ExitCodeError) Error() string {
	return e.Message
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/cmd/cmd.go | Go | package cmd
import (
"fmt"
"os/exec"
"strings"
)
// Run runs a given command and tries to return more meaningful information when
// something goes wrong.
func Run(c *exec.Cmd) error {
if _, err := c.Output(); err != nil {
if exitError, ok := err.(*exec.ExitError); ok {
return fmt.Errorf("%s: %w", strings.TrimSuffix(string(exitError.Stderr), "\n"), err)
}
return err
}
return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/constants/constants.go | Go | // Package constants provides common constant definitions.
package constants
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/constants/state.go | Go | package constants
// Container lifecycle statuses as defined by the OCI runtime specification.
const (
	// StateCreating indicates that the container is being created.
	StateCreating string = "creating"
	// StateCreated indicates that the runtime has finished the create operation.
	StateCreated string = "created"
	// StateRunning indicates that the container process has executed the
	// user-specified program but has not exited.
	StateRunning string = "running"
	// StateStopped indicates that the container process has exited.
	StateStopped string = "stopped"
)
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/logs/errors.go | Go | package logs
import (
"bytes"
"encoding/json"
"errors"
"io/ioutil"
"os"
)
// GetBetterError parses a log file in order to return an error that is more
// informative than the default one passed as a second argument.
func GetBetterError(logFilePath string, defaultError error) error {
logFile, err := os.Open(logFilePath)
if err != nil {
return defaultError
}
defer logFile.Close()
// TODO: do not read the entire file, we only need the more recent lines
// (last 10 probably).
data, err := ioutil.ReadAll(logFile)
if err != nil {
return defaultError
}
// We parse each log line, starting with the most recents first.
lines := bytes.Split(data, []byte("\n"))
for i := len(lines) - 1; i >= 0; i-- {
log := make(map[string]string)
if err := json.Unmarshal(lines[i], &log); err != nil {
continue
}
if log["level"] != "error" {
continue
}
msg := log["msg"]
if msg == "" || msg == "exit status 1" {
continue
}
return errors.New(msg)
}
return defaultError
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/microvm/container/container.go | Go | package container
import (
"fmt"
"path/filepath"
"strings"
"github.com/willdurand/containers/internal/runtime"
)
// MicrovmContainer specializes the shared runtime container state for the
// QEMU-based microvm runtime.
type MicrovmContainer struct {
	*runtime.BaseContainer
}
// kernelPath is the path to the kernel binary on the host. This path should be
// kept in sync with the `make -C microvm install_kernel` command.
var kernelPath = "/usr/lib/microvm/vmlinux"
// New creates a new microvm container backed by the given OCI bundle.
func New(rootDir string, id string, bundleDir string) (*MicrovmContainer, error) {
	base, err := runtime.New(rootDir, id, bundleDir)
	return &MicrovmContainer{base}, err
}
// LoadWithBundleConfig loads an existing container together with the OCI
// bundle configuration referenced by its saved state.
func LoadWithBundleConfig(rootDir string, id string) (*MicrovmContainer, error) {
	base, err := runtime.LoadWithBundleConfig(rootDir, id)
	return &MicrovmContainer{base}, err
}
// PipePath returns the base path of the virtio console pipe pair.
func (c *MicrovmContainer) PipePath() string {
	return filepath.Join(c.BaseDir, "virtiocon0")
}
// PipePathIn returns the path of the named pipe feeding the VM console.
func (c *MicrovmContainer) PipePathIn() string {
	return fmt.Sprintf("%s.in", c.PipePath())
}
// PipePathOut returns the path of the named pipe carrying the VM console
// output.
func (c *MicrovmContainer) PipePathOut() string {
	return fmt.Sprintf("%s.out", c.PipePath())
}
// InitDirPath returns the directory (inside the guest rootfs) that receives
// our custom init binary.
func (c *MicrovmContainer) InitDirPath() string {
	return filepath.Join(c.Rootfs(), "sbin")
}
// InitFilePath returns the full path of the init binary inside the rootfs.
func (c *MicrovmContainer) InitFilePath() string {
	return filepath.Join(c.InitDirPath(), "init")
}
// VirtiofsdSocketPath returns the unix socket shared by virtiofsd and QEMU.
func (c *MicrovmContainer) VirtiofsdSocketPath() string {
	return filepath.Join(c.BaseDir, "vfsd.sock")
}
// ArgsForQEMU builds the QEMU command line for this container: a 512 MB
// "microvm" machine with a virtio console and a virtio-fs root filesystem
// (served by virtiofsd over VirtiofsdSocketPath()). The caller appends the
// "-chardev" backing virtiocon0. QEMU daemonizes and writes its PID to
// pidFile.
func (c *MicrovmContainer) ArgsForQEMU(pidFile string, debug, tty bool) []string {
	return []string{
		"-M", "microvm",
		"-m", "512m",
		"-no-acpi", "-no-reboot", "-no-user-config", "-nodefaults", "-display", "none",
		"-device", "virtio-serial-device",
		"-device", "virtconsole,chardev=virtiocon0",
		"-chardev", fmt.Sprintf("socket,id=virtiofs0,path=%s", c.VirtiofsdSocketPath()),
		"-device", "vhost-user-fs-device,queue-size=1024,chardev=virtiofs0,tag=/dev/root",
		"-kernel", kernelPath,
		// virtio-fs requires a shared memory backend; size must match "-m".
		"-object", fmt.Sprintf("memory-backend-memfd,id=mem,size=%s,share=on", "512m"),
		"-numa", "node,memdev=mem",
		"-pidfile", pidFile, "-daemonize",
		"-append", c.appendLine(debug, tty),
	}
}
// appendLine builds the kernel command line ("-append"): boot parameters
// first, then the MV_* variables consumed by our custom init, then the
// container process environment.
func (c *MicrovmContainer) appendLine(debug, tty bool) string {
	args := []string{
		// Issue a keyboard controller reset to reboot. It's fine to reboot
		// because we pass `-no-reboot` to QEMU.
		"reboot=k",
		// We use virtio-fs for the root filesystem.
		"rootfstype=virtiofs", "root=/dev/root", "rw",
		// `hvc0` is the virtio-console configured when we start QEMU.
		"console=hvc0",
	}
	if debug {
		args = append(args, "MV_DEBUG=1")
	} else {
		// "quiet" silences the kernel itself in non-debug mode.
		args = append(args, "quiet", "MV_DEBUG=0")
	}
	if tty {
		args = append(args, "MV_TTY=1")
	} else {
		args = append(args, "MV_TTY=0")
	}
	args = append(
		args,
		fmt.Sprintf("MV_HOSTNAME=%s", c.Spec.Hostname),
		fmt.Sprintf("MV_INIT=%s", strings.Join(c.Spec.Process.Args, " ")),
	)
	return strings.Join(append(args, c.Spec.Process.Env...), " ")
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/microvm/create.go | Go | package microvm
import (
"bytes"
"errors"
"fmt"
"io/fs"
"io/ioutil"
"net"
"os"
"os/exec"
"path/filepath"
"strconv"
"github.com/creack/pty"
"github.com/sirupsen/logrus"
"github.com/willdurand/containers/internal/microvm/container"
"golang.org/x/sys/unix"
_ "embed"
)
// CreateOpts holds the options for the create operation.
type CreateOpts struct {
	// PidFile is where QEMU writes the VM PID; defaults to
	// <base dir>/container.pid when empty.
	PidFile string
	// ConsoleSocket is the unix socket used to hand the pty master back to
	// the caller; a non-empty value enables TTY mode.
	ConsoleSocket string
	// Debug enables verbose kernel/init output in the guest.
	Debug bool
}
//go:embed init
var initBinary []byte
// Create implements the OCI "create" operation for the microvm runtime: it
// installs our custom init into the bundle rootfs, starts virtiofsd to export
// that rootfs, then starts a daemonized QEMU microvm whose console is wired
// either to a pty (console socket mode) or to a pair of named pipes.
func Create(rootDir, containerId, bundle string, opts CreateOpts) error {
	container, err := container.New(rootDir, containerId, bundle)
	if err != nil {
		return err
	}
	if opts.PidFile == "" {
		opts.PidFile = filepath.Join(container.BaseDir, "container.pid")
	}
	// Prepare the VM root filesystem: we mainly want to install our own
	// `init(1)` executable.
	if err := os.Remove(container.InitFilePath()); err != nil && !errors.Is(err, fs.ErrNotExist) {
		return err
	}
	if err := os.MkdirAll(container.InitDirPath(), 0o755); err != nil {
		return err
	}
	if err := ioutil.WriteFile(container.InitFilePath(), initBinary, 0o755); err != nil {
		return err
	}
	// We need the container base directory created for `virtiofsd` and `qemu`.
	if err := container.Save(); err != nil {
		return err
	}
	// We use `virtiofsd` to "mount" the root filesystem in the VM.
	virtiofsd, err := exec.LookPath("virtiofsd")
	if err != nil {
		return err
	}
	virtiofsdCmd := exec.Command(
		virtiofsd,
		"--syslog",
		"--socket-path", container.VirtiofsdSocketPath(),
		"--shared-dir", container.Rootfs(),
		"--cache", "never",
		"--sandbox", "none",
	)
	// Only useful when `--syslog` isn't specified above.
	virtiofsdCmd.Stderr = os.Stderr
	logrus.WithField("command", virtiofsdCmd.String()).Debug("starting virtiofsd")
	if err := virtiofsdCmd.Start(); err != nil {
		return fmt.Errorf("virtiofsd: %w", err)
	}
	// Detach from the daemon process; it outlives this command.
	defer virtiofsdCmd.Process.Release()
	qemu, err := exec.LookPath("qemu-system-x86_64")
	if err != nil {
		return err
	}
	// TTY mode is selected by the presence of a console socket.
	useTTY := opts.ConsoleSocket != ""
	qemuCmd := exec.Command(qemu, container.ArgsForQEMU(opts.PidFile, opts.Debug, useTTY)...)
	if useTTY {
		pty, tty, err := pty.Open()
		if err != nil {
			return fmt.Errorf("failed to create pty: %w", err)
		}
		defer pty.Close()
		defer tty.Close()
		// Connect to the socket in order to send the pty file descriptor.
		conn, err := net.Dial("unix", opts.ConsoleSocket)
		if err != nil {
			return err
		}
		defer conn.Close()
		uc, ok := conn.(*net.UnixConn)
		if !ok {
			return errors.New("failed to cast unix socket")
		}
		defer uc.Close()
		// Send file descriptor over socket.
		// NOTE(review): the WriteMsgUnix error is ignored here — a failed
		// fd transfer would leave the caller without a console; confirm.
		oob := unix.UnixRights(int(pty.Fd()))
		uc.WriteMsgUnix([]byte(pty.Name()), oob, nil)
		qemuCmd.Args = append(
			qemuCmd.Args,
			"-chardev", fmt.Sprintf("tty,path=%s,id=virtiocon0", tty.Name()),
		)
	} else {
		// Non-TTY mode: a pair of FIFOs backs the virtio console.
		for _, p := range []string{container.PipePathIn(), container.PipePathOut()} {
			if err := unix.Mkfifo(p, 0o600); err != nil && !errors.Is(err, fs.ErrExist) {
				return err
			}
		}
		qemuCmd.Args = append(
			qemuCmd.Args,
			"-chardev", fmt.Sprintf("pipe,path=%s,id=virtiocon0", container.PipePath()),
		)
	}
	logrus.WithField("command", qemuCmd.String()).Debug("starting QEMU")
	// Run() (not Start()) is correct here: QEMU daemonizes itself.
	if err := qemuCmd.Run(); err != nil {
		return fmt.Errorf("qemu: %w", err)
	}
	if !useTTY {
		// If we do not have a console socket, we'll have to spawn a process to
		// redirect the microvm IOs (using the named pipes created above and the
		// host standard streams).
		self, err := os.Executable()
		if err != nil {
			return err
		}
		redirectCmd := exec.Command(self, "--root", rootDir, "redirect-stdio", containerId)
		if opts.Debug {
			redirectCmd.Args = append(redirectCmd.Args, "--debug")
		}
		redirectCmd.Stdin = os.Stdin
		redirectCmd.Stderr = os.Stderr
		redirectCmd.Stdout = os.Stdout
		// We need to save the container so that the `redirect-stdio` command can
		// load it.
		// NOTE(review): this Save() error is ignored, unlike the earlier one.
		container.Save()
		logrus.WithField("command", redirectCmd.String()).Debug("start redirect-stdio process")
		if err := redirectCmd.Start(); err != nil {
			return err
		}
		defer redirectCmd.Process.Release()
	}
	// Record the VM PID written by QEMU ("-pidfile").
	data, err := ioutil.ReadFile(opts.PidFile)
	if err != nil {
		return err
	}
	pid, err := strconv.Atoi(string(bytes.TrimSpace(data)))
	if err != nil {
		return err
	}
	container.SetPid(pid)
	// Overwrite the pid file because QEMU adds a newline, which breaks the
	// `strconv.Atoi()` call in `containerd`...
	if err := ioutil.WriteFile(opts.PidFile, bytes.TrimSpace(data), 0o644); err != nil {
		return err
	}
	return container.SaveAsCreated()
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/microvm/kill.go | Go | package microvm
import (
"fmt"
"syscall"
"github.com/willdurand/containers/internal/microvm/container"
)
// Kill delivers a signal to the VM process backing the container. The
// container must be in the "created" or "running" state.
func Kill(rootDir, containerId string, signal syscall.Signal) error {
	c, err := container.LoadWithBundleConfig(rootDir, containerId)
	if err != nil {
		return err
	}

	if !c.IsCreated() && !c.IsRunning() {
		return fmt.Errorf("unexpected status '%s' for container '%s'", c.State.Status, c.ID())
	}

	// A zero PID means no VM process was ever recorded; nothing to signal.
	if c.State.Pid == 0 {
		return nil
	}
	if err := syscall.Kill(c.State.Pid, signal); err != nil {
		return fmt.Errorf("failed to send signal '%d' to container '%s': %w", signal, c.ID(), err)
	}
	return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/microvm/start.go | Go | package microvm
import (
"fmt"
"github.com/willdurand/containers/internal/constants"
"github.com/willdurand/containers/internal/microvm/container"
)
// Start transitions a container from "created" to "running". The actual VM
// was already launched by Create(); this only flips the persisted status.
func Start(rootDir, containerId string) error {
	c, err := container.LoadWithBundleConfig(rootDir, containerId)
	if err != nil {
		return err
	}

	if c.IsCreated() {
		return c.UpdateStatus(constants.StateRunning)
	}
	return fmt.Errorf("start: unexpected status '%s' for container '%s'", c.State.Status, c.ID())
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/microvm/state.go | Go | package microvm
import (
"encoding/json"
"io"
"github.com/willdurand/containers/internal/microvm/container"
)
// State writes the OCI state of the container to w as JSON.
func State(rootDir, containerId string, w io.Writer) error {
	container, err := container.LoadWithBundleConfig(rootDir, containerId)
	if err != nil {
		return err
	}

	// Return the encoder result directly instead of re-wrapping it.
	return json.NewEncoder(w).Encode(container.State)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/runtime/container.go | Go | package runtime
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/fs"
"io/ioutil"
"os"
"path/filepath"
"time"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/willdurand/containers/internal/constants"
)
// BaseContainer is the state shared by every runtime implementation: the OCI
// bundle spec, the OCI runtime state, and where that state is persisted.
type BaseContainer struct {
	// Spec is the OCI runtime configuration loaded from the bundle.
	Spec runtimespec.Spec
	// State is the OCI runtime state persisted in StateFilePath.
	State runtimespec.State
	// CreatedAt records when this in-memory container value was built.
	CreatedAt time.Time
	// BaseDir is <root>/<id>, the per-container working directory.
	BaseDir string
	// StateFilePath is <BaseDir>/state.json.
	StateFilePath string
}
// New creates a BaseContainer for the given id. When bundleDir is non-empty,
// the container must not already exist on disk, the bundle path is made
// absolute, and the OCI bundle configuration is loaded.
func New(rootDir string, id string, bundleDir string) (*BaseContainer, error) {
	containerDir := filepath.Join(rootDir, id)
	if bundleDir != "" {
		if _, err := os.Stat(containerDir); err == nil {
			return nil, fmt.Errorf("container '%s' already exists", id)
		}
		if !filepath.IsAbs(bundleDir) {
			absoluteDir, err := filepath.Abs(bundleDir)
			if err != nil {
				return nil, err
			}
			bundleDir = absoluteDir
		}
	}
	// LoadSpec() is attempted unconditionally but its error is only fatal
	// when a bundle directory was actually provided (Load() passes "").
	spec, err := LoadSpec(bundleDir)
	if bundleDir != "" && err != nil {
		return nil, err
	}
	return &BaseContainer{
		Spec: spec,
		State: runtimespec.State{
			Version: runtimespec.Version,
			ID:      id,
			Status:  constants.StateCreating,
			Bundle:  bundleDir,
		},
		CreatedAt:     time.Now(),
		BaseDir:       containerDir,
		StateFilePath: filepath.Join(containerDir, "state.json"),
	}, nil
}
// Load reads an existing container's persisted state from disk and refreshes
// its status against the live process. The bundle spec is NOT loaded; use
// LoadWithBundleConfig() for that.
func Load(rootDir string, id string) (*BaseContainer, error) {
	container, err := New(rootDir, id, "")
	if err != nil {
		return container, err
	}
	if err := container.loadContainerState(); err != nil {
		// A missing state.json means the container was never created (or
		// already destroyed).
		if errors.Is(err, fs.ErrNotExist) {
			return container, fmt.Errorf("container '%s' does not exist", id)
		}
		return container, err
	}
	if err := container.refreshContainerState(); err != nil {
		return container, err
	}
	return container, nil
}
// LoadWithBundleConfig loads an existing container AND the OCI bundle
// configuration referenced by its saved state.
func LoadWithBundleConfig(rootDir string, id string) (*BaseContainer, error) {
	// Create a new container without bundle, which will create the container
	// state *without* the OCI bundle configuration. This is OK because we are
	// going to load the state right after, which will contain the path to the
	// bundle. From there, we'll be able to load the bundle config.
	container, err := Load(rootDir, id)
	if err != nil {
		return container, err
	}
	spec, err := LoadSpec(container.State.Bundle)
	if err != nil {
		return container, err
	}
	container.Spec = spec
	return container, nil
}
// ID returns the container identifier.
func (c *BaseContainer) ID() string {
	return c.State.ID
}
// IsCreated reports whether the runtime finished the create operation.
func (c *BaseContainer) IsCreated() bool {
	return c.State.Status == constants.StateCreated
}
// IsRunning reports whether the container process is running.
func (c *BaseContainer) IsRunning() bool {
	return c.State.Status == constants.StateRunning
}
// IsStopped reports whether the container process has exited.
func (c *BaseContainer) IsStopped() bool {
	return c.State.Status == constants.StateStopped
}
// Rootfs returns the absolute path of the container root filesystem,
// resolving a relative spec path against the bundle directory.
func (c *BaseContainer) Rootfs() string {
	rootfs := c.Spec.Root.Path
	if !filepath.IsAbs(rootfs) {
		rootfs = filepath.Join(c.State.Bundle, rootfs)
	}
	return rootfs
}
// UpdateStatus sets the container status and persists the new state.
func (c *BaseContainer) UpdateStatus(newStatus string) error {
	c.State.Status = newStatus
	return c.Save()
}
// Save persists the container state to disk, creating the per-container
// directory when needed.
func (c *BaseContainer) Save() error {
	if err := os.MkdirAll(c.BaseDir, 0o755); err != nil {
		return fmt.Errorf("failed to create container directory: %w", err)
	}
	if err := c.saveContainerState(); err != nil {
		return err
	}
	return nil
}
// SetPid records the container process PID (in memory only; call Save() or
// SaveAsCreated() to persist it).
func (c *BaseContainer) SetPid(pid int) {
	c.State.Pid = pid
}
// SaveAsCreated marks the container as "created" and persists the state.
func (c *BaseContainer) SaveAsCreated() error {
	return c.UpdateStatus(constants.StateCreated)
}
// Destroy removes the container base directory and everything in it.
func (c *BaseContainer) Destroy() error {
	// os.RemoveAll already returns nil when the path does not exist, so its
	// result can be returned directly.
	return os.RemoveAll(c.BaseDir)
}
// loadContainerState reads state.json from disk into c.State.
func (c *BaseContainer) loadContainerState() error {
	data, err := ioutil.ReadFile(c.StateFilePath)
	if err != nil {
		// Wrapped so callers can still match fs.ErrNotExist via errors.Is.
		return fmt.Errorf("failed to read state.json: %w", err)
	}
	if err := json.Unmarshal(data, &c.State); err != nil {
		return fmt.Errorf("failed to parse state.json: %w", err)
	}
	return nil
}
// refreshContainerState synchronizes the persisted status with the live
// process: when the container process no longer exists or has become a
// zombie, the container is marked as stopped.
func (c *BaseContainer) refreshContainerState() error {
	if c.State.Pid == 0 || c.IsStopped() {
		return nil
	}
	data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/stat", c.State.Pid))
	if err != nil {
		// No /proc entry: the process is gone.
		return c.UpdateStatus(constants.StateStopped)
	}
	// The stat line looks like "pid (comm) state ...". The comm field may
	// itself contain spaces (or parentheses), so the state character must be
	// located relative to the *last* ')' — naively splitting on spaces (as
	// the previous implementation did) misreads it, and could even panic on
	// truncated data.
	//
	// The state is one character from the string "RSDZTW" where R is running,
	// S is sleeping in an interruptible wait, D is waiting in uninterruptible
	// disk sleep, Z is zombie, T is traced or stopped (on a signal), and W is
	// paging.
	if i := bytes.LastIndexByte(data, ')'); i >= 0 && i+2 < len(data) && data[i+2] == 'Z' {
		return c.UpdateStatus(constants.StateStopped)
	}
	return nil
}
// saveContainerState serializes c.State to state.json.
func (c *BaseContainer) saveContainerState() error {
	data, err := json.Marshal(c.State)
	if err != nil {
		return fmt.Errorf("failed to serialize container state: %w", err)
	}
	if err := ioutil.WriteFile(c.StateFilePath, data, 0o644); err != nil {
		return fmt.Errorf("failed to save container state: %w", err)
	}
	return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/runtime/runtime.go | Go | package runtime
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/willdurand/containers/internal/user"
)
// LoadSpec reads and parses the OCI runtime configuration (config.json)
// located in bundleDir.
func LoadSpec(bundleDir string) (runtimespec.Spec, error) {
	var spec runtimespec.Spec
	data, err := ioutil.ReadFile(filepath.Join(bundleDir, "config.json"))
	if err != nil {
		return spec, fmt.Errorf("failed to read config.json: %w", err)
	}
	if err := json.Unmarshal(data, &spec); err != nil {
		return spec, fmt.Errorf("failed to parse config.json: %w", err)
	}
	// NOTE(review): a plain string-prefix check accepts any "1.0*" version
	// (e.g. "1.0.2" but also "1.01") — confirm this is the intended range.
	if !strings.HasPrefix(spec.Version, "1.0") {
		return spec, fmt.Errorf("unsupported runtime configuration version '%s'", spec.Version)
	}
	return spec, nil
}
// BaseSpec builds a default OCI runtime configuration for the given rootfs.
// In rootless mode it additionally binds /sys read-only, drops cgroup
// resources, adds a user namespace, and maps container UID/GID 0 to the
// current user plus the user's subordinate ID ranges.
func BaseSpec(rootfs string, rootless bool) (*runtimespec.Spec, error) {
	mounts := []runtimespec.Mount{
		{
			Destination: "/proc",
			Type:        "proc",
			Source:      "proc",
		},
		{
			Destination: "/dev",
			Type:        "tmpfs",
			Source:      "tmpfs",
			Options:     []string{"nosuid", "strictatime", "mode=755", "size=65536k"},
		},
		{
			Destination: "/dev/pts",
			Type:        "devpts",
			Source:      "devpts",
			Options:     []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620"},
		},
		{
			Destination: "/dev/shm",
			Type:        "tmpfs",
			Source:      "shm",
			Options:     []string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k"},
		},
		{
			Destination: "/dev/mqueue",
			Type:        "mqueue",
			Source:      "mqueue",
			Options:     []string{"nosuid", "noexec", "nodev"},
		},
	}
	// Deny access to all devices by default.
	resources := &runtimespec.LinuxResources{
		Devices: []runtimespec.LinuxDeviceCgroup{
			{
				Allow:  false,
				Access: "rwm",
			},
		},
	}
	namespaces := []runtimespec.LinuxNamespace{
		{Type: "ipc"},
		{Type: "mount"},
		{Type: "network"},
		{Type: "pid"},
		{Type: "uts"},
	}
	uidMappings := []runtimespec.LinuxIDMapping{}
	gidMappings := []runtimespec.LinuxIDMapping{}
	if rootless {
		mounts = append(mounts,
			runtimespec.Mount{
				Destination: "/sys",
				Type:        "none",
				Source:      "/sys",
				Options:     []string{"rbind", "nosuid", "noexec", "nodev", "ro"},
			},
		)
		// No resources in rootless mode.
		resources = nil
		namespaces = append(namespaces, runtimespec.LinuxNamespace{Type: "user"})
		// Map container root to the current user, and container IDs 1..N to
		// the user's subordinate ranges from /etc/subuid and /etc/subgid.
		uid, err := user.GetSubUid()
		if err != nil {
			return nil, err
		}
		uidMappings = append(
			uidMappings,
			runtimespec.LinuxIDMapping{
				ContainerID: 0,
				HostID:      uint32(os.Getuid()),
				Size:        1,
			},
			runtimespec.LinuxIDMapping{
				ContainerID: 1,
				HostID:      uint32(uid.ID),
				Size:        uint32(uid.Size),
			},
		)
		gid, err := user.GetSubGid()
		if err != nil {
			return nil, err
		}
		gidMappings = append(
			gidMappings,
			runtimespec.LinuxIDMapping{
				ContainerID: 0,
				HostID:      uint32(os.Getgid()),
				Size:        1,
			},
			runtimespec.LinuxIDMapping{
				ContainerID: 1,
				HostID:      uint32(gid.ID),
				Size:        uint32(gid.Size),
			},
		)
	}
	return &runtimespec.Spec{
		Version: runtimespec.Version,
		Process: &runtimespec.Process{
			Terminal: false,
			User: runtimespec.User{
				UID: 0,
				GID: 0,
			},
			Args: []string{"sleep", "100"},
			Env:  []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
			Cwd:  "/",
			// The same minimal capability set is granted in every set.
			Capabilities: &runtimespec.LinuxCapabilities{
				Bounding: []string{
					"CAP_AUDIT_WRITE",
					"CAP_KILL",
					"CAP_NET_BIND_SERVICE",
				},
				Effective: []string{
					"CAP_AUDIT_WRITE",
					"CAP_KILL",
					"CAP_NET_BIND_SERVICE",
				},
				Inheritable: []string{
					"CAP_AUDIT_WRITE",
					"CAP_KILL",
					"CAP_NET_BIND_SERVICE",
				},
				Permitted: []string{
					"CAP_AUDIT_WRITE",
					"CAP_KILL",
					"CAP_NET_BIND_SERVICE",
				},
				Ambient: []string{
					"CAP_AUDIT_WRITE",
					"CAP_KILL",
					"CAP_NET_BIND_SERVICE",
				},
			},
			Rlimits: []runtimespec.POSIXRlimit{
				{
					Type: "RLIMIT_NOFILE",
					Hard: 1024,
					Soft: 1024,
				},
			},
			NoNewPrivileges: true,
		},
		Root: &runtimespec.Root{
			Path: rootfs,
		},
		Hostname: "container",
		Mounts:   mounts,
		Linux: &runtimespec.Linux{
			Resources:   resources,
			UIDMappings: uidMappings,
			GIDMappings: gidMappings,
			Namespaces:  namespaces,
			MaskedPaths: []string{
				"/proc/acpi",
				"/proc/asound",
				"/proc/kcore",
				"/proc/keys",
				"/proc/latency_stats",
				"/proc/timer_list",
				"/proc/timer_stats",
				"/proc/sched_debug",
				"/sys/firmware",
				"/proc/scsi",
			},
			ReadonlyPaths: []string{
				"/proc/bus",
				"/proc/fs",
				"/proc/irq",
				"/proc/sys",
				"/proc/sysrq-trigger",
			},
		},
	}, nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/user/user.go | Go | package user
import (
"bufio"
"errors"
"io"
"os"
"os/user"
"strconv"
"strings"
)
var (
	// ErrInvalidEntry is returned when a subordinate file line is malformed.
	ErrInvalidEntry = errors.New("invalid entry in subordinate file")
	// ErrNotFound is returned when no range belongs to the current user.
	ErrNotFound     = errors.New("no subordinate IDs found for current user")
)
// SubordinateID represents a range of subordinate (user or group) IDs.
//
// See: https://man7.org/linux/man-pages/man5/subuid.5.html
// See: https://man7.org/linux/man-pages/man5/subgid.5.html
type SubordinateID struct {
	// User is the owner of the range (a login name or a numeric ID).
	User string
	// ID is the first subordinate ID in the range.
	ID int
	// Size is the number of IDs in the range.
	Size int
}
// ParseSubUid parses /etc/subuid and returns every subordinate UID range.
func ParseSubUid() ([]SubordinateID, error) {
	return parseSubordinateFile("/etc/subuid")
}

// ParseSubGid parses /etc/subgid and returns every subordinate GID range.
func ParseSubGid() ([]SubordinateID, error) {
	return parseSubordinateFile("/etc/subgid")
}

// parseSubordinateFile opens a subordinate ID file and parses its entries.
// It factors out the open/close logic previously duplicated in ParseSubUid
// and ParseSubGid.
func parseSubordinateFile(path string) ([]SubordinateID, error) {
	file, err := os.Open(path)
	if err != nil {
		return []SubordinateID{}, err
	}
	defer file.Close()

	return parseSubordinateIDs(file)
}
// GetSubUid returns the range of subordinate IDs for the current user.
func GetSubUid() (SubordinateID, error) {
ids, err := ParseSubUid()
if err != nil {
return SubordinateID{}, err
}
user, err := user.Current()
if err != nil {
return SubordinateID{}, err
}
for _, id := range ids {
if user.Username == id.User || user.Uid == id.User {
return id, nil
}
}
return SubordinateID{}, ErrNotFound
}
// GetSubGid returns the range of subordinate IDs for the current user.
func GetSubGid() (SubordinateID, error) {
	ids, err := ParseSubGid()
	if err != nil {
		return SubordinateID{}, err
	}
	user, err := user.Current()
	if err != nil {
		return SubordinateID{}, err
	}
	for _, id := range ids {
		// NOTE(review): subgid(5) entries are keyed by login name or numeric
		// *UID*; matching against user.Gid here looks suspicious — confirm.
		if user.Username == id.User || user.Gid == id.User {
			return id, nil
		}
	}
	return SubordinateID{}, ErrNotFound
}
// parseSubordinateIDs reads "user:start:count" entries (one per line) from r
// and returns them as SubordinateID values. Any malformed line aborts the
// parse with ErrInvalidEntry.
func parseSubordinateIDs(r io.Reader) ([]SubordinateID, error) {
	var ids []SubordinateID

	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		fields := strings.Split(scanner.Text(), ":")
		if len(fields) != 3 || fields[0] == "" {
			return ids, ErrInvalidEntry
		}

		start, err := strconv.Atoi(fields[1])
		if err != nil {
			return ids, ErrInvalidEntry
		}
		count, err := strconv.Atoi(fields[2])
		if err != nil {
			return ids, ErrInvalidEntry
		}

		ids = append(ids, SubordinateID{
			User: fields[0],
			ID:   start,
			Size: count,
		})
	}

	return ids, nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/user/user_test.go | Go | package user
import (
"bytes"
"testing"
)
// TestParseSubordinateIDs checks that well-formed `user:ID:size` lines are
// parsed into the expected SubordinateID values, in input order.
func TestParseSubordinateIDs(t *testing.T) {
    var buffer bytes.Buffer
    buffer.WriteString("vagrant:100000:65536\n")
    buffer.WriteString("ubuntu:165536:65536\n")
    ids, err := parseSubordinateIDs(&buffer)
    if err != nil {
        t.Error("failed to parse subuid")
    }
    if len(ids) != 2 {
        t.Errorf("expected %d IDs, got: %d", 2, len(ids))
    }
    // Compare each parsed entry field-by-field against the expected values.
    for i, expected := range []SubordinateID{
        {User: "vagrant", ID: 100000, Size: 65536},
        {User: "ubuntu", ID: 165536, Size: 65536},
    } {
        if ids[i].User != expected.User {
            t.Errorf("expected: %s, got: %s", expected.User, ids[i].User)
        }
        if ids[i].ID != expected.ID {
            t.Errorf("expected: %d, got: %d", expected.ID, ids[i].ID)
        }
        if ids[i].Size != expected.Size {
            t.Errorf("expected: %d, got: %d", expected.Size, ids[i].Size)
        }
    }
}
// TestParseSubordinateIDsEmptyFile verifies that parsing an empty reader
// succeeds and yields no entries.
func TestParseSubordinateIDsEmptyFile(t *testing.T) {
    var input bytes.Buffer
    ids, err := parseSubordinateIDs(&input)
    if err != nil {
        t.Error("failed to parse subuid")
    }
    if len(ids) != 0 {
        t.Errorf("expected %d IDs, got: %d", 0, len(ids))
    }
}
// TestParseSubordinateIDsInvalidFile verifies that malformed entries are
// rejected with ErrInvalidEntry.
func TestParseSubordinateIDsInvalidFile(t *testing.T) {
    for _, tt := range []struct {
        content string
        err     error
    }{
        {content: "vagrant:\n", err: ErrInvalidEntry},
        {content: "::\n", err: ErrInvalidEntry},
        {content: "vagrant::\n", err: ErrInvalidEntry},
        {content: "vagrant:1:\n", err: ErrInvalidEntry},
    } {
        buffer := bytes.NewBufferString(tt.content)
        _, err := parseSubordinateIDs(buffer)
        if err != tt.err {
            t.Errorf("expected error: %v, got: %v", tt.err, err)
        }
    }
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/version/version.go | Go | // Package version provides a function to print the version of this project,
// and it is usually exposed in a `version` command for each CLI application.
package version
import (
"runtime"
"strings"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
)
// Build-time version metadata.
var (
    // VersionString represents the project's version.
    VersionString string = "0.4.0-dev"
    // GitCommit is the hash of the git commit at build time. It is set by the Makefile.
    // (presumably via -ldflags — confirm in the Makefile)
    GitCommit string = "n/a"
)
// Version returns a formatted string with version information (the project
// version, the git commit, the OCI specification version and the Go version),
// one item per line.
func Version() string {
    parts := []string{
        VersionString,
        "commit: " + GitCommit,
        "spec: " + runtimespec.Version,
        "go: " + runtime.Version(),
    }
    return strings.Join(parts, "\n")
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yacr/container/container.go | Go | package container
import (
"bytes"
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"github.com/willdurand/containers/internal/runtime"
"github.com/willdurand/containers/internal/yacr/ipc"
)
// YacrContainer is the yacr-specific container type. It embeds the shared
// runtime.BaseContainer and adds IPC socket helpers and OCI hook execution.
type YacrContainer struct {
    *runtime.BaseContainer
}
// New creates a new yacr container wrapping a fresh runtime.BaseContainer.
// The wrapper is returned even when the underlying constructor fails.
func New(rootDir string, id string, bundleDir string) (*YacrContainer, error) {
    base, err := runtime.New(rootDir, id, bundleDir)
    c := &YacrContainer{BaseContainer: base}
    return c, err
}
// Load loads an existing container (without its bundle configuration).
// The wrapper is returned even when the underlying load fails.
func Load(rootDir string, id string) (*YacrContainer, error) {
    base, err := runtime.Load(rootDir, id)
    c := &YacrContainer{BaseContainer: base}
    return c, err
}
// LoadWithBundleConfig loads an existing container together with its bundle
// configuration. The wrapper is returned even when the underlying load fails.
func LoadWithBundleConfig(rootDir string, id string) (*YacrContainer, error) {
    base, err := runtime.LoadWithBundleConfig(rootDir, id)
    c := &YacrContainer{BaseContainer: base}
    return c, err
}
// LoadFromContainer loads a container's spec and state from within the
// container process itself: the state PID is set to the current process and
// the state file path is cleared (presumably so this in-container copy is
// never persisted back over the host-side state — confirm against
// runtime.BaseContainer).
//
// See: https://github.com/opencontainers/runtime-spec/blob/a3c33d663ebc56c4d35dbceaa447c7bf37f6fab3/runtime.md#state
func LoadFromContainer(rootDir string, id string) (*YacrContainer, error) {
    // Idiom fix: the parameter was previously named `BaseDir` (exported
    // style, colliding with the BaseDir field name); renamed to match the
    // sibling constructors.
    container := &YacrContainer{
        BaseContainer: &runtime.BaseContainer{},
    }
    c, err := runtime.LoadWithBundleConfig(rootDir, id)
    if err != nil {
        return container, err
    }
    container.BaseContainer.Spec = c.Spec
    container.BaseContainer.State = c.State
    // See: https://github.com/opencontainers/runtime-spec/blob/a3c33d663ebc56c4d35dbceaa447c7bf37f6fab3/runtime.md#state
    container.State.Pid = os.Getpid()
    container.CreatedAt = c.CreatedAt
    container.BaseDir = c.BaseDir
    container.StateFilePath = ""
    return container, nil
}
// GetInitSockAddr returns the path of the "init" socket used while the
// container boots, validated with ipc.EnsureValidSockAddr.
func (c *YacrContainer) GetInitSockAddr(mustExist bool) (string, error) {
    addr := filepath.Join(c.BaseDir, "init.sock")
    return addr, ipc.EnsureValidSockAddr(addr, mustExist)
}
// GetSockAddr returns the path of the container's IPC socket, validated with
// ipc.EnsureValidSockAddr.
func (c *YacrContainer) GetSockAddr(mustExist bool) (string, error) {
    addr := filepath.Join(c.BaseDir, "ipc.sock")
    return addr, ipc.EnsureValidSockAddr(addr, mustExist)
}
// ExecuteHooks runs the OCI hooks registered under the given name (e.g.
// "Prestart", "CreateRuntime", ...). The serialized container state is passed
// to each hook on stdin. It returns an error as soon as one hook fails;
// remaining hooks are skipped.
func (c *YacrContainer) ExecuteHooks(name string) error {
    if c.Spec.Hooks == nil {
        return nil
    }
    hooks := map[string][]runtimespec.Hook{
        "Prestart":        c.Spec.Hooks.Prestart,
        "CreateRuntime":   c.Spec.Hooks.CreateRuntime,
        "CreateContainer": c.Spec.Hooks.CreateContainer,
        "StartContainer":  c.Spec.Hooks.StartContainer,
        "Poststart":       c.Spec.Hooks.Poststart,
        "Poststop":        c.Spec.Hooks.Poststop,
    }[name]
    if len(hooks) == 0 {
        // Fix: the log field key was "name:" (stray trailing colon),
        // inconsistent with every other field key.
        logrus.WithFields(logrus.Fields{
            "id":   c.ID(),
            "name": name,
        }).Debug("no hooks")
        return nil
    }
    logrus.WithFields(logrus.Fields{
        "id":    c.ID(),
        "name":  name,
        "state": c.State,
        "hooks": hooks,
    }).Debug("executing hooks")
    s, err := json.Marshal(c.State)
    if err != nil {
        return err
    }
    for _, hook := range hooks {
        // NOTE(review): hook.Timeout is not honored here — a hanging hook
        // blocks the runtime indefinitely.
        var stdout, stderr bytes.Buffer
        cmd := exec.Cmd{
            Path:   hook.Path,
            Args:   hook.Args,
            Env:    hook.Env,
            Stdin:  bytes.NewReader(s),
            Stdout: &stdout,
            Stderr: &stderr,
        }
        if err := cmd.Run(); err != nil {
            logrus.WithFields(logrus.Fields{
                "id":     c.ID(),
                "name":   name,
                "error":  err,
                "stderr": stderr.String(),
                "stdout": stdout.String(),
            }).Error("failed to execute hooks")
            return fmt.Errorf("failed to execute %s hook '%s': %w", name, cmd.String(), err)
        }
    }
    return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yacr/create.go | Go | package yacr
import (
"errors"
"fmt"
"io/ioutil"
"net"
"os"
"os/exec"
"strconv"
"syscall"
"github.com/creack/pty"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"github.com/willdurand/containers/internal/cmd"
"github.com/willdurand/containers/internal/yacr/container"
"github.com/willdurand/containers/internal/yacr/ipc"
"golang.org/x/sys/unix"
)
// CreateOpts holds the options of the "create" command.
type CreateOpts struct {
    // ID is the container identifier.
    ID string
    // Bundle is the path to the OCI bundle directory.
    Bundle string
    // PidFile, when non-empty, is a file the container PID is written to.
    PidFile string
    // ConsoleSocket is the unix socket used to send the PTY file descriptor
    // back to the caller when the spec requests a terminal.
    ConsoleSocket string
    // NoPivot switches from pivot_root(2) to chroot(2) when entering the
    // rootfs.
    NoPivot bool
    // LogFile and LogFormat are forwarded to the re-executed container
    // process as global flags.
    LogFile   string
    LogFormat string
    // Debug forwards the --debug flag to the container process.
    Debug bool
}
// Create implements the "create" command: it sets up a new container from the
// OCI bundle, spawns the container process (a re-execution of this binary)
// with the namespaces requested by the spec, optionally allocates a PTY,
// configures uid/gid mappings for user namespaces, runs the `CreateRuntime`
// hooks and leaves the container waiting for the "start" command.
func Create(rootDir string, opts CreateOpts) error {
    if opts.Bundle == "" {
        return fmt.Errorf("invalid bundle")
    }
    container, err := container.New(rootDir, opts.ID, opts.Bundle)
    if err != nil {
        return err
    }
    // TODO: error when there is no linux configuration
    if err := container.Save(); err != nil {
        return err
    }
    logrus.WithFields(logrus.Fields{
        "id": container.ID(),
    }).Debug("new container created")
    // Create an initial socket that we pass to the container. When the
    // container starts, it should inform the host (this process). After that,
    // we discard this socket and connect to the container's socket, which is
    // needed for the `start` command (at least).
    initSockAddr, err := container.GetInitSockAddr(false)
    if err != nil {
        return err
    }
    initListener, err := net.Listen("unix", initSockAddr)
    if err != nil {
        return fmt.Errorf("listen error: %w", err)
    }
    defer initListener.Close()
    // Prepare a command to re-execute itself in order to create the container
    // process.
    containerArgs := []string{
        "create", "container", opts.ID,
        "--root", rootDir,
        "--bundle", opts.Bundle,
    }
    // Global flags must appear before the sub-command, hence the prepends.
    if opts.LogFile != "" {
        containerArgs = append([]string{"--log", opts.LogFile}, containerArgs...)
    }
    if opts.LogFormat != "" {
        containerArgs = append([]string{"--log-format", opts.LogFormat}, containerArgs...)
    }
    if opts.Debug {
        containerArgs = append([]string{"--debug"}, containerArgs...)
    }
    if opts.NoPivot {
        containerArgs = append([]string{"--no-pivot"}, containerArgs...)
    }
    // Translate the namespaces listed in the OCI spec into clone(2) flags for
    // the container process.
    var cloneFlags uintptr
    for _, ns := range container.Spec.Linux.Namespaces {
        switch ns.Type {
        case runtimespec.UTSNamespace:
            cloneFlags |= syscall.CLONE_NEWUTS
        case runtimespec.PIDNamespace:
            cloneFlags |= syscall.CLONE_NEWPID
        case runtimespec.MountNamespace:
            cloneFlags |= syscall.CLONE_NEWNS
        case runtimespec.UserNamespace:
            cloneFlags |= syscall.CLONE_NEWUSER
        case runtimespec.NetworkNamespace:
            cloneFlags |= syscall.CLONE_NEWNET
        case runtimespec.IPCNamespace:
            cloneFlags |= syscall.CLONE_NEWIPC
        case runtimespec.CgroupNamespace:
            logrus.Info("skipping cgroup namespace")
        default:
            return fmt.Errorf("unsupported namespace: %s", ns.Type)
        }
    }
    env := os.Environ()
    if cloneFlags&syscall.CLONE_NEWUSER != syscall.CLONE_NEWUSER {
        // When we don't have a user namespace, there is no need to re-exec because
        // we won't configure the uid/gid maps.
        env = append(env, "_YACR_CONTAINER_REEXEC=1")
    }
    self, err := os.Executable()
    if err != nil {
        return fmt.Errorf("failed to retrieve executable: %w", err)
    }
    containerProcess := &exec.Cmd{
        Path: self,
        Args: append([]string{"yacr"}, containerArgs...),
        SysProcAttr: &syscall.SysProcAttr{
            Cloneflags: uintptr(cloneFlags),
        },
        Env: env,
    }
    logrus.WithFields(logrus.Fields{
        "id":      container.ID(),
        "process": containerProcess.String(),
    }).Debug("container process configured")
    if container.Spec.Process.Terminal {
        // See: https://github.com/opencontainers/runc/blob/016a0d29d1750180b2a619fc70d6fe0d80111be0/docs/terminals.md#detached-new-terminal
        if err := ipc.EnsureValidSockAddr(opts.ConsoleSocket, true); err != nil {
            return err
        }
        logrus.WithFields(logrus.Fields{
            "id":            container.ID(),
            "consoleSocket": opts.ConsoleSocket,
        }).Debug("start container process with pty")
        ptm, err := pty.Start(containerProcess)
        if err != nil {
            return fmt.Errorf("failed to create container (1): %w", err)
        }
        defer ptm.Close()
        // Connect to the socket in order to send the PTY file descriptor.
        conn, err := net.Dial("unix", opts.ConsoleSocket)
        if err != nil {
            return fmt.Errorf("failed to dial console socket: %w", err)
        }
        defer conn.Close()
        uc, ok := conn.(*net.UnixConn)
        if !ok {
            return errors.New("failed to cast unix socket")
        }
        defer uc.Close()
        // Send file descriptor over socket.
        // NOTE(review): the error from WriteMsgUnix is ignored — a failure to
        // hand over the PTY fd would go unnoticed here.
        oob := unix.UnixRights(int(ptm.Fd()))
        uc.WriteMsgUnix([]byte(ptm.Name()), oob, nil)
    } else {
        logrus.WithFields(logrus.Fields{
            "id": container.ID(),
        }).Debug("start container process without pty")
        containerProcess.Stdin = os.Stdin
        containerProcess.Stdout = os.Stdout
        containerProcess.Stderr = os.Stderr
        if err := containerProcess.Start(); err != nil {
            return fmt.Errorf("failed to create container (2): %w", err)
        }
    }
    // With a user namespace, write the uid/gid mappings from the spec using
    // the privileged newuidmap/newgidmap helpers (the container process
    // briefly delays its re-exec to let this happen — see CreateContainer).
    if cloneFlags&syscall.CLONE_NEWUSER == syscall.CLONE_NEWUSER {
        newuidmap, err := exec.LookPath("newuidmap")
        if err != nil {
            return err
        }
        var uidMap []string
        for _, m := range container.Spec.Linux.UIDMappings {
            uidMap = append(uidMap, []string{
                strconv.Itoa(int(m.ContainerID)),
                strconv.Itoa(int(m.HostID)),
                strconv.Itoa(int(m.Size)),
            }...)
        }
        newuidmapCmd := exec.Command(newuidmap, append(
            []string{strconv.Itoa(containerProcess.Process.Pid)}, uidMap...,
        )...)
        logrus.WithField("command", newuidmapCmd.String()).Debug("configuring uidmap")
        if err := cmd.Run(newuidmapCmd); err != nil {
            return fmt.Errorf("newuidmap failed: %w", err)
        }
        newgidmap, err := exec.LookPath("newgidmap")
        if err != nil {
            return err
        }
        var gidMap []string
        for _, m := range container.Spec.Linux.GIDMappings {
            gidMap = append(gidMap, []string{
                strconv.Itoa(int(m.ContainerID)),
                strconv.Itoa(int(m.HostID)),
                strconv.Itoa(int(m.Size)),
            }...)
        }
        newgidmapCmd := exec.Command(newgidmap, append(
            []string{strconv.Itoa(containerProcess.Process.Pid)}, gidMap...,
        )...)
        logrus.WithField("command", newgidmapCmd.String()).Debug("configuring gidmap")
        if err := cmd.Run(newgidmapCmd); err != nil {
            return fmt.Errorf("newgidmap failed: %w", err)
        }
    }
    // Wait until the container has "booted".
    initConn, err := initListener.Accept()
    if err != nil {
        return fmt.Errorf("init accept error: %w", err)
    }
    defer initConn.Close()
    if err := ipc.AwaitMessage(initConn, ipc.CONTAINER_BOOTED); err != nil {
        return err
    }
    logrus.WithFields(logrus.Fields{
        "id": container.ID(),
    }).Debug("container booted")
    // The init socket is one-shot: close and remove it now that the container
    // has created its own socket.
    initConn.Close()
    initListener.Close()
    syscall.Unlink(initSockAddr)
    // Connect to the container.
    sockAddr, err := container.GetSockAddr(true)
    if err != nil {
        return err
    }
    conn, err := net.Dial("unix", sockAddr)
    if err != nil {
        return fmt.Errorf("failed to dial container socket: %w", err)
    }
    defer conn.Close()
    // Wait until the container reached the "before pivot_root" step so that we
    // can run `CreateRuntime` hooks.
    if err := ipc.AwaitMessage(conn, ipc.CONTAINER_BEFORE_PIVOT); err != nil {
        return fmt.Errorf("before_pivot: %w", err)
    }
    containerPid := containerProcess.Process.Pid
    container.SetPid(containerPid)
    // Write the container PID to the pid file if supplied.
    if opts.PidFile != "" {
        if err := ioutil.WriteFile(opts.PidFile, []byte(strconv.Itoa(containerPid)), 0o644); err != nil {
            return fmt.Errorf("failed to write to pid file: %w", err)
        }
    }
    // Hooks to be run after the container has been created but before
    // `pivot_root`.
    // See: https://github.com/opencontainers/runtime-spec/blob/27924127bf391ea7691924c6dcb01f3369d69fe2/config.md#createruntime-hooks
    if err := container.ExecuteHooks("CreateRuntime"); err != nil {
        return err
    }
    // Notify the container that it can continue its initialization.
    if err := ipc.SendMessage(conn, ipc.OK); err != nil {
        return err
    }
    // Wait until the container is ready (i.e. the container waits for the
    // "start" command).
    if err := ipc.AwaitMessage(conn, ipc.CONTAINER_WAIT_START); err != nil {
        return err
    }
    // Update state.
    if err := container.SaveAsCreated(); err != nil {
        return err
    }
    logrus.WithFields(logrus.Fields{
        "id": container.ID(),
    }).Info("container created")
    return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yacr/create_container.go | Go | package yacr
import (
"errors"
"fmt"
"io/fs"
"net"
"os"
"os/exec"
"path/filepath"
"strconv"
"syscall"
"time"
"github.com/sirupsen/logrus"
"github.com/willdurand/containers/internal/yacr/container"
"github.com/willdurand/containers/internal/yacr/ipc"
"golang.org/x/sys/unix"
)
// CreateContainer runs inside the future container process (spawned by Create
// with the requested clone(2) flags). It re-executes itself once the uid/gid
// maps are expected to be in place, mounts the filesystems from the spec,
// sets up default devices and symlinks, pivots (or chroots) into the rootfs
// and finally waits for the "start" command before exec'ing the container
// process.
func CreateContainer(rootDir string, opts CreateOpts) error {
    if os.Getenv("_YACR_CONTAINER_REEXEC") != "1" {
        // Re-exec to take uid/gid map into account.
        logrus.Debug("re-executing create container")
        // HACK: Pretty sure this is going to come back to bite us in the future
        // but... it works for now. (The sleep gives the host a window to run
        // newuidmap/newgidmap before the re-exec.)
        time.Sleep(50 * time.Millisecond)
        env := append(os.Environ(), "_YACR_CONTAINER_REEXEC=1")
        if err := syscall.Exec("/proc/self/exe", os.Args, env); err != nil {
            return err
        }
    }
    container, err := container.LoadFromContainer(rootDir, opts.ID)
    if err != nil {
        return err
    }
    initSockAddr, err := container.GetInitSockAddr(true)
    if err != nil {
        return err
    }
    logrus.WithFields(logrus.Fields{
        "id":           container.ID(),
        "initSockAddr": initSockAddr,
    }).Debug("booting")
    // Connect to the initial socket to tell the host (runtime) that this
    // process has booted.
    initConn, err := net.Dial("unix", initSockAddr)
    if err != nil {
        return fmt.Errorf("failed to dial init socket: %w", err)
    }
    defer initConn.Close()
    // Create a new socket to allow communication with this container.
    sockAddr, err := container.GetSockAddr(false)
    if err != nil {
        return err
    }
    listener, err := net.Listen("unix", sockAddr)
    if err != nil {
        return fmt.Errorf("listen error: %w", err)
    }
    defer listener.Close()
    // Notify the host that we are alive.
    if err := ipc.SendMessage(initConn, ipc.CONTAINER_BOOTED); err != nil {
        return err
    }
    initConn.Close()
    // Accept connection from the host to continue the creation of this
    // container.
    conn, err := listener.Accept()
    if err != nil {
        return fmt.Errorf("accept error: %w", err)
    }
    defer conn.Close()
    // TODO: send errors to the host.
    rootfs := container.Rootfs()
    if _, err := os.Stat(rootfs); errors.Is(err, fs.ErrNotExist) {
        return fmt.Errorf("rootfs does not exist: %w", err)
    }
    mountFlag := syscall.MS_PRIVATE
    if opts.NoPivot {
        mountFlag = syscall.MS_SLAVE
    }
    // Prevent mount propagation back to other namespaces.
    if err := syscall.Mount("none", "/", "", uintptr(mountFlag|syscall.MS_REC), ""); err != nil {
        return fmt.Errorf("failed to prevent mount propagation: %w", err)
    }
    if !opts.NoPivot {
        // This seems to be needed for `pivot_root`.
        if err := syscall.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil {
            return fmt.Errorf("failed to bind-mount rootfs: %w", err)
        }
    }
    mounts := container.Spec.Mounts
    logrus.WithFields(logrus.Fields{
        "id":     container.ID(),
        "rootfs": rootfs,
        "mounts": mounts,
    }).Debug("mount")
    for _, m := range mounts {
        // Create destination if it does not exist yet.
        dest := filepath.Join(rootfs, m.Destination)
        if _, err := os.Stat(dest); os.IsNotExist(err) {
            if err := os.MkdirAll(dest, 0o755); err != nil {
                return fmt.Errorf("failed to create directory: %w", err)
            }
        }
        // TODO: add support for all `m.Options`
        flags := syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV
        // HACK: this is mainly used to support default "rootless" specs (created
        // with `runc spec --rootless`).
        if len(m.Options) > 0 && m.Options[0] == "rbind" {
            m.Type = "bind"
            flags |= unix.MS_REC
        }
        if m.Type == "bind" {
            flags |= syscall.MS_BIND
        }
        data := ""
        // Well-known destinations get special-cased mount flags/data.
        switch m.Destination {
        case "/dev", "/run":
            flags = syscall.MS_NOSUID | syscall.MS_STRICTATIME
            data = "mode=755,size=65536k"
        case "/dev/pts":
            flags &= ^syscall.MS_NODEV
            data = "newinstance,ptmxmode=0666,mode=0620"
        case "/dev/shm":
            data = "mode=1777,size=65536k"
        case "/sys", "/sys/fs/cgroup":
            flags |= syscall.MS_RDONLY
        }
        if err := syscall.Mount(m.Source, dest, m.Type, uintptr(flags), data); err != nil {
            logrus.WithFields(logrus.Fields{
                "id":          container.ID(),
                "source":      m.Source,
                "destination": dest,
                "type":        m.Type,
                "options":     m.Options,
                "error":       err,
            }).Error("failed to mount filesystem")
            // TODO: handle `cgroup`
            // EPERM is tolerated (logged above) so rootless setups keep going.
            if !errors.Is(err, syscall.EPERM) {
                return fmt.Errorf("failed to mount: %w", err)
            }
        }
    }
    // See: https://github.com/opencontainers/runtime-spec/blob/27924127bf391ea7691924c6dcb01f3369d69fe2/config-linux.md#default-devices
    for _, dev := range []string{
        "/dev/null",
        "/dev/zero",
        "/dev/full",
        "/dev/random",
        "/dev/urandom",
        "/dev/tty",
    } {
        dest := filepath.Join(rootfs, dev)
        f, err := os.Create(dest)
        if err != nil && !errors.Is(err, fs.ErrExist) {
            return fmt.Errorf("failed to create device destination: %w", err)
        }
        if f != nil {
            f.Close()
        }
        if err := syscall.Mount(dev, dest, "bind", unix.MS_BIND, ""); err != nil {
            return fmt.Errorf("failed to mount device: %w", err)
        }
    }
    // Standard I/O symlinks expected inside the container.
    for _, link := range [][2]string{
        {"/proc/self/fd", "/dev/fd"},
        {"/proc/self/fd/0", "/dev/stdin"},
        {"/proc/self/fd/1", "/dev/stdout"},
        {"/proc/self/fd/2", "/dev/stderr"},
    } {
        src := link[0]
        dst := filepath.Join(rootfs, link[1])
        if err := os.Symlink(src, dst); err != nil && !errors.Is(err, fs.ErrExist) {
            return fmt.Errorf("failed to create symlink: %w", err)
        }
    }
    // if container.Spec.Process.Terminal {
    // TODO: `/dev/console` is set up if terminal is enabled in the config by bind mounting the pseudoterminal pty to `/dev/console`.
    // }
    // TODO: create symlink for `/dev/ptmx`
    // TODO: linux devices
    // Notify the host that we are about to execute `pivot_root`.
    if err := ipc.SendMessage(conn, ipc.CONTAINER_BEFORE_PIVOT); err != nil {
        return err
    }
    if err := ipc.AwaitMessage(conn, ipc.OK); err != nil {
        return fmt.Errorf("create: %w", err)
    }
    // Hooks to be run after the container has been created but before
    // pivot_root or any equivalent operation has been called. These hooks MUST
    // be called after the `CreateRuntime` hooks.
    // See: https://github.com/opencontainers/runtime-spec/blob/27924127bf391ea7691924c6dcb01f3369d69fe2/config.md#createcontainer-hooks
    if err := container.ExecuteHooks("CreateContainer"); err != nil {
        logrus.WithError(err).Error("CreateContainer hook failed")
    }
    logrus.WithFields(logrus.Fields{
        "id": container.ID(),
    }).Debug("pivot root")
    // Change root filesystem.
    if opts.NoPivot {
        if err := syscall.Chroot(rootfs); err != nil {
            return fmt.Errorf("failed to change root filesystem: %w", err)
        }
    } else {
        pivotDir := filepath.Join(rootfs, ".pivot_root")
        if err := os.Mkdir(pivotDir, 0o777); err != nil {
            return fmt.Errorf("failed to create '.pivot_root': %w", err)
        }
        if err := syscall.PivotRoot(rootfs, pivotDir); err != nil {
            return fmt.Errorf("pivot_root failed: %w", err)
        }
        if err := syscall.Chdir("/"); err != nil {
            return fmt.Errorf("chdir failed: %w", err)
        }
        // The old root now lives at /.pivot_root: detach and remove it.
        pivotDir = filepath.Join("/", ".pivot_root")
        if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {
            return fmt.Errorf("failed to unmount '.pivot_root': %w", err)
        }
        os.Remove(pivotDir)
    }
    // Change current working directory.
    if err := syscall.Chdir(container.Spec.Process.Cwd); err != nil {
        return fmt.Errorf("failed to change directory: %w", err)
    }
    // Set up new hostname.
    if err := syscall.Sethostname([]byte(container.Spec.Hostname)); err != nil {
        return fmt.Errorf("failed to set hostname: %w", err)
    }
    // Avoid leaked file descriptors.
    if err := closeExecFrom(3); err != nil {
        return fmt.Errorf("failed to close exec fds: %w", err)
    }
    // At this point, the container has been created and when the host receives
    // the message below, it will exit (success).
    if err := ipc.SendMessage(conn, ipc.CONTAINER_WAIT_START); err != nil {
        return err
    }
    conn.Close()
    // Wait until the "start" command connects to this container in order to
    // start the container process.
    conn, err = listener.Accept()
    if err != nil {
        return fmt.Errorf("accept error: %w", err)
    }
    defer conn.Close()
    logrus.WithFields(logrus.Fields{
        "id": container.ID(),
    }).Debug("waiting for start command")
    if err := ipc.AwaitMessage(conn, ipc.START_CONTAINER); err != nil {
        return err
    }
    // Hooks to be run after the start operation is called but before the
    // container process is started.
    // See: https://github.com/opencontainers/runtime-spec/blob/27924127bf391ea7691924c6dcb01f3369d69fe2/config.md#startcontainer-hooks
    if err := container.ExecuteHooks("StartContainer"); err != nil {
        logrus.WithError(err).Error("StartContainer hook failed")
    }
    process := container.Spec.Process
    logrus.WithFields(logrus.Fields{
        "id":          container.ID(),
        "processArgs": process.Args,
    }).Info("executing process")
    argv0, err := exec.LookPath(process.Args[0])
    if err != nil {
        if err := ipc.SendMessage(conn, fmt.Sprintf("failed to retrieve executable: %s", err)); err != nil {
            return err
        }
        return err
    }
    if err := ipc.SendMessage(conn, ipc.OK); err != nil {
        return err
    }
    conn.Close()
    listener.Close()
    // exec(3) replaces this process with the container process; on success,
    // nothing below runs.
    if err := syscall.Exec(argv0, process.Args, process.Env); err != nil {
        return fmt.Errorf("failed to exec %v: %w", process.Args, err)
    }
    return nil
}
// closeExecFrom marks every open file descriptor >= minFd as close-on-exec,
// so that it is not leaked into the process image loaded by a later exec.
func closeExecFrom(minFd int) error {
    dir, err := os.Open("/proc/self/fd")
    if err != nil {
        return err
    }
    defer dir.Close()
    entries, err := dir.Readdirnames(-1)
    if err != nil {
        return err
    }
    for _, entry := range entries {
        // Non-numeric entries are skipped, as are descriptors below minFd.
        if fd, err := strconv.Atoi(entry); err == nil && fd >= minFd {
            unix.CloseOnExec(fd)
        }
    }
    return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yacr/delete.go | Go | package yacr
import (
"fmt"
"os"
"path/filepath"
"syscall"
"github.com/sirupsen/logrus"
"github.com/willdurand/containers/internal/yacr/container"
)
// Delete removes a container: it unmounts (best effort) whatever is still
// mounted under the rootfs, destroys the container state and runs the
// `Poststop` hooks. Unless force is true, the container must be stopped
// first; with force, a container that cannot even be loaded is removed by
// deleting its state directory.
func Delete(rootDir, containerId string, force bool) error {
    container, err := container.LoadWithBundleConfig(rootDir, containerId)
    if err != nil {
        if force {
            logrus.WithFields(logrus.Fields{
                "id": containerId,
            }).Debug("force deleted container")
            os.RemoveAll(filepath.Join(rootDir, containerId))
            return nil
        }
        return err
    }
    if !force && !container.IsStopped() {
        return fmt.Errorf("unexpected status '%s' for container '%s'", container.State.Status, container.ID())
    }
    // Attempt to unmount all mountpoints recursively.
    err = syscall.Unmount(container.Rootfs(), syscall.MNT_DETACH)
    if err == nil {
        logrus.WithField("id", container.ID()).Debug("unmounted rootfs")
        // On Gitpod with containerd alone and `--snapshotter=native`,
        // it seems to use shiftfs and there is a problem with empty
        // directories not removed that causes containerd to not delete
        // the rootfs directory. That's a problem because we cannot run
        // the same `ctr run` twice or more...
        // Let's try to delete the directories if they still exist.
        for i := len(container.Spec.Mounts) - 1; i >= 0; i-- {
            mountpoint := container.Rootfs() + container.Spec.Mounts[i].Destination
            if err := os.Remove(mountpoint); err != nil {
                logrus.WithFields(logrus.Fields{
                    "id":         container.ID(),
                    "mountpoint": mountpoint,
                    "error":      err,
                }).Debug("rmdir()")
            }
        }
    } else {
        // The recursive unmount failed: fall back to unmounting the default
        // devices and the spec mounts one by one (in reverse order), logging
        // failures instead of aborting.
        for _, dev := range []string{
            "/dev/null",
            "/dev/zero",
            "/dev/full",
            "/dev/random",
            "/dev/urandom",
            "/dev/tty",
        } {
            mountpoint := filepath.Join(container.Spec.Root.Path, dev)
            if err := syscall.Unmount(mountpoint, 0); err != nil {
                logrus.WithFields(logrus.Fields{
                    "id":         container.ID(),
                    "mountpoint": mountpoint,
                    "error":      err,
                }).Warn("unmount() failed")
            }
        }
        for i := len(container.Spec.Mounts) - 1; i >= 0; i-- {
            mountpoint := container.Rootfs() + container.Spec.Mounts[i].Destination
            if err := syscall.Unmount(mountpoint, syscall.MNT_DETACH); err != nil {
                logrus.WithFields(logrus.Fields{
                    "id":         container.ID(),
                    "mountpoint": mountpoint,
                    "error":      err,
                }).Warn("unmount() failed")
            }
        }
    }
    if err := container.Destroy(); err != nil {
        return err
    }
    // See: https://github.com/opencontainers/runtime-spec/blob/27924127bf391ea7691924c6dcb01f3369d69fe2/config.md#poststop
    if err := container.ExecuteHooks("Poststop"); !force && err != nil {
        return err
    }
    logrus.WithFields(logrus.Fields{
        "id": container.ID(),
    }).Info("ok")
    return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yacr/ipc/messages.go | Go | package ipc
// Messages exchanged between the runtime (host) and the container process
// over unix sockets.
const (
    // CONTAINER_BOOTED is sent by the container process on the init socket
    // once it is alive.
    CONTAINER_BOOTED string = "container:booted"
    // CONTAINER_BEFORE_PIVOT is sent by the container right before it changes
    // its root filesystem, so the host can run the `CreateRuntime` hooks.
    CONTAINER_BEFORE_PIVOT string = "container:before-pivot"
    // CONTAINER_WAIT_START is sent once the container is fully created and
    // waiting for the "start" command.
    CONTAINER_WAIT_START string = "container:wait-start"
    // START_CONTAINER is sent by the "start" command to make the container
    // exec its process.
    START_CONTAINER string = "start-container"
    // OK is a generic acknowledgment.
    OK string = "ok"
)
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yacr/ipc/utils.go | Go | package ipc
import (
"errors"
"fmt"
"io/fs"
"net"
"os"
)
// EnsureValidSockAddr validates a unix socket address: it must be non-empty
// and short enough for sockaddr_un (108 bytes on Linux). When mustExist is
// true, the path must also exist on the filesystem.
func EnsureValidSockAddr(sockAddr string, mustExist bool) error {
    if sockAddr == "" {
        return fmt.Errorf("socket address '%s' is empty", sockAddr)
    }
    if len(sockAddr) > 108 {
        // LOL: https://github.com/moby/moby/pull/13408
        return fmt.Errorf("socket address '%s' is too long", sockAddr)
    }
    if mustExist {
        if _, err := os.Stat(sockAddr); err != nil {
            if errors.Is(err, fs.ErrNotExist) {
                return fmt.Errorf("socket address '%s' does not exist", sockAddr)
            }
            // Fix: any other stat error (e.g. a permission issue) was
            // previously swallowed and the address reported as valid.
            return fmt.Errorf("failed to stat socket address '%s': %w", sockAddr, err)
        }
    }
    return nil
}
func AwaitMessage(conn net.Conn, expectedMessage string) error {
buf := make([]byte, len(expectedMessage))
n, err := conn.Read(buf)
if err != nil {
return fmt.Errorf("failed to read from socket: %w", err)
}
msg := string(buf[0:n])
if msg != expectedMessage {
return fmt.Errorf("received unexpected message: %s", msg)
}
return nil
}
func SendMessage(conn net.Conn, message string) error {
if _, err := conn.Write([]byte(message)); err != nil {
return fmt.Errorf("failed to send message '%s': %w", message, err)
}
return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yacr/kill.go | Go | package yacr
import (
"fmt"
"syscall"
"github.com/docker/docker/pkg/signal"
"github.com/sirupsen/logrus"
"github.com/willdurand/containers/internal/yacr/container"
)
// Kill sends a signal to a created or running container.
//
// args[0] is the container ID; args[1] (optional) is the signal name or
// number, defaulting to SIGTERM.
func Kill(rootDir string, args []string) error {
    // Fix: args[0] previously panicked with an index-out-of-range when no
    // argument was given.
    if len(args) == 0 {
        return fmt.Errorf("missing container id")
    }
    containerId := args[0]
    sig := syscall.SIGTERM
    if len(args) > 1 {
        // NOTE(review): a parse error is deliberately(?) ignored and the
        // signal stays SIGTERM — consider reporting it to the caller instead.
        if s, err := signal.ParseSignal(args[1]); err == nil {
            sig = s
        }
    }
    container, err := container.LoadWithBundleConfig(rootDir, containerId)
    if err != nil {
        return fmt.Errorf("%w", err)
    }
    if !container.IsCreated() && !container.IsRunning() {
        return fmt.Errorf("unexpected status '%s' for container '%s'", container.State.Status, container.ID())
    }
    if container.State.Pid != 0 {
        // sig is already a syscall.Signal; the previous syscall.Signal(sig)
        // conversion was redundant.
        if err := syscall.Kill(container.State.Pid, sig); err != nil {
            return fmt.Errorf("failed to send signal '%d' to container '%s': %w", sig, container.ID(), err)
        }
    }
    logrus.WithFields(logrus.Fields{
        "id":     container.ID(),
        "signal": sig,
    }).Info("ok")
    return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yacr/list.go | Go | package yacr
import (
"fmt"
"io/ioutil"
"time"
"github.com/sirupsen/logrus"
"github.com/willdurand/containers/internal/yacr/container"
)
// ContainerListItem holds the subset of a container's state displayed by the
// `list` command.
type ContainerListItem struct {
    // ID is the container identifier.
    ID string
    // Status is the container status string from its state.
    Status string
    // CreatedAt is the container creation time.
    CreatedAt time.Time
    // PID is the container process ID, or 0 when the container is stopped.
    PID int
    // BundlePath is the path of the container's OCI bundle.
    BundlePath string
}

// ContainerList is the list of containers returned by List.
type ContainerList []ContainerListItem
// List scans rootDir and returns one ContainerListItem per container that can
// be loaded successfully. Entries that fail to load are logged at debug level
// and skipped.
func List(rootDir string) (ContainerList, error) {
    var list ContainerList
    entries, err := ioutil.ReadDir(rootDir)
    if err != nil {
        return list, fmt.Errorf("failed to read root directory: %w", err)
    }
    for _, entry := range entries {
        if !entry.IsDir() {
            continue
        }
        c, err := container.Load(rootDir, entry.Name())
        if err != nil {
            logrus.WithFields(logrus.Fields{
                "id":    entry.Name(),
                "error": err,
            }).Debug("failed to load container")
            continue
        }
        // A stopped container has no live process, so report PID 0.
        pid := 0
        if !c.IsStopped() {
            pid = c.State.Pid
        }
        list = append(list, ContainerListItem{
            ID:         c.ID(),
            Status:     c.State.Status,
            CreatedAt:  c.CreatedAt,
            PID:        pid,
            BundlePath: c.State.Bundle,
        })
    }
    return list, nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yacr/start.go | Go | package yacr
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"net"
"github.com/sirupsen/logrus"
"github.com/willdurand/containers/internal/constants"
"github.com/willdurand/containers/internal/yacr/container"
"github.com/willdurand/containers/internal/yacr/ipc"
)
// Start implements the "start" command: it runs the `Prestart` hooks, tells
// the created container process to exec its user process, marks the container
// as running, runs the `Poststart` hooks and finally waits for the
// container's acknowledgment ("ok") sent right before exec(3) — or reports
// the error message sent by the container instead.
func Start(rootDir, containerId string) error {
    container, err := container.LoadWithBundleConfig(rootDir, containerId)
    if err != nil {
        return err
    }
    if !container.IsCreated() {
        return fmt.Errorf("start: unexpected status '%s' for container '%s'", container.State.Status, container.ID())
    }
    // Connect to the container.
    sockAddr, err := container.GetSockAddr(true)
    if err != nil {
        return err
    }
    conn, err := net.Dial("unix", sockAddr)
    if err != nil {
        return fmt.Errorf("start: failed to dial container socket: %w", err)
    }
    defer conn.Close()
    // Hooks to be run before the container process is executed.
    // See: https://github.com/opencontainers/runtime-spec/blob/27924127bf391ea7691924c6dcb01f3369d69fe2/config.md#prestart
    if err := container.ExecuteHooks("Prestart"); err != nil {
        return err
    }
    if err := ipc.SendMessage(conn, ipc.START_CONTAINER); err != nil {
        return err
    }
    // NOTE(review): the status flips to running before the container confirms
    // the exec below — confirm this ordering is intended.
    container.UpdateStatus(constants.StateRunning)
    // See: https://github.com/opencontainers/runtime-spec/blob/27924127bf391ea7691924c6dcb01f3369d69fe2/config.md#poststart
    if err := container.ExecuteHooks("Poststart"); err != nil {
        return err
    }
    // The container process should send a "OK" right before it calls exec(3) OR
    // an error if something went wrong.
    msg, err := ioutil.ReadAll(conn)
    if err != nil {
        return err
    }
    if !bytes.Equal(msg, []byte(ipc.OK)) {
        return errors.New(string(msg))
    }
    logrus.WithFields(logrus.Fields{
        "id": container.ID(),
    }).Info("start: ok")
    return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yacr/state.go | Go | package yacr
import (
"encoding/json"
"io"
"github.com/willdurand/containers/internal/yacr/container"
)
// State writes the container's OCI state as JSON to w.
func State(rootDir, containerId string, w io.Writer) error {
    c, err := container.LoadWithBundleConfig(rootDir, containerId)
    if err != nil {
        return err
    }
    return json.NewEncoder(w).Encode(c.State)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yacs/container.go | Go | package yacs
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"io/fs"
"net"
"os"
"os/exec"
"path/filepath"
"strconv"
"syscall"
"github.com/sirupsen/logrus"
"github.com/willdurand/containers/internal/cmd"
"github.com/willdurand/containers/internal/logs"
"github.com/willdurand/containers/internal/yacs/log"
"github.com/willdurand/containers/thirdparty/runc/libcontainer/utils"
"golang.org/x/sys/unix"
)
// Names of files the shim creates in its base directory.
const (
	// consoleSocketName is the unix socket on which the shim receives the PTY
	// "master" file descriptor from the OCI runtime (`--console-socket`).
	consoleSocketName = "console.sock"
	// containerPidFileName is the file in which the OCI runtime writes the
	// container's PID (`--pid-file`).
	containerPidFileName = "container.pid"
)
// createContainer creates a new container when the shim is started.
//
// The container is created but not started. This function also creates pipes to
// capture the container `stdout` and `stderr` streams and write their contents
// to files. Any initialization error is reported on the `containerReady`
// channel; on success, `nil` is sent and the function then blocks until the
// container process exits.
func (y *Yacs) createContainer() {
	// Create FIFOs for the container standard IOs.
	for _, name := range []string{"0", "1", "2"} {
		if err := unix.Mkfifo(filepath.Join(y.stdioDir, name), 0o600); err != nil && !errors.Is(err, fs.ErrExist) {
			y.containerReady <- fmt.Errorf("mkfifo: %w", err)
			return
		}
	}

	// We use `O_RDWR` to get non-blocking behavior, see:
	// https://github.com/golang/go/issues/33050#issuecomment-510308419
	sin, err := os.OpenFile(filepath.Join(y.stdioDir, "0"), os.O_RDWR, 0)
	if err != nil {
		y.containerReady <- fmt.Errorf("open stdin: %w", err)
		return
	}
	defer closeFifo(sin)

	sout, err := os.OpenFile(filepath.Join(y.stdioDir, "1"), os.O_RDWR, 0)
	if err != nil {
		y.containerReady <- fmt.Errorf("open stdout: %w", err)
		return
	}
	defer closeFifo(sout)

	serr, err := os.OpenFile(filepath.Join(y.stdioDir, "2"), os.O_RDWR, 0)
	if err != nil {
		y.containerReady <- fmt.Errorf("open stderr: %w", err)
		return
	}
	defer closeFifo(serr)

	// Prepare the arguments for the OCI runtime.
	runtimeArgs := append(
		[]string{y.runtime},
		append(y.runtimeArgs(), []string{
			"create", y.containerID,
			"--bundle", y.bundleDir,
			"--pid-file", y.containerPidFilePath(),
		}...)...,
	)
	if y.containerSpec.Process.Terminal {
		runtimeArgs = append(runtimeArgs, "--console-socket", y.consoleSocketPath())
	}

	// By default, we pass the standard input but the outputs are configured
	// depending on whether the container should create a PTY or not.
	createCommand := exec.Cmd{
		Path: y.runtimePath,
		Args: runtimeArgs,
	}

	// When the container should create a terminal, the shim should open a unix
	// socket and wait until it receives a file descriptor that corresponds to
	// the PTY "master" end.
	if y.containerSpec.Process.Terminal {
		ln, err := net.Listen("unix", y.consoleSocketPath())
		if err != nil {
			y.containerReady <- fmt.Errorf("listen (console socket): %w", err)
			// BUGFIX: without this return, `ln` is nil and both the deferred
			// `ln.Close()` and the `ln.Accept()` goroutine below would panic.
			return
		}
		defer ln.Close()

		go func() {
			conn, err := ln.Accept()
			if err != nil {
				logrus.WithError(err).Panic("failed to accept connections on console socket")
			}
			defer conn.Close()

			unixconn, ok := conn.(*net.UnixConn)
			if !ok {
				logrus.WithError(err).Panic("failed to cast to unixconn")
			}

			socket, err := unixconn.File()
			if err != nil {
				logrus.WithError(err).Panic("failed to retrieve socket file")
			}
			defer socket.Close()

			// `ptm` is the PTY master fd sent by the runtime over the socket.
			ptm, err := utils.RecvFd(socket)
			if err != nil {
				logrus.WithError(err).Panic("failed to receive file descriptor")
			}

			// Now we can redirect the streams: first the standard input to the PTY
			// input, then the PTY output to the standard output.
			go io.Copy(ptm, sin)
			go io.Copy(sout, ptm)
		}()
	} else {
		// We only use the log file when the container didn't set up a terminal
		// because that's already complicated enough. That being said, maybe we
		// should log the PTY output as well in the future?
		logFile, err := log.NewFile(y.containerLogFilePath)
		if err != nil {
			y.containerReady <- fmt.Errorf("open (log file): %w", err)
			return
		}
		defer logFile.Close()

		// We create a pipe to pump the stdout from the container and then we write
		// the content to both the log file and the stdout FIFO.
		outRead, outWrite, err := os.Pipe()
		if err != nil {
			y.containerReady <- fmt.Errorf("stdout pipe: %w", err)
			return
		}
		defer outWrite.Close()
		createCommand.Stdout = outWrite
		go copyStd("stdout", outRead, logFile, sout)

		// We create a pipe to pump the stderr from the container and then we write
		// the content to both the log file and the stderr FIFO.
		errRead, errWrite, err := os.Pipe()
		if err != nil {
			y.containerReady <- fmt.Errorf("stderr pipe: %w", err)
			return
		}
		defer errWrite.Close()
		createCommand.Stderr = errWrite
		go copyStd("stderr", errRead, logFile, serr)

		inRead, inWrite, err := os.Pipe()
		if err != nil {
			y.containerReady <- fmt.Errorf("stdin pipe: %w", err)
			return
		}
		defer inRead.Close()
		createCommand.Stdin = inRead
		go func() {
			defer inWrite.Close()

			// Forward the stdin FIFO line by line until the sentinel value is
			// received, which tells us to close the container's stdin pipe.
			scanner := bufio.NewScanner(sin)
			for scanner.Scan() {
				data := scanner.Bytes()
				if bytes.Equal(data, []byte("THIS_IS_NOT_HOW_WE_SHOULD_CLOSE_A_PIPE")) {
					break
				}

				inWrite.Write(data)
				inWrite.Write([]byte("\n"))
			}
		}()
	}

	logrus.WithFields(logrus.Fields{
		"command": createCommand.String(),
	}).Info("creating container")

	if err := createCommand.Run(); err != nil {
		y.containerReady <- logs.GetBetterError(y.runtimeLogFilePath(), err)
		return
	}
	logrus.Debug("container created")

	// The runtime should have written the container's PID to a file because
	// that's how the runtime passes this value to the shim. The shim needs the
	// PID to be able to interact with the container directly.
	containerPidFilePath := y.containerPidFilePath()
	data, err := os.ReadFile(containerPidFilePath)
	if err != nil {
		logrus.WithError(err).Panicf("failed to read '%s'", containerPidFilePath)
	}
	containerPid, err := strconv.Atoi(string(bytes.TrimSpace(data)))
	if err != nil {
		logrus.WithError(err).Panicf("failed to parse pid from '%s'", containerPidFilePath)
	}

	// At this point, the shim knows that the runtime has successfully created a
	// container. The shim's API can be used to interact with the container now.
	y.setContainerStatus(&ContainerStatus{PID: containerPid})
	y.containerReady <- nil

	// Wait for the termination of the container process.
	var wstatus syscall.WaitStatus
	var rusage syscall.Rusage
	_, err = syscall.Wait4(containerPid, &wstatus, 0, &rusage)
	if err != nil {
		logrus.WithError(err).Panic("wait4() failed")
	}

	y.setContainerStatus(&ContainerStatus{
		PID:        containerPid,
		WaitStatus: &wstatus,
	})

	logrus.WithFields(logrus.Fields{
		"exitStatus": y.containerStatus.ExitStatus(),
	}).Info("container exited")

	// Close stdio streams in case a container manager is attached (this will
	// notify this manager that the container has exited).
	sin.Close()
	sout.Close()
	serr.Close()

	if y.exitCommand != "" {
		exit := exec.Command(y.exitCommand, y.exitCommandArgs...)
		logrus.WithField("command", exit.String()).Debug("execute exit command")

		if err := cmd.Run(exit); err != nil {
			logrus.WithError(err).Warn("exit command failed")
		}
	}
}
// containerPidFilePath returns the path to the file that contains the PID of
// the container. Usually, this path should be passed to the OCI runtime with a
// CLI flag (`--pid-file`). The file lives in the shim's base directory.
func (y *Yacs) containerPidFilePath() string {
	return filepath.Join(y.baseDir, containerPidFileName)
}
// consoleSocketPath returns the path to the console socket that is used by the
// container when it must create a PTY. The socket lives in the shim's base
// directory.
func (y *Yacs) consoleSocketPath() string {
	return filepath.Join(y.baseDir, consoleSocketName)
}
// setContainerStatus sets an instance of `ContainerStatus` to the shim
// configuration.
//
// NOTE(review): this field is written from the createContainer goroutine and
// read by the HTTP handlers without synchronization — confirm whether this
// needs a mutex or atomic access.
func (y *Yacs) setContainerStatus(status *ContainerStatus) {
	y.containerStatus = status
}
// copyStd copies the content of `src` into the provided log file and FIFO. This
// is a ugly version of a `MultiWriter` + `Copy()` for Yacs.
//
// `name` identifies the stream ("stdout" or "stderr") in the log entries.
func copyStd(name string, src *os.File, logFile *log.LogFile, fifo *os.File) {
	defer src.Close()

	scanner := bufio.NewScanner(src)
	for scanner.Scan() {
		m := scanner.Text()
		fifo.WriteString(m + "\n")
		logFile.WriteMessage(name, m)
	}
	// BUGFIX: a scanner failure (e.g., a line exceeding the internal buffer
	// size) used to be silently ignored, truncating the container output with
	// no trace. At least log it so the loss is observable.
	if err := scanner.Err(); err != nil {
		logrus.WithError(err).WithField("stream", name).Warn("copyStd: scan failed")
	}
}
// closeFifo closes a FIFO file and removes it from disk. A removal failure is
// logged but not fatal (the base directory gets cleaned up later anyway).
func closeFifo(f *os.File) {
	f.Close()

	if err := os.Remove(f.Name()); err != nil {
		logrus.WithError(err).WithField("name", f.Name()).Warn("failed to remove FIFO")
	}
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yacs/container_status.go | Go | package yacs
import (
"encoding/json"
"syscall"
)
// ContainerStatus represents the container process status and especially the
// (wait) status after the process has exited.
type ContainerStatus struct {
PID int
WaitStatus *syscall.WaitStatus
}
func (s *ContainerStatus) Exited() bool {
return s.WaitStatus != nil
}
// ExitStatus returns the exit status (code) of the container process when it
// has exited. When the process hasn't been started yet or is still running,
// `-1` is returned.
func (s *ContainerStatus) ExitStatus() int {
if !s.Exited() {
return -1
}
return s.WaitStatus.ExitStatus()
}
// MarshalJSON returns the JSON encoding of the container status when the
// container process has exited. When the process hasn't been started yet or is
// still running, an empty JSON object is returned.
func (s *ContainerStatus) MarshalJSON() ([]byte, error) {
if s.WaitStatus == nil {
return json.Marshal(map[string]interface{}{})
}
return json.Marshal(map[string]interface{}{
"pid": s.PID,
"exited": s.Exited(),
"exitStatus": s.ExitStatus(),
"waitStatus": s.WaitStatus,
})
}
func (s *ContainerStatus) UnmarshalJSON(data []byte) error {
var v map[string]interface{}
if err := json.Unmarshal(data, &v); err != nil {
return err
}
if pid, ok := v["pid"].(float64); ok {
s.PID = int(pid)
}
if waitStatus, ok := v["waitStatus"].(float64); ok {
wstatus := new(syscall.WaitStatus)
*wstatus = syscall.WaitStatus(waitStatus)
s.WaitStatus = wstatus
}
return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yacs/container_status_test.go | Go | package yacs
import (
"encoding/json"
"syscall"
"testing"
)
// TestJSON checks that a ContainerStatus survives a JSON round-trip.
func TestJSON(t *testing.T) {
	wstatus := syscall.WaitStatus(0)
	s1 := &ContainerStatus{
		PID:        123,
		WaitStatus: &wstatus,
	}
	if !s1.Exited() {
		t.Error("s1 is not exited")
	}

	data, err := json.Marshal(s1)
	if err != nil {
		t.Error(err)
	}

	s2 := new(ContainerStatus)
	// BUGFIX: the error returned by Unmarshal was previously discarded — the
	// statement read `if json.Unmarshal(...); err != nil`, which re-checked
	// the stale `err` from Marshal. Also, `s2` is already a pointer, so no
	// extra `&` is needed.
	if err := json.Unmarshal(data, s2); err != nil {
		t.Error(err)
	}

	if s1.PID != s2.PID {
		t.Errorf("%d != %d", s1.PID, s2.PID)
	}
	if !s2.Exited() {
		// BUGFIX: the message said "s1" but this assertion is about s2.
		t.Error("s2 is not exited")
	}
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yacs/http_server.go | Go | package yacs
import (
"context"
"encoding/json"
"errors"
"fmt"
"net"
"net/http"
"path/filepath"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"github.com/willdurand/containers/internal/constants"
)
// shimSocketName is the name of the unix socket exposing the shim's HTTP API.
const shimSocketName = "shim.sock"
// Errors returned to API clients when a command is not valid for the current
// container state (see processCommand).
var (
	ErrNotCreated = errors.New("container is not created")
	ErrNotRunning = errors.New("container is not running")
	ErrNotStopped = errors.New("container is not stopped")
)
// YacsState represents the "public" state of the shim, i.e. what is returned
// by the HTTP API.
type YacsState struct {
	// ID is the container ID.
	ID string
	// Runtime is the name of the OCI runtime managing the container.
	Runtime string
	// State is the OCI runtime state of the container.
	State runtimespec.State
	// Status holds the container process status (PID and wait status).
	Status *ContainerStatus
}
// createHttpServer creates a HTTP server to expose an API to interact with the
// shim.
//
// Routes:
//   - GET "/"       returns the shim state (see sendShimStateOrHttpError)
//   - POST "/"      executes a command (see processCommand)
//   - DELETE "/"    stops the shim itself
//   - GET "/logs"   serves the container log file
//
// The function blocks until a DELETE request cancels the context, then shuts
// the server down and calls `terminate`.
func (y *Yacs) createHttpServer() {
	server := http.Server{}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path != "/" {
			http.NotFound(w, r)
			return
		}

		switch r.Method {
		case "GET":
			y.sendShimStateOrHttpError(w)
		case "POST":
			y.processCommand(w, r)
		case "DELETE":
			w.Write([]byte("BYE\n"))
			cancel()
		default:
			msg := fmt.Sprintf("invalid method: '%s'", r.Method)
			http.Error(w, msg, http.StatusMethodNotAllowed)
		}
	})

	http.HandleFunc("/logs", func(w http.ResponseWriter, r *http.Request) {
		http.ServeFile(w, r, y.containerLogFilePath)
	})

	listener, err := net.Listen("unix", y.SocketPath())
	if err != nil {
		y.httpServerReady <- fmt.Errorf("listen: %w", err)
		return
	}

	// At this point, we can tell the parent that we are ready to accept
	// connections. The parent will print the socket address and exit.
	y.httpServerReady <- nil

	go func() {
		if err := server.Serve(listener); err != nil && err != http.ErrServerClosed {
			logrus.WithError(err).Error("serve() failed")
		}
	}()

	<-ctx.Done()
	// BUGFIX: `Shutdown` used to be called with `ctx`, which is already
	// canceled at this point, so the graceful shutdown aborted immediately
	// instead of waiting for in-flight requests to complete. Use a fresh
	// context instead.
	if err := server.Shutdown(context.Background()); err != nil {
		logrus.WithError(err).Warn("shutdown failed")
	}
	logrus.Debug("stopped http server")

	y.terminate()
}
// SocketPath returns the path to the unix socket used to communicate with the
// shim. The socket lives in the shim's base directory.
func (y *Yacs) SocketPath() string {
	return filepath.Join(y.baseDir, shimSocketName)
}
// processCommand processes an API command. If the command is valid, the OCI
// runtime is usually called and the state of the shim is returned. When
// something goes wrong, an error is returned instead.
//
// Supported commands (read from the `cmd` form value): "start", "kill" (with
// a `signal` form value) and "delete". Each command is only valid in a
// specific container state, which is checked against the OCI runtime state
// first.
func (y *Yacs) processCommand(w http.ResponseWriter, r *http.Request) {
	// Query the runtime state up-front so each command can validate the
	// container status before acting.
	state, err := y.State()
	if err != nil {
		writeHttpError(w, err)
		return
	}

	cmd := r.FormValue("cmd")
	switch cmd {
	case "start":
		if state.Status != constants.StateCreated {
			writeHttpError(w, ErrNotCreated)
			return
		}

		if err := y.Start(); err != nil {
			writeHttpError(w, err)
			return
		}
	case "kill":
		if state.Status != constants.StateRunning {
			writeHttpError(w, ErrNotRunning)
			return
		}

		if err := y.Kill(r.FormValue("signal")); err != nil {
			writeHttpError(w, err)
			return
		}
	case "delete":
		if state.Status != constants.StateStopped {
			writeHttpError(w, ErrNotStopped)
			return
		}

		if err := y.Delete(false); err != nil {
			writeHttpError(w, err)
			return
		}

		// We cannot return the state anymore given we just deleted the
		// container.
		w.WriteHeader(http.StatusNoContent)
		return
	default:
		msg := fmt.Sprintf("invalid command '%s'", cmd)
		http.Error(w, msg, http.StatusBadRequest)
		return
	}

	y.sendShimStateOrHttpError(w)
}
// sendShimStateOrHttpError sends a HTTP response with the shim state, unless
// there is an error in which case the error is returned to the client.
func (y *Yacs) sendShimStateOrHttpError(w http.ResponseWriter) {
	state, err := y.State()
	if err != nil {
		writeHttpError(w, err)
		return
	}

	payload := YacsState{
		ID:      y.containerID,
		Runtime: y.runtime,
		State:   *state,
		Status:  y.containerStatus,
	}
	if err := json.NewEncoder(w).Encode(payload); err != nil {
		writeHttpError(w, err)
	}
}
// writeHttpError maps a shim error to an HTTP status code and writes it to
// the response: 404 for a missing container, 400 for state-validation errors,
// and 500 for everything else.
func writeHttpError(w http.ResponseWriter, err error) {
	var status int
	switch {
	case errors.Is(err, ErrContainerNotExist):
		status = http.StatusNotFound
	case errors.Is(err, ErrNotRunning), errors.Is(err, ErrNotStopped), errors.Is(err, ErrNotCreated):
		status = http.StatusBadRequest
	default:
		status = http.StatusInternalServerError
	}
	http.Error(w, err.Error(), status)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yacs/log/file.go | Go | package log
import (
"encoding/json"
"os"
"sync"
"time"
"github.com/sirupsen/logrus"
)
// LogFile is a mutex-protected append-only log file; the mutex serializes
// concurrent WriteMessage calls (e.g., stdout and stderr copiers).
type LogFile struct {
	sync.Mutex
	file *os.File
}
// NewFile opens (creating it if necessary) the file at `name` in append-only
// mode and wraps it in a LogFile.
func NewFile(name string) (*LogFile, error) {
	f, err := os.OpenFile(name, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
	if err != nil {
		return nil, err
	}

	l := &LogFile{file: f}
	return l, nil
}
// WriteMessage appends one JSON log entry to the file, holding the lock for
// the duration of the write. `s` is the stream name (e.g. "stdout") and `m`
// the message; a UTC timestamp is added under "t".
func (l *LogFile) WriteMessage(s, m string) {
	l.Lock()
	defer l.Unlock()

	data, err := json.Marshal(map[string]interface{}{
		"t": time.Now().UTC(),
		"m": m,
		"s": s,
	})
	if err != nil {
		// BUGFIX: a marshaling failure used to be silently swallowed (the
		// write was guarded by `if err == nil` with no else branch), dropping
		// the log entry without a trace.
		logrus.WithFields(logrus.Fields{
			"s":     s,
			"error": err,
		}).Warn("failed to marshal container log entry")
		return
	}

	if _, err := l.file.Write(append(data, '\n')); err != nil {
		logrus.WithFields(logrus.Fields{
			"s":     s,
			"error": err,
		}).Warn("failed to write to container log file")
	}
}
// Close closes the underlying file and returns its error, if any.
func (l *LogFile) Close() error {
	return l.file.Close()
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yacs/runtime.go | Go | package yacs
import (
"bytes"
"encoding/json"
"errors"
"os/exec"
"path/filepath"
"strings"
"syscall"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
const (
	// runtimeLogFileName is the file (in the shim's base directory) that the
	// OCI runtime writes its own log to (via `--log`).
	runtimeLogFileName = "runtime.log"
)

var (
	// ErrContainerNotExist is returned when the OCI runtime reports that the
	// container does not exist (see executeRuntime).
	ErrContainerNotExist = errors.New("container does not exist")
)
// State calls the OCI runtime and returns the runtime state or an error.
func (y *Yacs) State() (*runtimespec.State, error) {
	output, err := y.executeRuntime("state", y.containerID)
	if err != nil {
		return nil, err
	}

	state := new(runtimespec.State)
	// BUGFIX: the error returned by `json.Unmarshal` was previously discarded
	// — the statement read `if json.Unmarshal(...); err != nil`, which
	// re-checked the stale (always-nil here) `err` from `executeRuntime`, so a
	// malformed runtime response yielded a zero-valued state instead of an
	// error.
	if err := json.Unmarshal(output, state); err != nil {
		return nil, err
	}

	return state, nil
}
// Start calls the OCI runtime to start the container.
func (y *Yacs) Start() error {
	_, err := y.executeRuntime("start", y.containerID)
	return err
}
// Kill calls the OCI runtime to send a signal to the container. `signal` is
// passed verbatim to the runtime's `kill` command (e.g. "SIGKILL").
func (y *Yacs) Kill(signal string) error {
	_, err := y.executeRuntime("kill", y.containerID, signal)
	return err
}
// Sigkill calls the OCI runtime to send a `SIGKILL` signal to the container. If
// that does not work, e.g., because the container is not running, a `SIGKILL`
// is sent directly using a syscall.
func (y *Yacs) Sigkill() error {
	if err := y.Kill("SIGKILL"); err != nil {
		// Fall back to killing the recorded PID directly.
		return syscall.Kill(y.containerStatus.PID, syscall.SIGKILL)
	}

	return nil
}
// Delete calls the OCI runtime to delete a container. It can be used to force
// delete the container as well (with `--force`).
func (y *Yacs) Delete(force bool) error {
	deleteArgs := []string{"delete", y.containerID}
	if force {
		deleteArgs = append(deleteArgs, "--force")
	}

	_, err := y.executeRuntime(deleteArgs...)
	return err
}
// executeRuntime calls the OCI runtime with the arguments passed to it and
// returns the runtime's standard output. When the runtime exits non-zero, the
// generic "exit status N" error is replaced with the runtime's stderr output,
// or with ErrContainerNotExist when stderr indicates a missing container.
func (y *Yacs) executeRuntime(args ...string) ([]byte, error) {
	c := exec.Command(y.runtimePath, append(y.runtimeArgs(), args...)...)
	logrus.WithField("command", c.String()).Debug("call OCI runtime")

	output, err := c.Output()
	if err != nil {
		if exitError, ok := err.(*exec.ExitError); ok {
			// HACK: we should probably not parse the error message like that...
			// Note that this should work with `runc` too, though.
			if bytes.Contains(exitError.Stderr, []byte("does not exist")) {
				return output, ErrContainerNotExist
			}

			// Adjust error with the stderr output instead of a generic message
			// like "exit status 1".
			msg := strings.TrimPrefix(string(exitError.Stderr), "Error: ")
			err = errors.New(msg)
		}
	}

	return output, err
}
// runtimeArgs returns a list of common OCI runtime arguments (global flags
// prepended to every runtime invocation).
func (y *Yacs) runtimeArgs() []string {
	args := []string{
		// We specify a log file so that the container's stderr is "clean" (because
		// the default log file is `/dev/stderr`).
		"--log", y.runtimeLogFilePath(),
		// We set the format to JSON because we might need to read this log file
		// in case of an error when creating the container.
		"--log-format", "json",
	}

	// Forward the debug state to the OCI runtime.
	if logrus.IsLevelEnabled(logrus.DebugLevel) {
		args = append(args, "--debug")
	}

	return args
}
// runtimeLogFilePath returns the path to the OCI runtime's own log file in
// the shim's base directory.
func (y *Yacs) runtimeLogFilePath() string {
	return filepath.Join(y.baseDir, runtimeLogFileName)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yacs/sync_pipe.go | Go | package yacs
import (
"errors"
"io/fs"
"os"
"path/filepath"
"syscall"
"golang.org/x/sys/unix"
)
// syncPipeName is the name of the named pipe used by both the parent and child
// process to communicate when Yacs is executed. We need this pipe because we
// "fork" in order to spawn a daemon process but we don't want the parent
// process to exit too early. In fact, the parent process should wait until the
// child (daemon) process is fully initialized.
const syncPipeName = "sync-pipe"
// maybeMkfifo creates a new FIFO unless it already exists. In most cases, this
// function should return `nil` unless there is an actual error.
func (y *Yacs) maybeMkfifo() error {
	err := unix.Mkfifo(y.syncPipePath(), 0o600)
	if err == nil || errors.Is(err, fs.ErrExist) {
		// An already-existing FIFO is fine: it just means a previous run
		// created it.
		return nil
	}
	return err
}
// createSyncPipe creates the sync pipe and opens it in "write only" mode. The
// child (daemon) process should create this pipe.
func (y *Yacs) createSyncPipe() (*os.File, error) {
	if err := y.maybeMkfifo(); err != nil {
		return nil, err
	}

	// O_CLOEXEC keeps the pipe fd from leaking into exec'd children.
	return os.OpenFile(y.syncPipePath(), syscall.O_CREAT|syscall.O_WRONLY|syscall.O_CLOEXEC, 0)
}
// openSyncPipe opens the named pipe. This should be called by the parent
// process and this call is blocking (opening a FIFO read-only blocks until a
// writer appears).
func (y *Yacs) openSyncPipe() (*os.File, error) {
	if err := y.maybeMkfifo(); err != nil {
		return nil, err
	}

	return os.Open(y.syncPipePath())
}
// syncPipePath returns the path to the sync (named) pipe in the shim's base
// directory.
func (y *Yacs) syncPipePath() string {
	return filepath.Join(y.baseDir, syncPipeName)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yacs/yacs.go | Go | package yacs
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"syscall"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"github.com/willdurand/containers/internal/runtime"
"golang.org/x/sys/unix"
)
const (
	// containerLogFileName is the default file name for the container log.
	containerLogFileName = "container.log"
	// shimPidFileName is the file in which the shim PID is stored.
	shimPidFileName = "shim.pid"
)
// Yacs is a container shim.
type Yacs struct {
	// baseDir is the directory where the shim stores its files (sockets, pid
	// files, logs).
	baseDir string
	// bundleDir is the OCI bundle directory passed to the runtime.
	bundleDir string
	// containerExited is closed when the shim has finished cleaning up (see
	// terminate), unblocking Run.
	containerExited chan interface{}
	// containerLogFilePath is the file receiving container stdout/stderr as
	// JSON entries.
	containerLogFilePath string
	// containerID is the ID of the container managed by this shim.
	containerID string
	// containerReady receives nil (or an error) once the runtime has created
	// the container.
	containerReady chan error
	// containerSpec is the OCI runtime spec loaded from the bundle.
	containerSpec runtimespec.Spec
	// containerStatus holds the PID and exit status of the container process.
	containerStatus *ContainerStatus
	// exitCommand (with exitCommandArgs) is executed after the container
	// process exits.
	exitCommand     string
	exitCommandArgs []string
	// httpServerReady receives nil (or an error) once the HTTP API is
	// listening.
	httpServerReady chan error
	// runtime is the OCI runtime name; runtimePath is its resolved binary
	// path.
	runtime     string
	runtimePath string
	// stdioDir is the directory containing the stdio FIFOs ("0", "1", "2").
	stdioDir string
}
// NewShimFromFlags creates a new shim from a set of (command) flags. This
// function also verifies that required flags have non-empty values.
func NewShimFromFlags(flags *pflag.FlagSet) (*Yacs, error) {
	// These flags are mandatory: fail fast when one is missing or empty.
	for _, param := range []string{
		"bundle",
		"container-id",
		"runtime",
	} {
		if v, err := flags.GetString(param); err != nil || v == "" {
			return nil, fmt.Errorf("missing or invalid value for '--%s'", param)
		}
	}

	bundleDir, _ := flags.GetString("bundle")

	spec, err := runtime.LoadSpec(bundleDir)
	if err != nil {
		return nil, err
	}

	containerId, _ := flags.GetString("container-id")
	containerLogFile, _ := flags.GetString("container-log-file")
	exitCommand, _ := flags.GetString("exit-command")
	exitCommandArgs, _ := flags.GetStringArray("exit-command-arg")
	runtime, _ := flags.GetString("runtime")
	stdioDir, _ := flags.GetString("stdio-dir")

	rootDir, _ := flags.GetString("root")
	baseDir, _ := flags.GetString("base-dir")
	// Default base directory: <root>/<container-id>.
	if baseDir == "" {
		baseDir = filepath.Join(rootDir, containerId)
	}
	if err := os.MkdirAll(baseDir, 0o755); err != nil {
		return nil, fmt.Errorf("failed to create container directory: %w", err)
	}

	// The stdio FIFOs live in the base directory unless overridden.
	if stdioDir == "" {
		stdioDir = baseDir
	}

	// Resolve the runtime binary once so later invocations cannot fail on
	// PATH lookup.
	runtimePath, err := exec.LookPath(runtime)
	if err != nil {
		return nil, fmt.Errorf("runtime '%s' not found", runtime)
	}

	if containerLogFile == "" {
		containerLogFile = filepath.Join(baseDir, containerLogFileName)
	}

	return &Yacs{
		containerID:          containerId,
		containerLogFilePath: containerLogFile,
		baseDir:              baseDir,
		bundleDir:            bundleDir,
		containerExited:      make(chan interface{}),
		containerReady:       make(chan error),
		containerSpec:        spec,
		containerStatus:      nil,
		exitCommand:          exitCommand,
		exitCommandArgs:      exitCommandArgs,
		httpServerReady:      make(chan error),
		runtime:              runtime,
		runtimePath:          runtimePath,
		stdioDir:             stdioDir,
	}, nil
}
// Run starts the Yacs daemon. It creates a container and then the HTTP API.
//
// When everything is initialized, a message is written to the sync pipe so that
// the "parent" process can exit. Errors are also reported to the parent via the
// sync pipe.
//
// Assuming the initialization was successful, the `Run` method waits for the
// termination of the container process.
func (y *Yacs) Run() error {
	logrus.Info("the yacs daemon has started")

	// Make this daemon a subreaper so that it "adopts" orphaned descendants,
	// see: https://man7.org/linux/man-pages/man2/prctl.2.html
	if err := unix.Prctl(unix.PR_SET_CHILD_SUBREAPER, 1, 0, 0, 0); err != nil {
		return fmt.Errorf("prctl: %w", err)
	}

	// Call the OCI runtime to create the container.
	go y.createContainer()

	syncPipe, err := y.createSyncPipe()
	if err != nil {
		return fmt.Errorf("sync pipe: %w", err)
	}
	defer syncPipe.Close()

	// Block until createContainer reports success or failure.
	err = <-y.containerReady
	if err != nil {
		syncPipe.WriteString(err.Error())
		return err
	}

	// When the container has been created, we can set up the HTTP API to be
	// able to interact with the shim and control the container.
	go y.createHttpServer()

	err = <-y.httpServerReady
	if err != nil {
		logrus.WithError(err).Error("failed to create http server")
		syncPipe.WriteString(err.Error())
		return err
	}

	// Notify the "parent" process that the initialization has completed
	// successfully.
	_, err = syncPipe.WriteString("OK")
	if err != nil {
		return err
	}
	logrus.Debug("shim successfully started")

	// Close the pipe now so the parent's blocking read returns; the deferred
	// Close above will run again and fail, but that error is ignored.
	syncPipe.Close()

	// Block until terminate() closes this channel.
	<-y.containerExited
	return nil
}
// Err returns an error when the `Run` method has failed.
//
// This method should be used by the "parent" process. It reads data from the
// sync pipe and transforms it in an error unless the "child" process wrote a
// "OK" message.
func (y *Yacs) Err() error {
	syncPipe, err := y.openSyncPipe()
	if err != nil {
		return fmt.Errorf("open sync pipe: %w", err)
	}
	// BUGFIX: the pipe was never closed before, leaking a file descriptor in
	// the parent process.
	defer syncPipe.Close()

	data, err := ioutil.ReadAll(syncPipe)
	if err != nil {
		return err
	}
	if !bytes.Equal(data, []byte("OK")) {
		// Anything other than "OK" is an error message from the daemon.
		return errors.New(string(data))
	}
	return nil
}
// terminate is called when Yacs should be terminated. It sends a SIGKILL to
// the container process if it is still alive, force-deletes the container
// using the OCI runtime, removes the shim's base directory, and finally
// closes `containerExited` to unblock `Run`.
func (y *Yacs) terminate() {
	logrus.Debug("cleaning up before exiting")

	// Signal 0 only probes whether the process still exists.
	// NOTE(review): containerStatus would be nil if the container was never
	// created — confirm terminate cannot run before createContainer succeeded.
	if err := syscall.Kill(y.containerStatus.PID, 0); err == nil {
		logrus.Debug("container still alive, sending SIGKILL")
		if err := y.Sigkill(); err != nil {
			logrus.WithError(err).Error("failed to kill container")
		}
	}

	if err := y.Delete(true); err != nil {
		logrus.WithError(err).Error("failed to force delete container")
	}

	if err := os.RemoveAll(y.baseDir); err != nil {
		logrus.WithError(err).Warn("failed to remove base directory")
	}

	close(y.containerExited)
}
// PidFilePath returns the path to the file that contains the PID of the shim,
// located in the shim's base directory.
func (y *Yacs) PidFilePath() string {
	return filepath.Join(y.baseDir, shimPidFileName)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yaman/container/container.go | Go | package container
import (
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/google/uuid"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"github.com/willdurand/containers/internal/cmd"
"github.com/willdurand/containers/internal/runtime"
"github.com/willdurand/containers/internal/yaman/image"
"github.com/willdurand/containers/internal/yaman/network"
)
// ContainerOpts holds the user-provided options for a container.
type ContainerOpts struct {
	// Command overrides the image's default command.
	Command []string
	// Entrypoint overrides the image's default entrypoint.
	Entrypoint []string
	// Remove indicates the container should be deleted after it exits.
	Remove bool
	// Hostname overrides the default hostname (the container ID).
	Hostname string
	Interactive bool
	// Tty requests a PTY for the container process.
	Tty    bool
	Detach bool
	// PublishAll maps every exposed port to a random host port.
	PublishAll bool
}
// Container represents a Yaman container and its on-disk state.
type Container struct {
	ID      string
	// BaseDir is the directory holding this container's files (rootfs,
	// overlay data/work dirs, config.json, log file).
	BaseDir string
	Image   *image.Image
	// Config is the generated OCI runtime configuration.
	Config *runtimespec.Spec
	Opts   ContainerOpts
	ExposedPorts []network.ExposedPort
	// Lifecycle timestamps; a zero value means "not yet" (see IsCreated,
	// IsStarted, IsExited).
	CreatedAt time.Time
	StartedAt time.Time
	ExitedAt  time.Time
	LogFilePath string
	// UseFuse records whether the rootfs was mounted with fuse-overlayfs
	// (rootless) rather than the native OverlayFS.
	UseFuse bool
}
const (
	// logFileName is the name of the container log file in BaseDir.
	logFileName = "container.log"
)
// New builds a Container for the given image and options. It generates a
// fresh (dash-less) UUID as the container ID, refreshes the image data and
// computes the exposed ports. Nothing is written to disk yet (see Mount).
func New(rootDir string, img *image.Image, opts ContainerOpts) (*Container, error) {
	// Dashes are stripped so the ID is a plain hex string.
	id := strings.ReplaceAll(uuid.NewString(), "-", "")
	baseDir := filepath.Join(GetBaseDir(rootDir), id)

	ctr := &Container{
		ID:          id,
		BaseDir:     baseDir,
		Image:       img,
		Opts:        opts,
		LogFilePath: filepath.Join(baseDir, logFileName),
	}
	if err := ctr.Refresh(); err != nil {
		return nil, err
	}

	ports, err := ctr.getExposedPorts()
	if err != nil {
		return nil, err
	}
	ctr.ExposedPorts = ports

	return ctr, nil
}
// Rootfs returns the absolute path to the root filesystem (the overlay mount
// point inside BaseDir).
func (c *Container) Rootfs() string {
	return filepath.Join(c.BaseDir, "rootfs")
}
// Command returns the container's command, which is what gets executed in the
// container when it starts. The user-provided entrypoint/command (from Opts)
// take precedence over the image defaults, and the final command is
// entrypoint followed by command arguments.
func (c *Container) Command() []string {
	entrypoint := c.Image.Config.Config.Entrypoint
	if len(c.Opts.Entrypoint) > 0 {
		entrypoint = c.Opts.Entrypoint
	}

	command := c.Image.Config.Config.Cmd
	if len(c.Opts.Command) > 0 {
		command = c.Opts.Command
	}

	return append(entrypoint, command...)
}
// Mount creates a bundle configuration for the container and mounts its root
// filesystem.
func (c *Container) Mount() error {
	// Create the overlay directories: upper (data), work and the mount point.
	for _, dir := range []string{
		c.BaseDir,
		c.datadir(),
		c.workdir(),
		c.Rootfs(),
	} {
		if err := os.MkdirAll(dir, 0o755); err != nil {
			return err
		}
	}

	mountData := fmt.Sprintf(
		"lowerdir=%s,upperdir=%s,workdir=%s",
		c.lowerdir(),
		c.datadir(),
		c.workdir(),
	)

	fuse, err := exec.LookPath("fuse-overlayfs")
	// We need `fuse-overlayfs` if we want to use it but when Yaman is executed
	// with elevated privileges, we can safely use the native OverlayFS.
	c.UseFuse = err == nil && os.Getuid() != 0

	logrus.WithFields(logrus.Fields{
		"data":   mountData,
		"target": c.Rootfs(),
		"fuse":   c.UseFuse,
	}).Debug("mount overlay")

	if c.UseFuse {
		if err := cmd.Run(exec.Command(fuse, "-o", mountData, c.Rootfs())); err != nil {
			return fmt.Errorf("failed to mount overlay (fuse): %w", err)
		}
	} else {
		if err := syscall.Mount("overlay", c.Rootfs(), "overlay", 0, mountData); err != nil {
			return fmt.Errorf("failed to mount overlay (native): %w", err)
		}
	}

	// Convert image config into a runtime config.
	// See: https://github.com/opencontainers/image-spec/blob/main/conversion.md
	cwd := "/"
	if c.Image.Config.Config.WorkingDir != "" {
		cwd = c.Image.Config.Config.WorkingDir
	}

	// Default the hostname to the container ID.
	hostname := c.Opts.Hostname
	if hostname == "" {
		hostname = c.ID
	}

	// Rootless mode is assumed when not running as root.
	c.Config, err = runtime.BaseSpec(c.Rootfs(), os.Getuid() != 0)
	if err != nil {
		return err
	}
	c.Config.Process = &runtimespec.Process{
		Terminal: c.Opts.Tty,
		User: runtimespec.User{
			UID: 0,
			GID: 0,
		},
		Args: c.Command(),
		Env:  c.Image.Config.Config.Env,
		Cwd:  cwd,
	}
	c.Config.Hostname = hostname

	// Register ourselves (the yaman binary) as a CreateRuntime hook so the
	// network can be configured when the runtime creates the container.
	self, err := os.Executable()
	if err != nil {
		return err
	}
	c.Config.Hooks = &runtimespec.Hooks{
		CreateRuntime: []runtimespec.Hook{
			{
				Path: self,
				Args: []string{self, "container", "hook", "network-setup"},
			},
		},
	}

	// Write the OCI bundle configuration next to the rootfs.
	data, err := json.Marshal(c.Config)
	if err != nil {
		return err
	}
	if err := os.WriteFile(filepath.Join(c.BaseDir, "config.json"), data, 0o644); err != nil {
		return err
	}

	return nil
}
// Unmount unmounts the root filesystem of the container. Failures are only
// logged (and nil is returned) because they usually mean the rootfs was
// already unmounted.
func (c *Container) Unmount() error {
	if c.UseFuse {
		if err := cmd.Run(exec.Command("fusermount3", "-u", c.Rootfs())); err != nil {
			logrus.WithError(err).Debug("failed to unmount rootfs (fuse)")
		}
	} else {
		if err := syscall.Unmount(c.Rootfs(), 0); err != nil {
			// This likely happens because the rootfs has been previously unmounted.
			logrus.WithError(err).Debug("failed to unmount rootfs (native)")
		}
	}

	return nil
}
// IsCreated returns `true` when the container has been created, and `false`
// otherwise (a zero CreatedAt means "not created yet").
func (c *Container) IsCreated() bool {
	return !c.CreatedAt.IsZero()
}

// IsStarted returns `true` when the container has started, and `false` otherwise.
func (c *Container) IsStarted() bool {
	return !c.StartedAt.IsZero()
}

// IsExited returns `true` when the container has exited, and `false` otherwise.
func (c *Container) IsExited() bool {
	return !c.ExitedAt.IsZero()
}
// Delete removes the container base directory and all its files.
func (c *Container) Delete() error {
	if err := os.RemoveAll(c.BaseDir); err != nil {
		return err
	}

	logrus.WithField("id", c.ID).Debug("container deleted")
	return nil
}
// Refresh reloads the missing container properties (from disk); currently
// this only refreshes the image data.
func (c *Container) Refresh() error {
	return c.Image.Refresh()
}
// getExposedPorts returns the ports exposed by the image. When the
// `PublishAll` option is set, each port is additionally assigned a random
// host port.
func (c *Container) getExposedPorts() ([]network.ExposedPort, error) {
	ports := make([]network.ExposedPort, 0)

	exposedPorts, err := c.Image.ExposedPorts()
	if err != nil {
		return ports, err
	}

	for _, port := range exposedPorts {
		if c.Opts.PublishAll {
			hostPort, err := network.GetRandomPort()
			if err != nil {
				return ports, err
			}
			port.HostPort = hostPort
		}
		ports = append(ports, port)
	}

	return ports, nil
}
func (c *Container) lowerdir() string {
return strings.Join(c.Image.LayerDirs(), ":")
}
func (c *Container) datadir() string {
return filepath.Join(c.BaseDir, "data")
}
// workdir returns the path to the container's `work` directory.
// NOTE(review): presumably the overlay workdir — confirm in the mount code.
func (c *Container) workdir() string {
	return filepath.Join(c.BaseDir, "work")
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yaman/container/utils.go | Go | package container
import "path/filepath"
// GetBaseDir returns the directory, under rootDir, where all containers are
// stored on disk.
func GetBaseDir(rootDir string) string {
	const subdir = "containers"
	return filepath.Join(rootDir, subdir)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yaman/container_attach.go | Go | package yaman
import (
"github.com/willdurand/containers/internal/yaman/shim"
)
// AttachOpts contains the options for the `Attach` function, selecting which
// standard streams should be attached.
type AttachOpts struct {
	Stdin  bool
	Stdout bool
	Stderr bool
}

// Attach attaches the current process to the streams of the container
// identified by `id`, through its shim.
func Attach(rootDir, id string, opts AttachOpts) error {
	shim, err := shim.Load(rootDir, id)
	if err != nil {
		return err
	}
	return shim.Attach(opts.Stdin, opts.Stdout, opts.Stderr)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yaman/container_cleanup.go | Go | package yaman
import "github.com/willdurand/containers/internal/yaman/shim"
// CleanUp terminates the shim of a container and, when the container was
// created with the "remove" option, deletes the container as well.
func CleanUp(rootDir, id string) error {
	shim, err := shim.Load(rootDir, id)
	if err != nil {
		return err
	}
	if err := shim.Terminate(); err != nil {
		return err
	}
	if shim.Container.Opts.Remove {
		return shim.Delete()
	}
	return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yaman/container_create.go | Go | package yaman
import (
"github.com/willdurand/containers/internal/yaman/container"
"github.com/willdurand/containers/internal/yaman/image"
"github.com/willdurand/containers/internal/yaman/registry"
"github.com/willdurand/containers/internal/yaman/shim"
)
// Create creates a new container: it pulls the image if needed, prepares the
// container on disk, and then creates the shim. On success both the shim and
// the container are returned.
func Create(rootDir, imageName string, pullOpts registry.PullOpts, containerOpts container.ContainerOpts, shimOpts shim.ShimOpts) (*shim.Shim, *container.Container, error) {
	img, err := image.New(rootDir, imageName)
	if err != nil {
		return nil, nil, err
	}
	if err := registry.Pull(img, pullOpts); err != nil {
		return nil, nil, err
	}
	// Note: `container` shadows the package name from here on.
	container, err := container.New(rootDir, img, containerOpts)
	if err != nil {
		return nil, nil, err
	}
	// Best-effort cleanup: if the shim fails to create the container, remove
	// the on-disk state so a half-created container is not left behind.
	defer func() {
		if !container.IsCreated() {
			container.Delete()
		}
	}()
	shim := shim.New(container, shimOpts)
	if err := shim.Create(rootDir); err != nil {
		return nil, nil, err
	}
	return shim, container, nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yaman/container_delete.go | Go | package yaman
import "github.com/willdurand/containers/internal/yaman/shim"
// Delete deletes a container, delegating to its shim.
func Delete(rootDir, id string) error {
	s, err := shim.Load(rootDir, id)
	if err != nil {
		return err
	}
	return s.Delete()
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yaman/container_hook.go | Go | package yaman
import (
"encoding/json"
"io"
"io/ioutil"
"path/filepath"
"strconv"
"time"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"github.com/willdurand/containers/internal/yaman/network"
"github.com/willdurand/containers/internal/yaman/shim"
)
// ProcessHook handles an OCI runtime hook invocation: it decodes the runtime
// state from `r` and performs the hook-specific work for the corresponding
// container.
func ProcessHook(rootDir, hookName string, r io.Reader) error {
	var state runtimespec.State
	if err := json.NewDecoder(r).Decode(&state); err != nil {
		return err
	}
	shim, err := shim.Load(rootDir, state.ID)
	if err != nil {
		return err
	}
	logger := logrus.WithField("id", state.ID)
	switch hookName {
	case "network-setup":
		// Set up user-mode networking for the container's network namespace.
		slirp4netns, err := network.NewSlirp4netns(state.Pid, shim.Slirp4netnsApiSocketPath())
		if err != nil {
			return err
		}
		pid, err := slirp4netns.Start()
		if err != nil {
			return err
		}
		// Write PID file for later. Note that we could have used the exit-fd as
		// well since this PID file is mainly used to terminate the slirp4netns
		// process when we clean-up the container.
		if err := ioutil.WriteFile(
			shim.Slirp4netnsPidFilePath(),
			[]byte(strconv.Itoa(pid)),
			0o644,
		); err != nil {
			return err
		}
		// Configure DNS inside the container.
		// Best-effort: a failure is only logged, the container still runs.
		if err := ioutil.WriteFile(
			filepath.Join(shim.Container.Rootfs(), "etc", "resolv.conf"),
			[]byte("nameserver 10.0.2.3\n"),
			0o644,
		); err != nil {
			logger.WithError(err).Warn("failed to write /etc/resolv.conf")
		}
		// Expose ports
		if len(shim.Container.ExposedPorts) > 0 {
			// TODO: use ready-FD instead...
			// Give slirp4netns a moment to create its API socket.
			time.Sleep(50 * time.Millisecond)
			if err := slirp4netns.ExposePorts(shim.Container.ExposedPorts); err != nil {
				return err
			}
		}
	}
	// NOTE(review): unknown hook names fall through and return nil — confirm
	// this is intended.
	return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yaman/container_inspect.go | Go | package yaman
import (
"time"
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/willdurand/containers/internal/yacs"
"github.com/willdurand/containers/internal/yaman/container"
"github.com/willdurand/containers/internal/yaman/image"
"github.com/willdurand/containers/internal/yaman/network"
"github.com/willdurand/containers/internal/yaman/shim"
)
// ContainerInspect is a data transfer structure and represents the result of
// the `inspect` command.
type ContainerInspect struct {
	Id           string
	Root         string
	Config       runtimespec.Spec
	Options      container.ContainerOpts
	Created      time.Time
	Started      time.Time
	Exited       time.Time
	ExposedPorts []network.ExposedPort
	// Image bundles the image metadata with its parsed config and manifest.
	Image struct {
		image.Image
		Config   imagespec.Image
		Manifest imagespec.Manifest
	}
	// Shim bundles the shim state with its options and control socket path.
	Shim struct {
		yacs.YacsState
		Options    shim.ShimOpts
		SocketPath string
	}
}
// Inspect returns low-level information about a container.
func Inspect(rootDir, id string) (ContainerInspect, error) {
	var inspect ContainerInspect
	shim, err := shim.Load(rootDir, id)
	if err != nil {
		return inspect, err
	}
	inspect.Id = shim.Container.ID
	inspect.Root = shim.Container.BaseDir
	inspect.Config = *shim.Container.Config
	inspect.Options = shim.Container.Opts
	inspect.Created = shim.Container.CreatedAt
	inspect.Started = shim.Container.StartedAt
	inspect.Exited = shim.Container.ExitedAt
	inspect.ExposedPorts = shim.Container.ExposedPorts
	inspect.Image.Image = *shim.Container.Image
	inspect.Image.Config = *shim.Container.Image.Config
	inspect.Image.Manifest = *shim.Container.Image.Manifest
	// The shim state is best-effort: when it cannot be retrieved, the
	// corresponding fields are simply left at their zero values.
	if state, err := shim.GetState(); err == nil {
		inspect.Shim.YacsState = *state
	}
	inspect.Shim.Options = shim.Opts
	inspect.Shim.SocketPath = shim.SocketPath
	return inspect, nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yaman/container_list.go | Go | package yaman
import (
"fmt"
"io/ioutil"
"regexp"
"sort"
"strings"
"time"
"github.com/docker/go-units"
"github.com/willdurand/containers/internal/constants"
"github.com/willdurand/containers/internal/yaman/container"
"github.com/willdurand/containers/internal/yaman/network"
"github.com/willdurand/containers/internal/yaman/shim"
)
// ContainerListItem contains the data about a container for the user.
type ContainerListItem struct {
	ID      string
	Image   string
	Command string
	// Status is a human-readable status, e.g. "Exited (0) 2 hours ago".
	Status       string
	Created      time.Time
	ExposedPorts []network.ExposedPort
}

// ContainerList contains the list of containers to show to the user.
type ContainerList []ContainerListItem
// validId matches a full container ID: exactly 32 lowercase alphanumeric
// characters. The anchors are required — without them, any string merely
// *containing* such a run (e.g. a longer name) would be accepted as valid.
var validId = regexp.MustCompile("^[a-z0-9]{32}$")
// GetContainerIds returns a list of container IDs managed by Yaman.
//
// Only directory names that look like container IDs and start with `prefix`
// are returned; an unreadable base directory yields an empty list.
func GetContainerIds(rootDir, prefix string) []string {
	var ids []string
	files, err := ioutil.ReadDir(container.GetBaseDir(rootDir))
	if err != nil {
		return ids
	}
	for _, f := range files {
		id := f.Name()
		if validId.MatchString(id) && strings.HasPrefix(id, prefix) {
			ids = append(ids, id)
		}
	}
	return ids
}
// ListContainers returns the list of containers running by default.
//
// Optionally, it can return all containers managed by Yaman (running or not).
// This function does not return `Container` instances but rather data transfer
// objects for a "user interface".
func ListContainers(rootDir string, all bool) (ContainerList, error) {
	var list ContainerList
	for _, id := range GetContainerIds(rootDir, "") {
		shim, err := shim.Load(rootDir, id)
		if err != nil {
			// Skip entries we cannot load instead of failing the whole list.
			continue
		}
		state, err := shim.GetState()
		if err != nil {
			return nil, err
		}
		if !all && state.State.Status != constants.StateRunning {
			continue
		}
		status := state.State.Status
		if state.Status.Exited() {
			// Mimic the docker/podman status format, e.g. "Exited (0) 3 hours ago".
			status = fmt.Sprintf(
				"Exited (%d) %s ago",
				state.Status.ExitStatus(),
				units.HumanDuration(time.Since(shim.Container.ExitedAt)),
			)
		}
		list = append(list, ContainerListItem{
			ID:           shim.Container.ID,
			Image:        shim.Container.Image.FQIN(),
			Command:      strings.Join(shim.Container.Command(), " "),
			Status:       status,
			Created:      shim.Container.CreatedAt,
			ExposedPorts: shim.Container.ExposedPorts,
		})
	}
	// Most recently created first.
	sort.Slice(list, func(i, j int) bool {
		return list[j].Created.Before(list[i].Created)
	})
	return list, nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yaman/container_logs.go | Go | package yaman
import (
"io"
"github.com/willdurand/containers/internal/yaman/shim"
)
// CopyLogsOpts contains the options for the `CopyLogs` function.
type CopyLogsOpts struct {
	// Timestamps requests log timestamps to be included.
	Timestamps bool
	Stdout     io.Writer
	Stderr     io.Writer
}

// CopyLogs copies the logs of a container to the writers specified in the
// options.
func CopyLogs(rootDir, id string, opts CopyLogsOpts) error {
	shim, err := shim.Load(rootDir, id)
	if err != nil {
		return err
	}
	return shim.CopyLogs(opts.Stdout, opts.Stderr, opts.Timestamps)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yaman/container_restart.go | Go | package yaman
import (
"github.com/willdurand/containers/internal/yaman/shim"
)
// Restart recreates the shim of an existing container and starts the
// container again, detached and non-interactive.
func Restart(rootDir, id string) error {
	shim, err := shim.Load(rootDir, id)
	if err != nil {
		return err
	}
	if err := shim.Recreate(rootDir); err != nil {
		return err
	}
	sOpts := StartOpts{
		Attach:      false,
		Interactive: false,
	}
	_, err = Start(rootDir, id, sOpts)
	return err
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yaman/container_run.go | Go | package yaman
import (
"github.com/willdurand/containers/internal/yaman/container"
"github.com/willdurand/containers/internal/yaman/registry"
"github.com/willdurand/containers/internal/yaman/shim"
)
// RunResult represents the return value of the `Run` function.
type RunResult struct {
	ContainerID string
	ExitStatus  int
}

// Run runs a command in a new container. We return the ID of the container on
// success and an error otherwise.
func Run(rootDir, imageName string, pullOpts registry.PullOpts, containerOpts container.ContainerOpts, shimOpts shim.ShimOpts) (RunResult, error) {
	var result RunResult
	_, container, err := Create(rootDir, imageName, pullOpts, containerOpts, shimOpts)
	if err != nil {
		return result, err
	}
	// Attach to the container streams unless it was requested detached.
	startOpts := StartOpts{
		Attach:      !containerOpts.Detach,
		Interactive: containerOpts.Interactive,
	}
	sr, err := Start(rootDir, container.ID, startOpts)
	if err != nil {
		return result, err
	}
	result = RunResult{
		ContainerID: container.ID,
		ExitStatus:  sr.ExitStatus,
	}
	return result, nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yaman/container_start.go | Go | package yaman
import (
"github.com/willdurand/containers/internal/yaman/shim"
)
// StartOpts contains the options for the `Start` function.
type StartOpts struct {
	Attach      bool
	Interactive bool
}

// StartResult represents the return value of the `Start` function.
type StartResult struct {
	ExitStatus int
}

// Start starts a created container and, when requested, attaches to its
// standard streams until they are closed.
func Start(rootDir, id string, opts StartOpts) (StartResult, error) {
	var result StartResult
	shim, err := shim.Load(rootDir, id)
	if err != nil {
		return result, err
	}
	attachDone := make(chan error)
	if opts.Attach || opts.Interactive {
		// Attach before starting the container to make sure we can receive all
		// the data when the container starts.
		go func() {
			attachDone <- shim.Attach(
				opts.Interactive && shim.Container.Opts.Interactive,
				true,
				true,
			)
		}()
	} else {
		// Closed channel: the receive below returns immediately with nil.
		close(attachDone)
	}
	if err := shim.StartContainer(); err != nil {
		return result, err
	}
	err = <-attachDone
	if err != nil {
		return result, err
	}
	state, err := shim.GetState()
	if err != nil {
		return result, err
	}
	result = StartResult{
		ExitStatus: state.Status.ExitStatus(),
	}
	return result, nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yaman/container_stop.go | Go | package yaman
import "github.com/willdurand/containers/internal/yaman/shim"
// Stop stops the container identified by `id`, delegating to its shim.
func Stop(rootDir, id string) error {
	s, err := shim.Load(rootDir, id)
	if err != nil {
		return err
	}
	return s.StopContainer()
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yaman/image/image.go | Go | package image
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/willdurand/containers/internal/yaman/network"
)
// Image represents a OCI image.
type Image struct {
	Hostname string
	Name     string
	Version  string
	BaseDir  string
	// Manifest and Config are nil until loaded from disk (see Refresh).
	Manifest *imagespec.Manifest
	Config   *imagespec.Image
}

// defaultImageVersion is used when the image name carries no explicit tag.
const defaultImageVersion = "latest"

// imageNamePattern is the regular expression used to validate an OCI image
// name according to the OCI specification.
var imageNamePattern = regexp.MustCompile("^[a-z0-9]+([._-][a-z0-9]+)*(/[a-z0-9]+([._-][a-z0-9]+)*)*$")
// New creates a new image given a directory (to store the image) and the name
// of the image, which must be fully qualified, i.e.
// "<hostname>/<user>/<image>[:<version>]".
func New(rootDir, name string) (*Image, error) {
	parts := strings.Split(name, "/")
	if len(parts) != 3 {
		return nil, fmt.Errorf("image name must be fully qualified")
	}
	hostName := parts[0]
	userName := parts[1]
	// Split the last component into the image name and an optional tag.
	parts = strings.Split(parts[2], ":")
	imageName := userName + "/" + parts[0]
	if !isNameValid(imageName) {
		return nil, fmt.Errorf("invalid image name")
	}
	imageVersion := defaultImageVersion
	if len(parts) > 1 && parts[1] != "" {
		imageVersion = parts[1]
	}
	img := &Image{
		Hostname: hostName,
		Name:     imageName,
		Version:  imageVersion,
		BaseDir:  filepath.Join(GetBaseDir(rootDir), hostName, imageName, imageVersion),
	}
	return img, nil
}
// GetBaseDir returns the base directory where all images are stored (locally).
func GetBaseDir(rootDir string) string {
	const subdir = "images"
	return filepath.Join(rootDir, subdir)
}
// FQIN returns the Fully Qualified Image Name of an image, e.g.
// "docker.io/library/alpine:latest".
func (i *Image) FQIN() string {
	return fmt.Sprintf("%s/%s:%s", i.Hostname, i.Name, i.Version)
}
// LayerDirs returns a list of absolute (directory) paths pointing to the
// different layers of the image. This list is ordered so that the last layer
// directory in the list is the lowest directory (which matches the expectation
// of `lowerdir` in Overlay).
func (i *Image) LayerDirs() []string {
	var dirs []string
	// Note: The order of lower directories is the rightmost is the lowest, thus
	// the upper directory is on top of the first directory in the left-to-right
	// list of lower directories; NOT on top of the last directory in the list,
	// as the order might seem to suggest.
	//
	// Source: https://wiki.archlinux.org/title/Overlay_filesystem
	//
	// Hence the reverse iteration over the config's diff IDs below.
	for idx := len(i.Config.RootFS.DiffIDs) - 1; idx >= 0; idx-- {
		digest := i.Config.RootFS.DiffIDs[idx]
		dirs = append(dirs, filepath.Join(i.LayersDir(), digest.Encoded()))
	}
	return dirs
}
// ManifestFilePath returns the path to the `manifest.json` file, stored at
// the root of the image base directory.
func (i *Image) ManifestFilePath() string {
	return filepath.Join(i.BaseDir, "manifest.json")
}
// ConfigFilePath returns the path to the `config.json` file, which lives in
// the blobs directory.
func (i *Image) ConfigFilePath() string {
	return filepath.Join(i.BlobsDir(), "config.json")
}
// BlobsDir returns the directory where the image blobs (the config file and
// the downloaded layer blobs) are stored.
func (i *Image) BlobsDir() string {
	return filepath.Join(i.BaseDir, "blobs")
}
// LayersDir returns the directory the (extracted) layers should be written
// to.
func (i *Image) LayersDir() string {
	return filepath.Join(i.BaseDir, "layers")
}
// Refresh reloads the missing image properties (from disk): first the image
// config, then the manifest.
func (i *Image) Refresh() error {
	if err := i.loadConfig(); err != nil {
		return err
	}
	return i.loadManifest()
}
// ExposedPorts returns the list of exposed ports defined in the image.
//
// It requires the image config to be loaded first (see `Refresh`).
func (i *Image) ExposedPorts() ([]network.ExposedPort, error) {
	return network.ParseExposedPorts(i.Config.Config.ExposedPorts)
}
// loadConfig loads the image config from disk into i.Config.
//
// The previous version wrapped the path in a single-argument filepath.Join,
// which only re-Cleans an already-clean path; the accessor is called directly
// instead.
func (i *Image) loadConfig() error {
	data, err := os.ReadFile(i.ConfigFilePath())
	if err != nil {
		return err
	}
	i.Config = new(imagespec.Image)
	if err := json.Unmarshal(data, i.Config); err != nil {
		return err
	}
	return nil
}
// loadManifest loads the manifest data of an image from disk into i.Manifest.
func (i *Image) loadManifest() error {
	data, err := os.ReadFile(i.ManifestFilePath())
	if err != nil {
		return err
	}
	i.Manifest = new(imagespec.Manifest)
	return json.Unmarshal(data, i.Manifest)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yaman/image/image_test.go | Go | package image
import "testing"
// TestIsNameValid exercises the image-name validation with a table of
// valid and invalid names (tags are not part of a valid name).
func TestIsNameValid(t *testing.T) {
	for _, tc := range []struct {
		name     string
		expected bool
	}{
		{"", false},
		{"alpine:latest", false},
		{"alpine", true},
		{"library/alpine", true},
		{"gcr.io/project/image", true},
	} {
		if valid := isNameValid(tc.name); valid != tc.expected {
			t.Errorf("name: %s - got: %t, want: %t", tc.name, valid, tc.expected)
		}
	}
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yaman/image/utils.go | Go | package image
// isNameValid validates the name of an image.
//
// The actual rules live in `imageNamePattern` (see image.go).
func isNameValid(name string) bool {
	return imageNamePattern.MatchString(name)
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yaman/image_list.go | Go | package yaman
import (
"errors"
"io/fs"
"io/ioutil"
"path/filepath"
"sort"
"strings"
"time"
"github.com/willdurand/containers/internal/yaman/image"
)
// ImageListItem contains the data about a locally stored image for the user.
type ImageListItem struct {
	Registry string
	Name     string
	Version  string
	// Created comes from the image config; Pulled is derived from the local
	// directory modification time.
	Created time.Time
	Pulled  time.Time
}

// ImageList contains the list of images to show to the user.
type ImageList []ImageListItem
// ListImages returns the list of images stored locally, most recently pulled
// first.
func ListImages(rootDir string) (ImageList, error) {
	var list ImageList
	imagesDir := image.GetBaseDir(rootDir)
	// This is what we are going to traverse:
	//
	// /run/yaman/images
	// └── docker.io
	//     └── library
	//         └─── alpine
	//             └── latest
	//                 ├── blobs
	//                 │   ├── config.json
	//                 │   └── sha256:2408cc74d12b6cd092bb8b516ba7d5e290f485d3eb9672efc00f0583730179e8
	//                 ├── layers
	//                 │   └── 24302eb7d9085da80f016e7e4ae55417e412fb7e0a8021e95e3b60c67cde557d
	//                 └── manifest.json
	//
	// There is only one image with the following properties:
	//
	// - hostname: docker.io
	// - user: library
	// - image: alpine
	// - version: latest
	//
	// A missing directory at any level is not an error: it simply means there
	// is nothing to list there.
	hostnames, err := ioutil.ReadDir(imagesDir)
	if err != nil && !errors.Is(err, fs.ErrNotExist) {
		return list, err
	}
	for _, hostname := range hostnames {
		hostnameDir := filepath.Join(imagesDir, hostname.Name())
		users, err := ioutil.ReadDir(hostnameDir)
		if err != nil && !errors.Is(err, fs.ErrNotExist) {
			return list, err
		}
		for _, user := range users {
			userDir := filepath.Join(hostnameDir, user.Name())
			images, err := ioutil.ReadDir(userDir)
			if err != nil && !errors.Is(err, fs.ErrNotExist) {
				return list, err
			}
			for _, i := range images {
				imageName := i.Name()
				imageDir := filepath.Join(userDir, imageName)
				versions, err := ioutil.ReadDir(imageDir)
				if err != nil && !errors.Is(err, fs.ErrNotExist) {
					return list, err
				}
				for _, version := range versions {
					fullyQualifiedImageName := strings.Join([]string{
						hostname.Name(),
						user.Name(),
						imageName,
					}, "/") + ":" + version.Name()
					img, err := image.New(rootDir, fullyQualifiedImageName)
					if err != nil {
						return list, err
					}
					// We need to refresh the image to load the config/manifest.
					if err := img.Refresh(); err != nil {
						return list, err
					}
					list = append(list, ImageListItem{
						Registry: img.Hostname,
						Name:     img.Name,
						Version:  img.Version,
						Created:  *img.Config.Created,
						Pulled:   version.ModTime(),
					})
				}
			}
		}
	}
	// Most recently pulled first.
	sort.Slice(list, func(i, j int) bool {
		return list[j].Pulled.Before(list[i].Pulled)
	})
	return list, nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yaman/network/exposed_port.go | Go | package network
import (
"errors"
"fmt"
"strconv"
"strings"
)
// ErrInvalidExposedPort is returned when an exposed port entry cannot be
// parsed; the expected format is "<port>/<proto>".
var ErrInvalidExposedPort = errors.New("invalid exposed port")
// ExposedPort describes a port exposed by a container, optionally forwarded
// from a host address/port to a guest address/port.
type ExposedPort struct {
	Proto     string
	HostAddr  string
	HostPort  int
	GuestAddr string
	GuestPort int
}

// String returns a human-readable representation of an exposed port: either
// "<guest>/<proto>" when no host port is bound, or
// "<host addr>:<host port>-><guest port>/<proto>" when one is.
func (p ExposedPort) String() string {
	if p.HostPort != 0 {
		return fmt.Sprintf("%s:%d->%d/%s", p.HostAddr, p.HostPort, p.GuestPort, p.Proto)
	}
	return fmt.Sprintf("%d/%s", p.GuestPort, p.Proto)
}
// ParseExposedPorts parses the exposed ports listed in an image configuration
// and returns a list of exposed ports.
//
// If the configuration is invalid, an error will be returned.
//
// Note: map iteration order is randomized in Go, so the order of the
// returned slice is not stable across calls.
func ParseExposedPorts(exposedPorts map[string]struct{}) ([]ExposedPort, error) {
	ports := make([]ExposedPort, 0)
	for host := range exposedPorts {
		// Each key looks like "8080/tcp".
		parts := strings.Split(host, "/")
		if len(parts) != 2 {
			return ports, ErrInvalidExposedPort
		}
		guestPort, err := strconv.Atoi(parts[0])
		if err != nil {
			return ports, ErrInvalidExposedPort
		}
		ports = append(ports, ExposedPort{
			Proto:     parts[1],
			HostAddr:  "0.0.0.0",
			HostPort:  0,
			GuestPort: guestPort,
		})
	}
	return ports, nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yaman/network/exposed_port_test.go | Go | package network
import "testing"
// TestExposedPort checks the two String() formats: with and without a bound
// host port.
func TestExposedPort(t *testing.T) {
	for _, tc := range []struct {
		port     ExposedPort
		expected string
	}{
		{
			port: ExposedPort{
				Proto:     "tcp",
				HostAddr:  "1.2.3.4",
				HostPort:  1234,
				GuestPort: 4567,
			},
			expected: "1.2.3.4:1234->4567/tcp",
		},
		{
			port: ExposedPort{
				Proto:     "tcp",
				HostAddr:  "0.0.0.0",
				HostPort:  0,
				GuestPort: 4567,
			},
			expected: "4567/tcp",
		},
	} {
		if tc.port.String() != tc.expected {
			t.Errorf("expected: %s, got: %s", tc.expected, tc.port.String())
		}
	}
}
// TestParseExposedPorts checks that a valid exposed-ports map is parsed
// without error and yields one entry per key.
func TestParseExposedPorts(t *testing.T) {
	exposedPorts := map[string]struct{}{
		"1234/tcp": {},
		"53/udp":   {},
	}
	ports, err := ParseExposedPorts(exposedPorts)
	if err != nil {
		t.Errorf("expected no error, got: %v", err)
	}
	if len(ports) != len(exposedPorts) {
		t.Errorf("expected %d ports, got: %d", len(exposedPorts), len(ports))
	}
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
internal/yaman/network/slirp4netns.go | Go | package network
import (
"encoding/json"
"fmt"
"net"
"os/exec"
"strconv"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// Slirp4netnsCommand is a JSON command sent to the slirp4netns API socket.
type Slirp4netnsCommand struct {
	Execute string                 `json:"execute"`
	Args    map[string]interface{} `json:"arguments,omitempty"`
}

// Slirp4netns drives a slirp4netns process providing user-mode networking
// for the network namespace of the process identified by `pid`.
type Slirp4netns struct {
	pid        int
	binaryPath string
	socketPath string
}
// NewSlirp4netns returns a new Slirp4netns for the process `pid`, using
// `socketPath` as the API socket. It fails when the slirp4netns binary
// cannot be found in $PATH.
func NewSlirp4netns(pid int, socketPath string) (*Slirp4netns, error) {
	slirp4netns, err := exec.LookPath("slirp4netns")
	if err != nil {
		return nil, err
	}
	return &Slirp4netns{
		pid:        pid,
		binaryPath: slirp4netns,
		socketPath: socketPath,
	}, nil
}
// Start spawns the slirp4netns process and returns its PID.
//
// The process is deliberately not waited on: the handle is released and the
// caller is responsible for terminating slirp4netns later (via the PID).
func (s *Slirp4netns) Start() (int, error) {
	cmd := exec.Command(s.binaryPath, []string{
		"--configure",
		"--mtu=65520",
		"--disable-host-loopback",
		"--api-socket", s.socketPath,
		strconv.Itoa(s.pid),
		"en0",
	}...)
	logrus.WithField("command", cmd.String()).Debug("starting slirp4netns")
	if err := cmd.Start(); err != nil {
		return 0, err
	}
	defer cmd.Process.Release()
	return cmd.Process.Pid, nil
}
// ExposePorts forwards each exposed port that has a non-zero host port to
// the container, through the slirp4netns API.
func (s *Slirp4netns) ExposePorts(ports []ExposedPort) error {
	for _, port := range ports {
		if port.HostPort == 0 {
			// No host port assigned: nothing to forward.
			continue
		}
		if err := s.addHostFwd(port); err != nil {
			return err
		}
	}
	return nil
}
// addHostFwd registers a single host-to-guest port forwarding rule via the
// slirp4netns "add_hostfwd" API command.
func (s *Slirp4netns) addHostFwd(port ExposedPort) error {
	cmd := Slirp4netnsCommand{
		Execute: "add_hostfwd",
		Args: map[string]interface{}{
			"proto":     port.Proto,
			"host_addr": port.HostAddr,
			"host_port": port.HostPort,
			// TODO: add support for "guest_addr"
			"guest_port": port.GuestPort,
		},
	}
	return s.executeCommand(cmd)
}
// executeCommand sends a single JSON command to the slirp4netns API socket
// and decodes the response, returning an error when slirp4netns reports one.
//
// Fix: the read count used to be stored in a variable named `len`, shadowing
// the builtin; it is renamed to `n`.
func (s *Slirp4netns) executeCommand(cmd Slirp4netnsCommand) error {
	data, err := json.Marshal(cmd)
	if err != nil {
		return err
	}
	conn, err := net.Dial("unix", s.socketPath)
	if err != nil {
		return err
	}
	defer func() {
		if err := conn.Close(); err != nil {
			logrus.WithError(err).Error("failed to close slirp4netns socket")
		}
	}()
	if _, err := conn.Write(data); err != nil {
		return err
	}
	// Half-close the write side so slirp4netns knows the request is complete.
	if err := conn.(*net.UnixConn).CloseWrite(); err != nil {
		return errors.New("failed to close write slirp4netns socket")
	}
	// NOTE(review): a single read caps the response at 2048 bytes; a larger
	// reply would be truncated and fail to unmarshal. The responses sent by
	// slirp4netns are short JSON objects, so this is fine in practice.
	buf := make([]byte, 2048)
	n, err := conn.Read(buf)
	if err != nil {
		return err
	}
	var response map[string]interface{}
	if err := json.Unmarshal(buf[0:n], &response); err != nil {
		return err
	}
	if err, ok := response["error"]; ok {
		return fmt.Errorf("%s failed: %s", cmd.Execute, err)
	}
	return nil
}
| willdurand/containers | 9 | 📦 This is a repository with some code I wrote to learn more about containers. | Go | willdurand | William Durand | mozilla |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.