index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
44,191,906
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_serial_io.py
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests scenario for the Firecracker serial console."""
import fcntl
import os
import platform
import subprocess
import termios
import time
from framework import utils
from framework.microvm import Serial
from framework.state_machine import TestState
PLATFORM = platform.machine()
class WaitTerminal(TestState):  # pylint: disable=too-few-public-methods
    """Initial state when we wait for the login prompt."""

    def handle_input(self, serial, input_char) -> TestState:
        """Advance the state machine on console output.

        Once the login-prompt pattern is matched, send the `id` command and
        transition to waiting for its expected output.
        """
        if not self.match(input_char):
            return self
        serial.tx("id")
        return WaitIDResult("uid=0(root) gid=0(root) groups=0(root)")
class WaitIDResult(TestState):  # pylint: disable=too-few-public-methods
    """Wait for the console to show the result of the 'id' shell command."""

    def handle_input(self, unused_serial, input_char) -> TestState:
        """Move to the terminal state once the expected output is matched."""
        return TestFinished() if self.match(input_char) else self
class TestFinished(TestState):  # pylint: disable=too-few-public-methods
    """Test complete and successful."""

    def handle_input(self, unused_serial, _) -> TestState:
        """Terminal state: ignore any further console output."""
        return self
def test_serial_after_snapshot(uvm_plain, microvm_factory):
    """
    Serial I/O after restoring from a snapshot.
    """
    base_vm = uvm_plain
    base_vm.jailer.daemonize = False
    base_vm.spawn()
    base_vm.basic_config(
        vcpu_count=2,
        mem_size_mib=256,
        boot_args="console=ttyS0 reboot=k panic=1 pci=off",
    )
    console = Serial(base_vm)
    console.open()
    base_vm.start()
    # Wait for the shell prompt to show up on the console.
    console.rx("ubuntu-fc-uvm:~#")

    # Snapshot the VM, then tear down the base microVM.
    snapshot = base_vm.snapshot_full()
    base_vm.kill()

    # Bring up a clone restored from the snapshot.
    clone = microvm_factory.build()
    clone.jailer.daemonize = False
    clone.spawn()
    clone.restore_from_snapshot(snapshot, resume=True)

    console = Serial(clone)
    console.open()
    # Sending a newline makes the serial flush the pending login content.
    console.tx("")
    # Wait for the shell prompt again on the restored VM.
    console.rx("ubuntu-fc-uvm:~#")
    console.tx("pwd")
    output = console.rx("#")
    assert "/root" in output
def test_serial_console_login(test_microvm_with_api):
    """
    Test serial console login.
    """
    vm = test_microvm_with_api
    vm.jailer.daemonize = False
    vm.spawn()

    # Memory monitoring is pointless here: the guest reboots and the
    # process dies before pmap can sample the RSS.
    vm.memory_monitor = None

    # Set up the microVM with 1 vCPU and a serial console.
    vm.basic_config(vcpu_count=1, boot_args="console=ttyS0 reboot=k panic=1 pci=off")
    vm.start()

    console = Serial(vm)
    console.open()

    # Drive the login state machine until it reaches the final state.
    state = WaitTerminal("ubuntu-fc-uvm:")
    while not isinstance(state, TestFinished):
        state = state.handle_input(console, console.rx_char())
def get_total_mem_size(pid):
    """Get total memory usage for a process."""
    # Last line of `pmap` holds the totals; extract the second column.
    pipeline = f"pmap {pid} | tail -n 1 | sed 's/^ //' | tr -s ' ' | cut -d' ' -f2"
    returncode, stdout, stderr = utils.run_cmd(pipeline)
    assert returncode == 0
    assert stderr == ""
    return stdout
def send_bytes(tty, bytes_count, timeout=60):
    """Send data to the terminal."""
    deadline = time.time() + timeout
    for _ in range(bytes_count):
        # TIOCSTI injects the character into the terminal input queue.
        fcntl.ioctl(tty, termios.TIOCSTI, "\n")
        if time.time() > deadline:
            break
def test_serial_dos(test_microvm_with_api):
    """
    Test serial console behavior under DoS.

    Floods the Firecracker process' terminal with input and asserts that
    the process' total memory footprint does not change.
    """
    microvm = test_microvm_with_api
    microvm.jailer.daemonize = False
    microvm.spawn()

    # Set up the microVM with 1 vCPU and a serial console.
    microvm.basic_config(
        vcpu_count=1,
        add_root_device=False,
        boot_args="console=ttyS0 reboot=k panic=1 pci=off",
    )
    microvm.start()

    # Open an fd for firecracker process terminal.
    tty_path = f"/proc/{microvm.jailer_clone_pid}/fd/0"
    tty_fd = os.open(tty_path, os.O_RDWR)
    try:
        # Check if the total memory size changed.
        before_size = get_total_mem_size(microvm.jailer_clone_pid)
        send_bytes(tty_fd, 100000000, timeout=1)
        after_size = get_total_mem_size(microvm.jailer_clone_pid)
    finally:
        # Fix: the fd was previously leaked; always close it.
        os.close(tty_fd)
    assert before_size == after_size, (
        "The memory size of the "
        "Firecracker process "
        "changed from {} to {}.".format(before_size, after_size)
    )
def test_serial_block(test_microvm_with_api):
    """
    Test that writing to stdout never blocks the vCPU thread.
    """
    test_microvm = test_microvm_with_api
    test_microvm.jailer.daemonize = False
    test_microvm.spawn()
    # Set up the microVM with 1 vCPU so we make sure the vCPU thread
    # responsible for the SSH connection will also run the serial.
    test_microvm.basic_config(
        vcpu_count=1,
        mem_size_mib=512,
        boot_args="console=ttyS0 reboot=k panic=1 pci=off",
    )
    test_microvm.add_net_iface()
    test_microvm.start()

    # Get an initial reading of missed writes to the serial.
    fc_metrics = test_microvm.flush_metrics()
    init_count = fc_metrics["uart"]["missed_write_count"]

    screen_pid = test_microvm.screen_pid
    # Stop `screen` process which captures stdout so we stop consuming stdout.
    subprocess.check_call("kill -s STOP {}".format(screen_pid), shell=True)

    # Generate a random text file.
    exit_code, _, _ = test_microvm.ssh.run(
        "base64 /dev/urandom | head -c 100000 > /tmp/file.txt"
    )
    # Fix: this exit code was previously ignored; fail early if the
    # fixture file could not be generated.
    assert exit_code == 0

    # Dump output to terminal
    exit_code, _, _ = test_microvm.ssh.run("cat /tmp/file.txt > /dev/ttyS0")
    assert exit_code == 0

    # Check that the vCPU isn't blocked.
    exit_code, _, _ = test_microvm.ssh.run("cd /")
    assert exit_code == 0

    # Check the metrics to see if the serial missed bytes.
    fc_metrics = test_microvm.flush_metrics()
    last_count = fc_metrics["uart"]["missed_write_count"]

    # Should be significantly more than before the `cat` command.
    assert last_count - init_count > 10000
# Log line emitted when the serial device fails to register its input fd
# with the event manager; its absence is asserted by
# test_no_serial_fd_error_when_daemonized.
REGISTER_FAILED_WARNING = "Failed to register serial input fd: event_manager: failed to manage epoll file descriptor: Operation not permitted (os error 1)"
def test_no_serial_fd_error_when_daemonized(uvm_plain):
    """
    Tests that when running firecracker daemonized, the serial device
    does not try to register stdin to epoll (which would fail due to stdin no
    longer being pointed at a terminal).

    Regression test for #4037.
    """
    vm = uvm_plain
    vm.spawn()
    vm.add_net_iface()
    vm.basic_config(vcpu_count=1, mem_size_mib=512)
    vm.start()

    # Make sure the guest is up and reachable before inspecting the log.
    vm.ssh.run("true")
    assert REGISTER_FAILED_WARNING not in vm.log_data
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,907
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_balloon.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests for guest-side operations on /balloon resources."""
import logging
import time
import pytest
from retry import retry
from framework.utils import get_free_mem_ssh, run_cmd
STATS_POLLING_INTERVAL_S = 1
@retry(delay=0.5, tries=10)
def get_stable_rss_mem_by_pid(pid, percentage_delta=1):
    """
    Get the RSS memory that a guest uses, given the pid of the guest.

    Wait till the fluctuations in RSS drop below percentage_delta. If timeout
    is reached before the fluctuations drop, raise an exception.
    """

    def sample_rss_kib():
        # All values reported by pmap are in KiB; the totals are on the
        # second-to-last line, second column.
        _, output, _ = run_cmd("pmap -X {}".format(pid))
        return int(output.split("\n")[-2].split()[1], 10)

    reading_a = sample_rss_kib()
    time.sleep(1)
    reading_b = sample_rss_kib()
    print(f"RSS readings: {reading_a}, {reading_b}")
    drift = abs(reading_a - reading_b)
    # Considered stable when the relative drift is below the threshold or the
    # absolute drift is under 1 MiB; otherwise `retry` re-runs this function.
    assert 100 * drift / reading_a < percentage_delta or drift < 2**10
    return reading_b
def make_guest_dirty_memory(ssh_connection, should_oom=False, amount_mib=32):
    """Tell the guest, over ssh, to dirty `amount_mib` MiB of memory.

    Runs the guest-side `fillmem` helper, then polls its output file.
    When `should_oom` is set, expect the helper to report being OOM-killed;
    otherwise expect it to report success.
    """
    logger = logging.getLogger("make_guest_dirty_memory")

    cmd = f"/usr/local/bin/fillmem {amount_mib}"
    exit_code, stdout, stderr = ssh_connection.run(cmd)
    # add something to the logs for troubleshooting
    if exit_code != 0:
        logger.error("while running: %s", cmd)
        logger.error("stdout: %s", stdout)
        logger.error("stderr: %s", stderr)

    cmd = "cat /tmp/fillmem_output.txt"
    tries = 3
    while tries > 0:
        # it may take a bit of time to dirty the memory and the OOM to kick-in
        time.sleep(0.5)
        _, stdout, _ = ssh_connection.run(cmd)
        if stdout != "":
            break
        tries -= 1

    if should_oom:
        assert "OOM Killer stopped the program with signal 9, exit code 0" in stdout
    else:
        # NOTE: `exit_code` is still the result of the fillmem invocation
        # above, not of the `cat` polling loop.
        assert exit_code == 0, stderr
        assert "Memory filling was successful" in stdout, stdout
def _test_rss_memory_lower(test_microvm, stable_delta=1):
    """Check inflating the balloon makes guest use less rss memory."""
    # Get the firecracker pid, and open an ssh connection.
    fc_pid = test_microvm.jailer_clone_pid
    ssh = test_microvm.ssh

    # Using deflate_on_oom, get the RSS as low as possible.
    test_microvm.api.balloon.patch(amount_mib=200)

    # Baseline RSS consumption.
    init_rss = get_stable_rss_mem_by_pid(fc_pid, percentage_delta=stable_delta)

    # Return the balloon to 0; the helper waits for RSS to settle.
    test_microvm.api.balloon.patch(amount_mib=0)
    get_stable_rss_mem_by_pid(fc_pid, percentage_delta=stable_delta)

    # Dirty memory, then inflate balloon and get ballooned rss consumption.
    make_guest_dirty_memory(ssh, amount_mib=32)
    test_microvm.api.balloon.patch(amount_mib=200)
    balloon_rss = get_stable_rss_mem_by_pid(fc_pid, percentage_delta=stable_delta)

    # Check that the ballooning reclaimed the memory.
    assert balloon_rss - init_rss <= 15000
# pylint: disable=C0103
def test_rss_memory_lower(test_microvm_with_api):
    """
    Test that inflating the balloon makes guest use less rss memory.
    """
    vm = test_microvm_with_api
    vm.spawn()
    vm.basic_config()
    vm.add_net_iface()

    # Install a balloon that deflates under guest memory pressure; no stats.
    vm.api.balloon.put(amount_mib=0, deflate_on_oom=True, stats_polling_interval_s=0)

    # Start the microvm and run the shared RSS check.
    vm.start()
    _test_rss_memory_lower(vm)
# pylint: disable=C0103
def test_inflate_reduces_free(test_microvm_with_api):
    """
    Check that the output of free in guest changes with inflate.
    """
    vm = test_microvm_with_api
    vm.spawn()
    vm.basic_config()
    vm.add_net_iface()

    # Install a deflated balloon with stats polled every second.
    vm.api.balloon.put(amount_mib=0, deflate_on_oom=False, stats_polling_interval_s=1)

    vm.start()
    fc_pid = vm.jailer_clone_pid

    # Free memory as seen from the guest before ballooning.
    available_mem_deflated = get_free_mem_ssh(vm.ssh)

    # Inflate 64 MB == 16384 page balloon.
    vm.api.balloon.patch(amount_mib=64)
    # Wait for the RSS to settle after the inflate.
    get_stable_rss_mem_by_pid(fc_pid)

    # Free memory as seen from the guest after ballooning.
    available_mem_inflated = get_free_mem_ssh(vm.ssh)

    # Assert that ballooning reclaimed about 64 MB of memory.
    assert available_mem_inflated <= available_mem_deflated - 85 * 64000 / 100
# pylint: disable=C0103
@pytest.mark.parametrize("deflate_on_oom", [True, False])
def test_deflate_on_oom(test_microvm_with_api, deflate_on_oom):
    """
    Verify that setting the `deflate_on_oom` option works correctly.

    https://github.com/firecracker-microvm/firecracker/blob/main/docs/ballooning.md

    deflate_on_oom=True
      should not result in an OOM kill

    deflate_on_oom=False
      should result in an OOM kill
    """
    vm = test_microvm_with_api
    vm.spawn()
    vm.basic_config()
    vm.add_net_iface()

    # Add a deflated memory balloon.
    vm.api.balloon.put(
        amount_mib=0, deflate_on_oom=deflate_on_oom, stats_polling_interval_s=0
    )

    vm.start()
    fc_pid = vm.jailer_clone_pid

    # We get an initial reading of the RSS, then calculate the amount
    # we need to inflate the balloon with by subtracting it from the
    # VM size and adding an offset of 10 MiB in order to make sure we
    # get a lower reading than the initial one.
    initial_rss = get_stable_rss_mem_by_pid(fc_pid)
    inflate_size = 256 - int(initial_rss / 1024) + 10

    # Inflate the balloon and wait for the RSS to settle.
    vm.api.balloon.patch(amount_mib=inflate_size)
    get_stable_rss_mem_by_pid(fc_pid)

    # Check that using memory leads an out of memory error (or not).
    make_guest_dirty_memory(vm.ssh, should_oom=not deflate_on_oom)
# pylint: disable=C0103
def test_reinflate_balloon(test_microvm_with_api):
    """
    Verify that repeatedly inflating and deflating the balloon works.
    """
    vm = test_microvm_with_api
    vm.spawn()
    vm.basic_config()
    vm.add_net_iface()

    # Add a deflated memory balloon.
    vm.api.balloon.put(amount_mib=0, deflate_on_oom=True, stats_polling_interval_s=0)

    vm.start()
    fc_pid = vm.jailer_clone_pid

    # First inflate the balloon to free up the uncertain amount of memory
    # used by the kernel at boot and establish a baseline, then give back
    # the memory. Wait until boot completes first.
    vm.ssh.run("true")
    vm.api.balloon.patch(amount_mib=200)
    get_stable_rss_mem_by_pid(fc_pid)
    vm.api.balloon.patch(amount_mib=0)
    get_stable_rss_mem_by_pid(fc_pid)

    # Have the guest dirty memory, then measure.
    make_guest_dirty_memory(vm.ssh, amount_mib=32)
    first_reading = get_stable_rss_mem_by_pid(fc_pid)

    # Inflate, measure, then deflate and wait for a stable RSS.
    vm.api.balloon.patch(amount_mib=200)
    second_reading = get_stable_rss_mem_by_pid(fc_pid)
    vm.api.balloon.patch(amount_mib=0)
    get_stable_rss_mem_by_pid(fc_pid)

    # Dirty memory a second time and repeat the measurements.
    make_guest_dirty_memory(vm.ssh, amount_mib=32)
    third_reading = get_stable_rss_mem_by_pid(fc_pid)
    vm.api.balloon.patch(amount_mib=200)
    fourth_reading = get_stable_rss_mem_by_pid(fc_pid)

    # Check that the memory used is the same after regardless of the previous
    # inflate history of the balloon (with the third reading being allowed
    # to be smaller than the first, since memory allocated at booting up
    # is probably freed after the first inflation.
    assert (third_reading - first_reading) <= 20000
    assert abs(second_reading - fourth_reading) <= 20000
# pylint: disable=C0103
def test_size_reduction(test_microvm_with_api):
    """
    Verify that ballooning reduces RSS usage on a newly booted guest.
    """
    vm = test_microvm_with_api
    vm.spawn()
    vm.basic_config()
    vm.add_net_iface()

    # Add a memory balloon.
    vm.api.balloon.put(amount_mib=0, deflate_on_oom=True, stats_polling_interval_s=0)

    vm.start()
    fc_pid = vm.jailer_clone_pid

    # Check memory usage.
    first_reading = get_stable_rss_mem_by_pid(fc_pid)

    # Have the guest drop its caches.
    vm.ssh.run("sync; echo 3 > /proc/sys/vm/drop_caches")
    time.sleep(5)

    # We take the initial reading of the RSS, then calculate the amount
    # we need to inflate the balloon with by subtracting it from the
    # VM size and adding an offset of 10 MiB in order to make sure we
    # get a lower reading than the initial one.
    inflate_size = 256 - int(first_reading / 1024) + 10

    # Now inflate the balloon.
    vm.api.balloon.patch(amount_mib=inflate_size)

    # Check memory usage again.
    second_reading = get_stable_rss_mem_by_pid(fc_pid)

    # There should be a reduction of at least 10MB.
    assert first_reading - second_reading >= 10000
# pylint: disable=C0103
def test_stats(test_microvm_with_api):
    """
    Verify that balloon stats work as expected.
    """
    vm = test_microvm_with_api
    vm.spawn()
    vm.basic_config()
    vm.add_net_iface()

    # Add a memory balloon with stats enabled.
    vm.api.balloon.put(amount_mib=0, deflate_on_oom=True, stats_polling_interval_s=1)

    vm.start()
    fc_pid = vm.jailer_clone_pid

    # Get an initial reading of the stats.
    initial_stats = vm.api.balloon_stats.get().json()

    # Dirty 10MB of pages, then wait for the RSS to settle.
    make_guest_dirty_memory(vm.ssh, amount_mib=10)
    time.sleep(1)
    get_stable_rss_mem_by_pid(fc_pid)

    # Make sure that the stats catch the page faults.
    after_workload_stats = vm.api.balloon_stats.get().json()
    assert initial_stats.get("minor_faults", 0) < after_workload_stats["minor_faults"]
    assert initial_stats.get("major_faults", 0) < after_workload_stats["major_faults"]

    # Now inflate the balloon with 10MB of pages; wait for a stable RSS.
    vm.api.balloon.patch(amount_mib=10)
    get_stable_rss_mem_by_pid(fc_pid)

    # Get another reading of the stats after the polling interval has passed.
    inflated_stats = vm.api.balloon_stats.get().json()

    # Ensure the stats reflect inflating the balloon.
    assert after_workload_stats["free_memory"] > inflated_stats["free_memory"]
    assert after_workload_stats["available_memory"] > inflated_stats["available_memory"]

    # Deflate the balloon; the stats should show the increase in
    # available memory.
    vm.api.balloon.patch(amount_mib=0)
    get_stable_rss_mem_by_pid(fc_pid)

    # Get another reading of the stats after the polling interval has passed.
    deflated_stats = vm.api.balloon_stats.get().json()

    # Ensure the stats reflect deflating the balloon.
    assert inflated_stats["free_memory"] < deflated_stats["free_memory"]
    assert inflated_stats["available_memory"] < deflated_stats["available_memory"]
def test_stats_update(test_microvm_with_api):
    """
    Verify that balloon stats update correctly.
    """
    vm = test_microvm_with_api
    vm.spawn()
    vm.basic_config()
    vm.add_net_iface()

    # Add a memory balloon with stats enabled.
    vm.api.balloon.put(
        amount_mib=0,
        deflate_on_oom=True,
        stats_polling_interval_s=STATS_POLLING_INTERVAL_S,
    )

    vm.start()
    fc_pid = vm.jailer_clone_pid

    # Dirty 30MB of pages and wait for a stable RSS.
    make_guest_dirty_memory(vm.ssh, amount_mib=30)
    get_stable_rss_mem_by_pid(fc_pid)

    # Get an initial reading of the stats.
    initial_stats = vm.api.balloon_stats.get().json()

    # Inflate the balloon to trigger a change in the stats.
    vm.api.balloon.patch(amount_mib=10)

    # Wait out the polling interval, then get the updated stats.
    time.sleep(STATS_POLLING_INTERVAL_S)
    next_stats = vm.api.balloon_stats.get().json()
    assert initial_stats["available_memory"] != next_stats["available_memory"]

    # Inflate the balloon more to trigger a change in the stats.
    vm.api.balloon.patch(amount_mib=30)

    # Changing the polling interval should itself refresh the stats.
    vm.api.balloon_stats.patch(stats_polling_interval_s=60)
    final_stats = vm.api.balloon_stats.get().json()
    assert next_stats["available_memory"] != final_stats["available_memory"]
def test_balloon_snapshot(microvm_factory, guest_kernel, rootfs):
    """
    Test that the balloon works after pause/resume.
    """
    vm = microvm_factory.build(guest_kernel, rootfs)
    vm.spawn()
    vm.basic_config(vcpu_count=2, mem_size_mib=256)
    vm.add_net_iface()

    # Add a memory balloon with stats enabled.
    vm.api.balloon.put(
        amount_mib=0,
        deflate_on_oom=True,
        stats_polling_interval_s=STATS_POLLING_INTERVAL_S,
    )
    vm.start()

    # Dirty 60MB of pages.
    make_guest_dirty_memory(vm.ssh, amount_mib=60)
    time.sleep(1)

    # Get the firecracker pid of the base VM.
    fc_pid = vm.jailer_clone_pid

    # Check memory usage.
    first_reading = get_stable_rss_mem_by_pid(fc_pid)

    # Now inflate the balloon with 20MB of pages.
    vm.api.balloon.patch(amount_mib=20)

    # Check memory usage again.
    second_reading = get_stable_rss_mem_by_pid(fc_pid)

    # There should be a reduction in RSS, but it's inconsistent.
    # We only test that the reduction happens.
    assert first_reading > second_reading

    snapshot = vm.snapshot_full()

    # Restore a clone from the snapshot and resume it.
    restored_vm = microvm_factory.build()
    restored_vm.spawn()
    restored_vm.restore_from_snapshot(snapshot)
    restored_vm.resume()

    # Attempt to connect to the resumed microvm.
    restored_vm.ssh.run("true")

    # Get the pid of the restored Firecracker process.
    fc_pid = restored_vm.jailer_clone_pid

    # Wait out the polling interval, then get the updated stats.
    time.sleep(STATS_POLLING_INTERVAL_S)
    stats_after_snap = restored_vm.api.balloon_stats.get().json()

    # Check memory usage.
    third_reading = get_stable_rss_mem_by_pid(fc_pid)

    # Dirty 60MB of pages.
    make_guest_dirty_memory(restored_vm.ssh, amount_mib=60)

    # Check memory usage.
    fourth_reading = get_stable_rss_mem_by_pid(fc_pid)
    assert fourth_reading > third_reading

    # Inflate the balloon with another 20MB of pages.
    restored_vm.api.balloon.patch(amount_mib=40)
    fifth_reading = get_stable_rss_mem_by_pid(fc_pid)

    # There should be a reduction in RSS, but it's inconsistent.
    # We only test that the reduction happens.
    assert fourth_reading > fifth_reading

    # Get the stats after we take a snapshot and dirty some memory,
    # then reclaim it.
    latest_stats = restored_vm.api.balloon_stats.get().json()

    # Ensure the stats are still working after restore and show
    # that the balloon inflated.
    assert stats_after_snap["available_memory"] > latest_stats["available_memory"]
def test_snapshot_compatibility(microvm_factory, guest_kernel, rootfs):
    """
    Test that the balloon serializes correctly.
    """
    vm = microvm_factory.build(guest_kernel, rootfs)
    vm.spawn()
    vm.basic_config(vcpu_count=2, mem_size_mib=256)

    # Add a memory balloon with stats enabled.
    vm.api.balloon.put(amount_mib=0, deflate_on_oom=True, stats_polling_interval_s=1)
    vm.start()

    # Taking a full snapshot must succeed with a balloon device attached.
    vm.snapshot_full()
def test_memory_scrub(microvm_factory, guest_kernel, rootfs):
    """
    Test that the memory is zeroed after deflate.
    """
    vm = microvm_factory.build(guest_kernel, rootfs)
    vm.spawn()
    vm.basic_config(vcpu_count=2, mem_size_mib=256)
    vm.add_net_iface()

    # Add a memory balloon with stats enabled.
    vm.api.balloon.put(amount_mib=0, deflate_on_oom=True, stats_polling_interval_s=1)
    vm.start()

    # Dirty 60MB of pages, then reclaim them with a 60MB inflate.
    make_guest_dirty_memory(vm.ssh, amount_mib=60)
    vm.api.balloon.patch(amount_mib=60)

    fc_pid = vm.jailer_clone_pid

    # Wait for the inflate to complete.
    get_stable_rss_mem_by_pid(fc_pid)

    # Deflate the balloon completely and wait for the deflate to complete.
    vm.api.balloon.patch(amount_mib=0)
    get_stable_rss_mem_by_pid(fc_pid)

    # Guest-side check; presumably `readmem 60 1` verifies the reclaimed
    # memory reads back zeroed (see test docstring) — confirm against the
    # readmem helper's source.
    exit_code, _, _ = vm.ssh.run("/usr/local/bin/readmem {} {}".format(60, 1))
    assert exit_code == 0
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,908
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/performance/test_block_performance.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Performance benchmark for block device emulation."""
import concurrent
import json
import os
import shutil
from enum import Enum
from pathlib import Path
import pytest
import framework.stats as st
import host_tools.drive as drive_tools
from framework.stats.baseline import Provider as BaselineProvider
from framework.stats.metadata import DictProvider as DictMetadataProvider
from framework.utils import (
CmdBuilder,
get_cpu_percent,
get_kernel_version,
run_cmd,
summarize_cpu_percent,
)
from integration_tests.performance.configs import defs
TEST_ID = "block_performance"
kernel_version = get_kernel_version(level=1)
CONFIG_NAME_REL = "test_{}_config_{}.json".format(TEST_ID, kernel_version)
CONFIG_NAME_ABS = defs.CFG_LOCATION / CONFIG_NAME_REL
FIO = "fio"
# Measurements tags.
CPU_UTILIZATION_VMM = "cpu_utilization_vmm"
CPU_UTILIZATION_VMM_SAMPLES_TAG = "cpu_utilization_vmm_samples"
CPU_UTILIZATION_VCPUS_TOTAL = "cpu_utilization_vcpus_total"
# size of the block device used in the test, in MB
BLOCK_DEVICE_SIZE_MB = 2048
# How many fio workloads should be spawned per vcpu
LOAD_FACTOR = 1
# Time (in seconds) for which fio "warms up"
WARMUP_SEC = 10
# Time (in seconds) for which fio runs after warmup is done
RUNTIME_SEC = 300
# pylint: disable=R0903
class BlockBaselinesProvider(BaselineProvider):
    """Implementation of a baseline provider for the block performance test."""

    def __init__(self, env_id, fio_id, raw_baselines):
        """Block baseline provider initialization."""
        super().__init__(raw_baselines)
        self._tag = "baselines/{}/" + env_id + "/{}/" + fio_id

    def get(self, metric_name: str, statistic_name: str) -> dict:
        """Return the baseline value corresponding to the key."""
        baseline = self._baselines.get(self._tag.format(metric_name, statistic_name))
        if not baseline:
            return None
        target = baseline.get("target")
        return {
            "target": target,
            # The allowed delta is expressed as a percentage of the target.
            "delta": baseline.get("delta_percentage") * target / 100,
        }
def run_fio(env_id, basevm, mode, bs):
    """Run a fio test in the specified mode with block size bs."""
    logs_path = f"{basevm.jailer.chroot_base_with_id()}/{env_id}/{mode}{bs}"

    # Compute the fio command. Pin it to the first guest CPU.
    fio_cmd = (
        CmdBuilder(FIO)
        .with_arg(f"--name={mode}-{bs}")
        .with_arg(f"--rw={mode}")
        .with_arg(f"--bs={bs}")
        .with_arg("--filename=/dev/vdb")
        .with_arg("--time_base=1")
        .with_arg(f"--size={BLOCK_DEVICE_SIZE_MB}M")
        .with_arg("--direct=1")
        .with_arg("--ioengine=libaio")
        .with_arg("--iodepth=32")
        .with_arg(f"--ramp_time={WARMUP_SEC}")
        .with_arg(f"--numjobs={basevm.vcpus_count}")
        # Set affinity of the entire fio process to a set of vCPUs equal in size to number of workers
        .with_arg(
            f"--cpus_allowed={','.join(str(i) for i in range(basevm.vcpus_count))}"
        )
        # Instruct fio to pin one worker per vcpu
        .with_arg("--cpus_allowed_policy=split")
        .with_arg("--randrepeat=0")
        .with_arg(f"--runtime={RUNTIME_SEC}")
        .with_arg(f"--write_bw_log={mode}{bs}")
        .with_arg("--log_avg_msec=1000")
        .with_arg("--output-format=json+")
        .build()
    )

    # Disable the guest I/O scheduler so fio measures the device directly.
    returncode, _, stderr = basevm.ssh.run("echo 'none' > /sys/block/vdb/queue/scheduler")
    assert returncode == 0, stderr
    assert stderr == ""

    # First, flush all guest cached data to host, then drop guest FS caches.
    returncode, _, stderr = basevm.ssh.run("sync")
    assert returncode == 0, stderr
    assert stderr == ""
    returncode, _, stderr = basevm.ssh.run("echo 3 > /proc/sys/vm/drop_caches")
    assert returncode == 0, stderr
    assert stderr == ""

    # Then, flush all host cached data to hardware, also drop host FS caches.
    run_cmd("sync")
    run_cmd("echo 3 > /proc/sys/vm/drop_caches")

    # Start the CPU load monitor.
    with concurrent.futures.ThreadPoolExecutor() as executor:
        cpu_load_future = executor.submit(
            get_cpu_percent,
            basevm.jailer_clone_pid,
            RUNTIME_SEC,
            omit=WARMUP_SEC,
        )

        # Print the fio command in the log and run it
        returncode, _, stderr = basevm.ssh.run(f"cd /tmp; {fio_cmd}")
        assert returncode == 0, stderr
        assert stderr == ""

        # Recreate the host-side directory that collects the fio logs.
        if os.path.isdir(logs_path):
            shutil.rmtree(logs_path)
        os.makedirs(logs_path)

        basevm.ssh.scp_get("/tmp/*.log", logs_path)
        returncode, _, stderr = basevm.ssh.run("rm /tmp/*.log")
        assert returncode == 0, stderr

        return cpu_load_future.result()
class DataDirection(Enum):
    """Operation type."""

    READ = 0
    WRITE = 1
    TRIM = 2

    def __str__(self):
        """Representation as string."""
        # Member names mirror their string form exactly, so the lowercase
        # name is the canonical representation ("read"/"write"/"trim").
        return self.name.lower()
def read_values(cons, numjobs, env_id, mode, bs, measurement, logs_path):
    """Read the values for each measurement.

    The values are logged once every second. The time resolution is in msec.
    The log file format documentation can be found here:
    https://fio.readthedocs.io/en/latest/fio_doc.html#log-file-formats

    Yields `(metric_name, value, unit)` tuples for per-job samples and
    feeds the per-second totals into `cons` via `consume_data`.
    """
    # values[measurement_id][second_index] -> list of per-job readings.
    values = {}

    for job_id in range(numjobs):
        # fio names per-job logs `<name>_<measurement>.<job>.log`, 1-based.
        file_path = (
            Path(logs_path)
            / env_id
            / f"{mode}{bs}"
            / f"{mode}{bs}_{measurement}.{job_id + 1}.log"
        )
        lines = file_path.read_text(encoding="utf-8").splitlines()

        # One log line per sample here; kept as a stride so the parsing
        # below generalizes to logs with one line per data direction.
        direction_count = 1

        for idx in range(0, len(lines), direction_count):
            value_idx = idx // direction_count
            for direction in range(direction_count):
                # Line format: time_msec, value, data_direction, ...
                data = lines[idx + direction].split(sep=",")
                data_dir = DataDirection(int(data[2].strip()))
                measurement_id = f"{measurement}_{str(data_dir)}"
                if measurement_id not in values:
                    values[measurement_id] = {}
                if value_idx not in values[measurement_id]:
                    values[measurement_id][value_idx] = []
                values[measurement_id][value_idx].append(int(data[1].strip()))

    for measurement_id, data in values.items():
        for time in data:
            # Discard data points which were not measured by all jobs.
            if len(data[time]) != numjobs:
                continue
            # Emit each job's sample individually ...
            yield from [
                (f"{measurement_id}_{vcpu}", throughput, "Megabits/Second")
                for vcpu, throughput in enumerate(data[time])
            ]
            # ... and the aggregated total to the consumer.
            value = sum(data[time])
            cons.consume_data(measurement_id, value)
def consume_fio_output(cons, cpu_load, numjobs, mode, bs, env_id, logs_path):
    """Consumer function."""
    vmm_util, vcpu_util = summarize_cpu_percent(cpu_load)
    cons.consume_stat("Avg", CPU_UTILIZATION_VMM, vmm_util)
    cons.consume_stat("Avg", CPU_UTILIZATION_VCPUS_TOTAL, vcpu_util)

    # Emit each thread's first series of CPU utilization samples.
    for thread_name, samples in cpu_load.items():
        first_series = list(samples.values())[0]
        for sample in first_series:
            yield f"cpu_utilization_{thread_name}", sample, "Percent"

    # Forward the fio bandwidth log values.
    yield from read_values(cons, numjobs, env_id, mode, bs, "bw", logs_path)
@pytest.mark.nonci
@pytest.mark.timeout(RUNTIME_SEC * 1000)
# NOTE(review): the previous comment called this timeout "1.40 hours", but
# 300 * 1000 seconds is ~83 hours — confirm the intended value/unit.
@pytest.mark.parametrize("vcpus", [1, 2], ids=["1vcpu", "2vcpu"])
@pytest.mark.parametrize("fio_mode", ["randread", "randwrite"])
@pytest.mark.parametrize("fio_block_size", [4096], ids=["bs4096"])
def test_block_performance(
    microvm_factory,
    guest_kernel,
    rootfs,
    vcpus,
    fio_mode,
    fio_block_size,
    io_engine,
    st_core,
):
    """
    Execute block device emulation benchmarking scenarios.
    """
    guest_mem_mib = 1024
    vm = microvm_factory.build(guest_kernel, rootfs, monitor_memory=False)
    vm.spawn(log_level="Info")
    vm.basic_config(vcpu_count=vcpus, mem_size_mib=guest_mem_mib)
    vm.add_net_iface()

    # Add a secondary block device for benchmark tests.
    scratch_fs = drive_tools.FilesystemFile(
        os.path.join(vm.fsfiles, "scratch"), BLOCK_DEVICE_SIZE_MB
    )
    vm.add_drive("scratch", scratch_fs.path, io_engine=io_engine)
    vm.start()

    # Pin each Firecracker thread to its own host CPU: VMM first, then the
    # API thread, then one CPU per vCPU.
    next_cpu = 0
    vm.pin_vmm(next_cpu)
    next_cpu += 1
    vm.pin_api(next_cpu)
    for vcpu_id in range(vm.vcpus_count):
        next_cpu += 1
        vm.pin_vcpu(vcpu_id, next_cpu)

    # Define test dimensions.
    st_core.name = TEST_ID
    microvm_cfg = f"{vcpus}vcpu_{guest_mem_mib}mb.json"
    st_core.custom.update(
        {
            "guest_config": microvm_cfg.removesuffix(".json"),
            "io_engine": io_engine,
        }
    )

    env_id = f"{st_core.env_id_prefix}/{io_engine.lower()}_{microvm_cfg}"
    fio_id = f"{fio_mode}-bs{fio_block_size}"

    # Producer runs fio inside the guest; consumer parses its logs against
    # the baselines for this environment.
    st_prod = st.producer.LambdaProducer(
        func=run_fio,
        func_kwargs={
            "env_id": env_id,
            "basevm": vm,
            "mode": fio_mode,
            "bs": fio_block_size,
        },
    )
    raw_baselines = json.loads(CONFIG_NAME_ABS.read_text("utf-8"))
    st_cons = st.consumer.LambdaConsumer(
        metadata_provider=DictMetadataProvider(
            raw_baselines["measurements"],
            BlockBaselinesProvider(env_id, fio_id, raw_baselines),
        ),
        func=consume_fio_output,
        func_kwargs={
            "numjobs": vm.vcpus_count,
            "mode": fio_mode,
            "bs": fio_block_size,
            "env_id": env_id,
            "logs_path": vm.jailer.chroot_base_with_id(),
        },
    )
    st_core.add_pipe(st_prod, st_cons, tag=f"{env_id}/{fio_id}")

    # Gather results and verify pass criteria.
    st_core.run_exercise()
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,909
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_cpu_features_aarch64.py
|
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests for the CPU features for aarch64."""
import platform
import re
import pytest
import framework.utils_cpuid as cpuid_utils
from framework.utils_cpu_templates import nonci_on_arm
PLATFORM = platform.machine()
# Expected guest CPU feature flags (the "Flags" row reported by lscpu), keyed
# by instance generation (G2 = m6g, G3 = c7g), guest kernel version, and CPU
# template. Used by _check_cpu_features_arm below.
DEFAULT_G2_FEATURES = (
    "fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp "
    "asimdhp cpuid asimdrdm lrcpc dcpop asimddp ssbs"
)
DEFAULT_G2_FEATURES_NO_SSBS = (
    "fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp "
    "asimdhp cpuid asimdrdm lrcpc dcpop asimddp"
)
DEFAULT_G3_FEATURES_4_14 = (
    "fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp "
    "asimdhp cpuid asimdrdm jscvt fcma lrcpc dcpop sha3 sm3 sm4 asimddp "
    "sha512 asimdfhm dit uscat ilrcpc flagm ssbs"
)
DEFAULT_G3_FEATURES_5_10 = (
    "fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp "
    "asimdhp cpuid asimdrdm jscvt fcma lrcpc dcpop sha3 sm3 sm4 asimddp "
    "sha512 asimdfhm dit uscat ilrcpc flagm ssbs dcpodp i8mm bf16 dgh rng"
)
# On 4.14 guest kernels SVE/PAC are not exposed, so this matches the default set.
DEFAULT_G3_FEATURES_WITH_SVE_AND_PAC_4_14 = (
    "fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp "
    "asimdhp cpuid asimdrdm jscvt fcma lrcpc dcpop sha3 sm3 sm4 asimddp "
    "sha512 asimdfhm dit uscat ilrcpc flagm ssbs"
)
# NOTE: a conflicting earlier definition of this constant (which additionally
# listed "dcpodp i8mm bf16 dgh rng") was silently shadowed by this binding;
# the dead first binding has been removed. The effective value is unchanged.
DEFAULT_G3_FEATURES_NO_SSBS_4_14 = (
    "fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp "
    "asimdhp cpuid asimdrdm jscvt fcma lrcpc dcpop sha3 sm3 sm4 asimddp "
    "sha512 asimdfhm dit uscat ilrcpc flagm"
)
DEFAULT_G3_FEATURES_NO_SSBS_5_10 = (
    "fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp "
    "asimdhp cpuid asimdrdm jscvt fcma lrcpc dcpop sha3 sm3 sm4 asimddp "
    "sha512 asimdfhm dit uscat ilrcpc flagm dcpodp i8mm bf16 dgh rng"
)
DEFAULT_G3_FEATURES_WITH_SVE_AND_PAC_5_10 = (
    "fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp "
    "asimdhp cpuid asimdrdm jscvt fcma lrcpc dcpop sha3 sm3 sm4 asimddp "
    "sha512 sve asimdfhm dit uscat ilrcpc flagm ssbs paca pacg dcpodp svei8mm svebf16 i8mm bf16 dgh rng"
)
DEFAULT_G3_FEATURES_V1N1 = (
    "fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp "
    "asimdhp cpuid asimdrdm lrcpc dcpop asimddp ssbs"
)
def _check_cpu_features_arm(test_microvm, guest_kv, template_name=None):
    """Assert the guest's lscpu 'Flags' match the expected feature set.

    The expected set is selected by (host instance type, guest kernel
    version, CPU template name). Unknown combinations fall through with an
    empty flag list, which makes check_guest_cpuid_output fail explicitly.
    """
    expected_cpu_features = {"Flags": []}
    match (cpuid_utils.get_instance_type(), guest_kv, template_name):
        # m6g (Graviton2): feature set does not depend on guest kernel version.
        case ("m6g.metal", _, "aarch64_remove_ssbs"):
            expected_cpu_features["Flags"] = DEFAULT_G2_FEATURES_NO_SSBS
        case ("m6g.metal", _, "aarch64_v1n1"):
            expected_cpu_features["Flags"] = DEFAULT_G2_FEATURES
        case ("m6g.metal", _, None):
            expected_cpu_features["Flags"] = DEFAULT_G2_FEATURES
        # c7g (Graviton3): feature set depends on guest kernel and template.
        case ("c7g.metal", "4.14", "aarch64_remove_ssbs"):
            expected_cpu_features["Flags"] = DEFAULT_G3_FEATURES_NO_SSBS_4_14
        case ("c7g.metal", "5.10", "aarch64_remove_ssbs"):
            expected_cpu_features["Flags"] = DEFAULT_G3_FEATURES_NO_SSBS_5_10
        case ("c7g.metal", "4.14", "aarch64_with_sve_and_pac"):
            expected_cpu_features["Flags"] = DEFAULT_G3_FEATURES_WITH_SVE_AND_PAC_4_14
        case ("c7g.metal", "5.10", "aarch64_with_sve_and_pac"):
            expected_cpu_features["Flags"] = DEFAULT_G3_FEATURES_WITH_SVE_AND_PAC_5_10
        case ("c7g.metal", _, "aarch64_v1n1"):
            expected_cpu_features["Flags"] = DEFAULT_G3_FEATURES_V1N1
        case ("c7g.metal", "4.14", None):
            expected_cpu_features["Flags"] = DEFAULT_G3_FEATURES_4_14
        case ("c7g.metal", "5.10", None):
            expected_cpu_features["Flags"] = DEFAULT_G3_FEATURES_5_10
    cpuid_utils.check_guest_cpuid_output(
        test_microvm, "lscpu", None, ":", expected_cpu_features
    )
def get_cpu_template_dir(cpu_template):
    """Map a CPU template (possibly falsy) to a snapshot-artifact dir name.

    Snapshot creation and restore tests use this name to store and later
    locate snapshot artifacts; a missing template maps to "none".
    """
    return cpu_template or "none"
@pytest.mark.skipif(
    PLATFORM != "aarch64",
    reason="This is aarch64 specific test.",
)
def test_default_cpu_features(microvm_factory, guest_kernel, rootfs_ubuntu_22):
    """
    Check the CPU features for a microvm with the default (no template) config.
    """
    vm = microvm_factory.build(guest_kernel, rootfs_ubuntu_22, monitor_memory=False)
    vm.spawn()
    vm.basic_config()
    vm.add_net_iface()
    vm.start()
    # Guest kernel version (e.g. "5.10") selects the expected feature set.
    guest_kv = re.search(r"vmlinux-(\d+\.\d+)", guest_kernel.name).group(1)
    _check_cpu_features_arm(vm, guest_kv)
@pytest.mark.skipif(
    PLATFORM != "aarch64",
    reason="This is aarch64 specific test.",
)
@nonci_on_arm
def test_cpu_features_with_static_template(
    microvm_factory, guest_kernel, rootfs_ubuntu_22, cpu_template
):
    """
    Check the CPU features for a microvm configured with a static CPU template.
    """
    vm = microvm_factory.build(guest_kernel, rootfs_ubuntu_22, monitor_memory=False)
    vm.spawn()
    vm.basic_config(cpu_template=cpu_template)
    vm.add_net_iface()
    vm.start()
    guest_kv = re.search(r"vmlinux-(\d+\.\d+)", guest_kernel.name).group(1)
    # NOTE(review): the expectation is pinned to "aarch64_v1n1" regardless of
    # which static template was applied — confirm this is intended.
    _check_cpu_features_arm(vm, guest_kv, "aarch64_v1n1")
@pytest.mark.skipif(
    PLATFORM != "aarch64",
    reason="This is aarch64 specific test.",
)
@nonci_on_arm
def test_cpu_features_with_custom_template(
    microvm_factory, guest_kernel, rootfs_ubuntu_22, custom_cpu_template
):
    """
    Check the CPU features for a microvm configured with a custom CPU template.
    """
    vm = microvm_factory.build(guest_kernel, rootfs_ubuntu_22, monitor_memory=False)
    vm.spawn()
    vm.basic_config()
    # Apply the custom template via the /cpu-config API before boot.
    vm.api.cpu_config.put(**custom_cpu_template["template"])
    vm.add_net_iface()
    vm.start()
    guest_kv = re.search(r"vmlinux-(\d+\.\d+)", guest_kernel.name).group(1)
    _check_cpu_features_arm(vm, guest_kv, custom_cpu_template["name"])
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,910
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/framework/stats/baseline.py
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Module for common statistic tests baselines providers."""
from abc import ABC, abstractmethod
from collections import defaultdict
from framework.properties import global_props
from framework.utils import DictQuery
# pylint: disable=R0903
class Provider(ABC):
    """Baselines provider abstract class.

    Concrete providers resolve per-metric baselines from the parsed
    host-specific baseline data stored in ``self._baselines``.
    """

    def __init__(self, raw_baselines: dict):
        """Parse *raw_baselines* for the current host into a queryable DictQuery."""
        self._baselines = DictQuery(read_baseline(raw_baselines))

    @abstractmethod
    def get(self, metric_name: str, statistic_name: str) -> dict:
        """
        Return the baselines corresponding to given metric (e.g. 'vmm_cpu_utilization') and statistic (e.g. 'Avg') combination.
        """
def read_baseline(data: dict):
    """Extract the baseline set matching the current host from raw data.

    Indexes the raw baseline file by (instance type, CPU model) and returns
    the entry for this host's instance/CPU pair (or None when no match),
    together with the host CPU model name.
    """
    per_host = defaultdict(dict)
    for instance_name, instance_data in data["hosts"]["instances"].items():
        for cpu_entry in instance_data["cpus"]:
            model = cpu_entry["model"]
            for baseline_name, baseline_value in cpu_entry["baselines"].items():
                per_host[instance_name, model][baseline_name] = baseline_value
    host_key = (global_props.instance, global_props.cpu_model)
    return {
        "baselines": per_host.get(host_key),
        "model": global_props.cpu_model,
    }
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,911
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_initrd.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests for initrd."""
from framework.microvm import Serial
INITRD_FILESYSTEM = "rootfs"
def test_microvm_initrd_with_serial(uvm_with_initrd):
    """
    Test that a boot using initrd successfully loads the root filesystem.
    """
    vm = uvm_with_initrd
    # Run in the foreground so the serial console can be attached.
    vm.jailer.daemonize = False
    vm.spawn()
    vm.memory_monitor = None
    vm.basic_config(
        add_root_device=False,
        vcpu_count=1,
        boot_args="console=ttyS0 reboot=k panic=1 pci=off",
        use_initrd=True,
    )
    vm.start()
    serial = Serial(vm)
    serial.open()
    # Wait for the shell prompt, then verify / is mounted from the initrd.
    serial.rx(token="# ")
    serial.tx("mount |grep rootfs")
    serial.rx(token=f"rootfs on / type {INITRD_FILESYSTEM}")
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,912
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/framework/utils_imdsv2.py
|
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""A simple IMDSv2 client
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-categories.html
Important! For this client to work in a container scenario, make sure your
instances are set with an adequate hop limit (2 for example). See
`ec2:MetadataHttpPutResponseHopLimit`
"""
import time
import requests
IMDSV2_HDR_TOKEN_TTL = "X-aws-ec2-metadata-token-ttl-seconds"
IMDSV2_HDR_TOKEN = "X-aws-ec2-metadata-token"
class IMDSv2Client:
    """
    A simple IMDSv2 client.

    >>> IMDSv2Client().get("/meta-data/instance-type")
    """

    def __init__(self, endpoint="http://169.254.169.254", version="latest"):
        self.endpoint = endpoint
        self.version = version
        # Token time-to-live requested from IMDS, in seconds (21600 = 6h max).
        self.ttl = 21600
        # Epoch time after which the cached token must be refreshed.
        self.token_expiry_time = 0
        self.token = None

    def get_token(self):
        """Get a token from IMDSv2, refreshing the cached one when expired."""
        if self.token_expiry_time < time.time():
            headers = {IMDSV2_HDR_TOKEN_TTL: str(self.ttl)}
            # To get a token, docs say to always use latest
            url = f"{self.endpoint}/latest/api/token"
            res = requests.put(url, headers=headers, timeout=2)
            # NOTE(review): status code is not checked here, so an IMDS error
            # body would be cached as the token — confirm this is acceptable.
            self.token = res.content  # bytes; requests accepts bytes header values
            self.token_expiry_time = time.time() + self.ttl
        return self.token

    def get(self, path):
        """
        Get a metadata path from IMDSv2

        >>> IMDSv2Client().get("/meta-data/instance-type")
        >>> m5d.metal
        """
        headers = {IMDSV2_HDR_TOKEN: self.get_token()}
        url = f"{self.endpoint}/{self.version}{path}"
        res = requests.get(url, headers=headers, timeout=2)
        if res.status_code != 200:
            raise Exception(f"IMDSv2 returned {res.status_code} for {url}")
        return res.text
# Module-level singleton client plus a convenience alias for quick lookups.
IMDS_V2 = IMDSv2Client()
imdsv2_get = IMDS_V2.get
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,913
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_snapshot_editor.py
|
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests for snapshot-editor tool."""
import platform
import pytest
import host_tools.cargo_build as host
from framework import utils
PLATFORM = platform.machine()
MIDR_EL1 = hex(0x603000000013C000)
@pytest.mark.skipif(
    PLATFORM != "aarch64",
    reason="This is aarch64 specific test.",
)
def test_remove_regs(uvm_nano, microvm_factory):
    """
    This test verifies the `remove-regs` method of `snapshot-editor`.

    Here we create a snapshot and try to remove the MIDR_EL1 register
    from it. Then we try to restore a uVM from the edited snapshot.
    """
    vm = uvm_nano
    vm.add_net_iface()
    vm.start()
    snapshot = vm.snapshot_full()
    snap_editor = host.get_binary("snapshot-editor")
    # Test that MIDR_EL1 is in the snapshot
    cmd = [
        str(snap_editor),
        "info-vmstate",
        "vcpu-states",
        "--vmstate-path",
        str(snapshot.vmstate),
    ]
    _, stdout, _ = utils.run_cmd(cmd)
    assert MIDR_EL1 in stdout
    # Remove MIDR_EL1 register from the snapshot (in place: output overwrites input)
    cmd = [
        str(snap_editor),
        "edit-vmstate",
        "remove-regs",
        "--vmstate-path",
        str(snapshot.vmstate),
        "--output-path",
        str(snapshot.vmstate),
        str(MIDR_EL1),
    ]
    utils.run_cmd(cmd)
    # Test that MIDR_EL1 is not in the snapshot
    cmd = [
        str(snap_editor),
        "info-vmstate",
        "vcpu-states",
        "--vmstate-path",
        str(snapshot.vmstate),
    ]
    _, stdout, _ = utils.run_cmd(cmd)
    assert MIDR_EL1 not in stdout
    # test that we can restore from a snapshot
    new_vm = microvm_factory.build()
    new_vm.spawn()
    new_vm.restore_from_snapshot(snapshot, resume=True)
    # Attempt to connect to resumed microvm.
    # Verify if guest can run commands.
    exit_code, _, _ = new_vm.ssh.run("ls")
    assert exit_code == 0
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,914
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_mmds.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests that verify MMDS related functionality."""
# pylint: disable=too-many-lines
import random
import string
import time
import pytest
from framework.properties import global_props
from framework.utils import (
configure_mmds,
generate_mmds_get_request,
generate_mmds_session_token,
populate_data_store,
run_guest_cmd,
)
# Minimum lifetime of token.
MIN_TOKEN_TTL_SECONDS = 1
# Maximum lifetime of token.
MAX_TOKEN_TTL_SECONDS = 21600
# Default IPv4 value for MMDS.
DEFAULT_IPV4 = "169.254.169.254"
# MMDS versions supported.
MMDS_VERSIONS = ["V2", "V1"]
def _validate_mmds_snapshot(
    basevm,
    microvm_factory,
    version,
    target_fc_version=None,
    fc_binary_path=None,
    jailer_binary_path=None,
):
    """Test MMDS behaviour across snap-restore.

    Configures MMDS on *basevm*, verifies metadata access, snapshots the VM,
    restores a clone (optionally with a different FC binary/jailer), and
    checks that the MMDS config persists while tokens and the data store
    do not survive the restore.
    """
    ipv4_address = "169.254.169.250"
    # Configure MMDS version with custom IPv4 address.
    configure_mmds(
        basevm,
        version=version,
        iface_ids=["eth0"],
        ipv4_address=ipv4_address,
        fc_version=target_fc_version,
    )
    expected_mmds_config = {
        "version": version,
        "ipv4_address": ipv4_address,
        "network_interfaces": ["eth0"],
    }
    response = basevm.api.vm_config.get()
    assert response.json()["mmds-config"] == expected_mmds_config
    data_store = {"latest": {"meta-data": {"ami-id": "ami-12345678"}}}
    populate_data_store(basevm, data_store)
    basevm.start()
    ssh_connection = basevm.ssh
    # Route MMDS traffic through the guest's eth0.
    run_guest_cmd(ssh_connection, f"ip route add {ipv4_address} dev eth0", "")
    # Generate token if needed.
    token = None
    if version == "V2":
        token = generate_mmds_session_token(ssh_connection, ipv4_address, token_ttl=60)
    # Fetch metadata.
    cmd = generate_mmds_get_request(
        ipv4_address,
        token=token,
    )
    run_guest_cmd(ssh_connection, cmd, data_store, use_json=True)
    # Create snapshot.
    snapshot = basevm.snapshot_full(target_version=target_fc_version)
    # Resume microVM and ensure session token is still valid on the base.
    response = basevm.resume()
    # Fetch metadata again using the same token.
    run_guest_cmd(ssh_connection, cmd, data_store, use_json=True)
    # Kill base microVM.
    basevm.kill()
    # Load microVM clone from snapshot.
    microvm = microvm_factory.build(
        fc_binary_path=fc_binary_path, jailer_binary_path=jailer_binary_path
    )
    microvm.spawn()
    microvm.restore_from_snapshot(snapshot)
    microvm.resume()
    ssh_connection = microvm.ssh
    # Check the reported MMDS config.
    response = microvm.api.vm_config.get()
    assert response.json()["mmds-config"] == expected_mmds_config
    if version == "V1":
        # Verify that V2 requests don't work
        assert (
            generate_mmds_session_token(ssh_connection, ipv4_address, token_ttl=60)
            == "Not allowed HTTP method."
        )
        token = None
    else:
        # Attempting to reuse the token across a restore must fail.
        cmd = generate_mmds_get_request(ipv4_address, token=token)
        run_guest_cmd(ssh_connection, cmd, "MMDS token not valid.")
        # Generate token.
        token = generate_mmds_session_token(ssh_connection, ipv4_address, token_ttl=60)
    # Data store is empty after a restore.
    cmd = generate_mmds_get_request(ipv4_address, token=token)
    run_guest_cmd(ssh_connection, cmd, "null")
    # Now populate the store.
    populate_data_store(microvm, data_store)
    # Fetch metadata.
    run_guest_cmd(ssh_connection, cmd, data_store, use_json=True)
@pytest.mark.parametrize("version", MMDS_VERSIONS)
def test_custom_ipv4(test_microvm_with_api, version):
    """
    Test the API for MMDS custom ipv4 support.

    Rejects invalid/non-link-local addresses, then verifies metadata reads
    (leaf and non-leaf nodes, with and without trailing slash) through a
    custom MMDS address.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    data_store = {
        "latest": {
            "meta-data": {
                "ami-id": "ami-12345678",
                "reservation-id": "r-fea54097",
                "local-hostname": "ip-10-251-50-12.ec2.internal",
                "public-hostname": "ec2-203-0-113-25.compute-1.amazonaws.com",
                "network": {
                    "interfaces": {
                        "macs": {
                            "02:29:96:8f:6a:2d": {
                                "device-number": "13345342",
                                "local-hostname": "localhost",
                                "subnet-id": "subnet-be9b61d",
                            }
                        }
                    }
                },
            }
        }
    }
    populate_data_store(test_microvm, data_store)
    # Attach network device.
    test_microvm.add_net_iface()
    # Invalid values IPv4 address.
    with pytest.raises(RuntimeError):
        test_microvm.api.mmds_config.put(ipv4_address="", network_interfaces=["eth0"])
    with pytest.raises(RuntimeError):
        test_microvm.api.mmds_config.put(
            ipv4_address="1.1.1.1", network_interfaces=["eth0"]
        )
    ipv4_address = "169.254.169.250"
    # Configure MMDS with custom IPv4 address.
    configure_mmds(
        test_microvm, iface_ids=["eth0"], version=version, ipv4_address=ipv4_address
    )
    test_microvm.basic_config(vcpu_count=1)
    test_microvm.start()
    ssh_connection = test_microvm.ssh
    run_guest_cmd(ssh_connection, f"ip route add {ipv4_address} dev eth0", "")
    token = None
    if version == "V2":
        # Generate token.
        token = generate_mmds_session_token(ssh_connection, ipv4_address, token_ttl=60)
    pre = generate_mmds_get_request(
        ipv4_address,
        token=token,
    )
    cmd = pre + "latest/meta-data/ami-id"
    run_guest_cmd(ssh_connection, cmd, "ami-12345678", use_json=True)
    # The request is still valid if we append a
    # trailing slash to a leaf node.
    cmd = pre + "latest/meta-data/ami-id/"
    run_guest_cmd(ssh_connection, cmd, "ami-12345678", use_json=True)
    cmd = (
        pre + "latest/meta-data/network/interfaces/macs/" "02:29:96:8f:6a:2d/subnet-id"
    )
    run_guest_cmd(ssh_connection, cmd, "subnet-be9b61d", use_json=True)
    # Test reading a non-leaf node WITHOUT a trailing slash.
    cmd = pre + "latest/meta-data"
    run_guest_cmd(ssh_connection, cmd, data_store["latest"]["meta-data"], use_json=True)
    # Test reading a non-leaf node with a trailing slash.
    cmd = pre + "latest/meta-data/"
    run_guest_cmd(ssh_connection, cmd, data_store["latest"]["meta-data"], use_json=True)
@pytest.mark.parametrize("version", MMDS_VERSIONS)
def test_json_response(test_microvm_with_api, version):
    """
    Test the MMDS json response.

    Verifies JSON-typed values (objects, strings, array elements, floats,
    ints) are returned correctly when requesting Accept: application/json.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    data_store = {
        "latest": {
            "meta-data": {
                "ami-id": "ami-12345678",
                "reservation-id": "r-fea54097",
                "local-hostname": "ip-10-251-50-12.ec2.internal",
                "public-hostname": "ec2-203-0-113-25.compute-1.amazonaws.com",
                "dummy_res": ["res1", "res2"],
            },
            "Limits": {"CPU": 512, "Memory": 512},
            "Usage": {"CPU": 12.12},
        }
    }
    # Attach network device.
    test_microvm.add_net_iface()
    # Configure MMDS version.
    configure_mmds(test_microvm, iface_ids=["eth0"], version=version)
    # Populate data store with contents.
    populate_data_store(test_microvm, data_store)
    test_microvm.basic_config(vcpu_count=1)
    test_microvm.start()
    ssh_connection = test_microvm.ssh
    cmd = "ip route add {} dev eth0".format(DEFAULT_IPV4)
    run_guest_cmd(ssh_connection, cmd, "")
    token = None
    if version == "V2":
        # Generate token.
        token = generate_mmds_session_token(ssh_connection, DEFAULT_IPV4, token_ttl=60)
    pre = generate_mmds_get_request(DEFAULT_IPV4, token)
    cmd = pre + "latest/meta-data/"
    run_guest_cmd(ssh_connection, cmd, data_store["latest"]["meta-data"], use_json=True)
    cmd = pre + "latest/meta-data/ami-id/"
    run_guest_cmd(ssh_connection, cmd, "ami-12345678", use_json=True)
    # Array elements are addressed by index.
    cmd = pre + "latest/meta-data/dummy_res/0"
    run_guest_cmd(ssh_connection, cmd, "res1", use_json=True)
    cmd = pre + "latest/Usage/CPU"
    run_guest_cmd(ssh_connection, cmd, 12.12, use_json=True)
    cmd = pre + "latest/Limits/CPU"
    run_guest_cmd(ssh_connection, cmd, 512, use_json=True)
@pytest.mark.parametrize("version", MMDS_VERSIONS)
def test_mmds_response(test_microvm_with_api, version):
    """
    Test MMDS responses to various datastore requests.

    Uses the plain-text (IMDS-compatible) format: directory listings for
    non-leaf nodes, values for leaves, and errors for unsupported types.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    data_store = {
        "latest": {
            "meta-data": {
                "ami-id": "ami-12345678",
                "reservation-id": "r-fea54097",
                "local-hostname": "ip-10-251-50-12.ec2.internal",
                "public-hostname": "ec2-203-0-113-25.compute-1.amazonaws.com",
                "dummy_obj": {
                    "res_key": "res_value",
                },
                "dummy_array": ["arr_val1", "arr_val2"],
            },
            "Limits": {"CPU": 512, "Memory": 512},
            "Usage": {"CPU": 12.12},
        }
    }
    # Attach network device.
    test_microvm.add_net_iface()
    # Configure MMDS version.
    configure_mmds(test_microvm, iface_ids=["eth0"], version=version)
    # Populate data store with contents.
    populate_data_store(test_microvm, data_store)
    test_microvm.basic_config(vcpu_count=1)
    test_microvm.start()
    ssh_connection = test_microvm.ssh
    cmd = "ip route add {} dev eth0".format(DEFAULT_IPV4)
    run_guest_cmd(ssh_connection, cmd, "")
    token = None
    if version == "V2":
        # Generate token.
        token = generate_mmds_session_token(ssh_connection, DEFAULT_IPV4, token_ttl=60)
    pre = generate_mmds_get_request(DEFAULT_IPV4, token=token, app_json=False)
    cmd = pre + "latest/meta-data/"
    # Non-leaf nodes are listed alphabetically; sub-objects get a trailing "/".
    expected = (
        "ami-id\n"
        "dummy_array\n"
        "dummy_obj/\n"
        "local-hostname\n"
        "public-hostname\n"
        "reservation-id"
    )
    run_guest_cmd(ssh_connection, cmd, expected)
    cmd = pre + "latest/meta-data/ami-id/"
    run_guest_cmd(ssh_connection, cmd, "ami-12345678")
    cmd = pre + "latest/meta-data/dummy_array/0"
    run_guest_cmd(ssh_connection, cmd, "arr_val1")
    # Numeric values are not representable in the plain-text format.
    cmd = pre + "latest/Usage/CPU"
    run_guest_cmd(
        ssh_connection,
        cmd,
        "Cannot retrieve value. The value has" " an unsupported type.",
    )
    cmd = pre + "latest/Limits/CPU"
    run_guest_cmd(
        ssh_connection,
        cmd,
        "Cannot retrieve value. The value has" " an unsupported type.",
    )
@pytest.mark.parametrize("version", MMDS_VERSIONS)
def test_larger_than_mss_payloads(test_microvm_with_api, version):
    """
    Test MMDS content for payloads larger than MSS.

    Verifies responses below, equal to, and above the TCP maximum segment
    size are delivered intact.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    # Attach network device.
    test_microvm.add_net_iface()
    # Configure MMDS version.
    configure_mmds(test_microvm, iface_ids=["eth0"], version=version)
    # The MMDS is empty at this point.
    response = test_microvm.api.mmds.get()
    assert response.json() == {}
    test_microvm.basic_config(vcpu_count=1)
    test_microvm.start()
    # Make sure MTU is 1500 bytes.
    ssh_connection = test_microvm.ssh
    run_guest_cmd(ssh_connection, "ip link set dev eth0 mtu 1500", "")
    cmd = 'ip a s eth0 | grep -i mtu | tr -s " " | cut -d " " -f 4,5'
    run_guest_cmd(ssh_connection, cmd, "mtu 1500\n")
    # These values are usually used by booted up guest network interfaces.
    mtu = 1500
    ipv4_packet_headers_len = 20
    tcp_segment_headers_len = 20
    mss = mtu - ipv4_packet_headers_len - tcp_segment_headers_len
    # Generate a random MMDS content, double of MSS.
    letters = string.ascii_lowercase
    larger_than_mss = "".join(random.choice(letters) for i in range(2 * mss))
    mss_equal = "".join(random.choice(letters) for i in range(mss))
    lower_than_mss = "".join(random.choice(letters) for i in range(mss - 2))
    data_store = {
        "larger_than_mss": larger_than_mss,
        "mss_equal": mss_equal,
        "lower_than_mss": lower_than_mss,
    }
    test_microvm.api.mmds.put(**data_store)
    response = test_microvm.api.mmds.get()
    assert response.json() == data_store
    run_guest_cmd(ssh_connection, f"ip route add {DEFAULT_IPV4} dev eth0", "")
    token = None
    if version == "V2":
        # Generate token.
        token = generate_mmds_session_token(ssh_connection, DEFAULT_IPV4, token_ttl=60)
    pre = generate_mmds_get_request(DEFAULT_IPV4, token=token, app_json=False)
    cmd = pre + "larger_than_mss"
    run_guest_cmd(ssh_connection, cmd, larger_than_mss)
    cmd = pre + "mss_equal"
    run_guest_cmd(ssh_connection, cmd, mss_equal)
    cmd = pre + "lower_than_mss"
    run_guest_cmd(ssh_connection, cmd, lower_than_mss)
@pytest.mark.parametrize("version", MMDS_VERSIONS)
def test_mmds_dummy(test_microvm_with_api, version):
    """
    Test the API and guest facing features of the microVM MetaData Service.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    # Attach network device.
    test_microvm.add_net_iface()
    # Configure MMDS version.
    configure_mmds(test_microvm, iface_ids=["eth0"], version=version)
    # The MMDS is empty at this point.
    response = test_microvm.api.mmds.get()
    assert response.json() == {}
    # Test that patch return NotInitialized when the MMDS is not initialized.
    dummy_json = {"latest": {"meta-data": {"ami-id": "dummy"}}}
    with pytest.raises(RuntimeError, match="The MMDS data store is not initialized."):
        test_microvm.api.mmds.patch(**dummy_json)
    # Test that using the same json with a PUT request, the MMDS data-store is
    # created.
    response = test_microvm.api.mmds.put(**dummy_json)
    response = test_microvm.api.mmds.get()
    assert response.json() == dummy_json
    # GET again to check the read itself does not mutate the store.
    response = test_microvm.api.mmds.get()
    assert response.json() == dummy_json
    # PATCH replaces the data store contents wholesale.
    dummy_json = {
        "latest": {
            "meta-data": {
                "ami-id": "another_dummy",
                "secret_key": "eaasda48141411aeaeae",
            }
        }
    }
    response = test_microvm.api.mmds.patch(**dummy_json)
    response = test_microvm.api.mmds.get()
    assert response.json() == dummy_json
@pytest.mark.parametrize("version", MMDS_VERSIONS)
def test_guest_mmds_hang(test_microvm_with_api, version):
    """
    Test the MMDS json endpoint when Content-Length larger than actual length.

    The server must reject the malformed request instead of hanging while
    waiting for the advertised (but never sent) body bytes; curl's -m 2
    timeout would otherwise fire.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    # Attach network device.
    test_microvm.add_net_iface()
    # Configure MMDS version.
    configure_mmds(test_microvm, iface_ids=["eth0"], version=version)
    data_store = {"latest": {"meta-data": {"ami-id": "ami-12345678"}}}
    populate_data_store(test_microvm, data_store)
    test_microvm.basic_config(vcpu_count=1)
    test_microvm.start()
    ssh_connection = test_microvm.ssh
    run_guest_cmd(ssh_connection, f"ip route add {DEFAULT_IPV4} dev eth0", "")
    # GET with a Content-Length claiming 100 bytes but only a short body.
    get_cmd = "curl -m 2 -s"
    get_cmd += " -X GET"
    get_cmd += ' -H "Content-Length: 100"'
    get_cmd += ' -H "Accept: application/json"'
    get_cmd += ' -d "some body"'
    get_cmd += f" http://{DEFAULT_IPV4}/"
    if version == "V1":
        _, stdout, _ = ssh_connection.run(get_cmd)
        assert "Invalid request" in stdout
    else:
        # Generate token.
        token = generate_mmds_session_token(ssh_connection, DEFAULT_IPV4, token_ttl=60)
        get_cmd += ' -H "X-metadata-token: {}"'.format(token)
        _, stdout, _ = ssh_connection.run(get_cmd)
        assert "Invalid request" in stdout
        # Do the same for a PUT request.
        cmd = "curl -m 2 -s"
        cmd += " -X PUT"
        cmd += ' -H "Content-Length: 100"'
        cmd += ' -H "X-metadata-token: {}"'.format(token)
        cmd += ' -H "Accept: application/json"'
        cmd += ' -d "some body"'
        cmd += " http://{}/".format(DEFAULT_IPV4)
        _, stdout, _ = ssh_connection.run(cmd)
        assert "Invalid request" in stdout
@pytest.mark.parametrize("version", MMDS_VERSIONS)
def test_mmds_limit_scenario(test_microvm_with_api, version):
    """
    Test the MMDS json endpoint when data store size reaches the limit.

    With mmds-size-limit at 51200 bytes, oversize PUT/PATCH requests must
    fail with 413 (PayloadTooLarge) and leave the store untouched; after
    shrinking the store, new data must be accepted again.
    """
    test_microvm = test_microvm_with_api
    # Set a large enough limit for the API so that requests actually reach the
    # MMDS server.
    test_microvm.jailer.extra_args.update(
        {"http-api-max-payload-size": "512000", "mmds-size-limit": "51200"}
    )
    test_microvm.spawn()
    # Attach network device.
    test_microvm.add_net_iface()
    # Configure MMDS version.
    configure_mmds(test_microvm, iface_ids=["eth0"], version=version)
    dummy_json = {"latest": {"meta-data": {"ami-id": "dummy"}}}
    # Populate data-store.
    response = test_microvm.api.mmds.put(**dummy_json)
    # Send a request that will exceed the data store.
    aux = "a" * 51200
    large_json = {"latest": {"meta-data": {"ami-id": "smth", "secret_key": aux}}}
    with pytest.raises(RuntimeError, match="413"):
        response = test_microvm.api.mmds.put(**large_json)
    response = test_microvm.api.mmds.get()
    assert response.json() == dummy_json
    # Send a request that will fill the data store.
    aux = "a" * 51137
    dummy_json = {"latest": {"meta-data": {"ami-id": "smth", "secret_key": aux}}}
    test_microvm.api.mmds.patch(**dummy_json)
    # Try to send a new patch thaw will increase the data store size. Since the
    # actual size is equal with the limit this request should fail with
    # PayloadTooLarge.
    aux = "b" * 10
    dummy_json = {"latest": {"meta-data": {"ami-id": "smth", "secret_key2": aux}}}
    with pytest.raises(RuntimeError, match="413"):
        response = test_microvm.api.mmds.patch(**dummy_json)
    # Check that the patch actually failed and the contents of the data store
    # has not changed.
    response = test_microvm.api.mmds.get()
    assert str(response.json()).find(aux) == -1
    # Delete something from the mmds so we will be able to send new data.
    dummy_json = {"latest": {"meta-data": {"ami-id": "smth", "secret_key": "a"}}}
    test_microvm.api.mmds.patch(**dummy_json)
    # Check that the size has shrunk.
    response = test_microvm.api.mmds.get()
    assert len(str(response.json()).replace(" ", "")) == 59
    # Try to send a new patch, this time the request should succeed.
    aux = "a" * 100
    dummy_json = {"latest": {"meta-data": {"ami-id": "smth", "secret_key": aux}}}
    response = test_microvm.api.mmds.patch(**dummy_json)
    # Check that the size grew as expected.
    response = test_microvm.api.mmds.get()
    assert len(str(response.json()).replace(" ", "")) == 158
@pytest.mark.parametrize("version", MMDS_VERSIONS)
def test_mmds_snapshot(uvm_nano, microvm_factory, version, firecracker_release):
    """
    Test MMDS behavior by restoring a snapshot on current FC versions.

    Ensures that the version is persisted or initialised with the default if
    the firecracker version does not support it.
    """
    # MMDS traffic needs a network interface before snapshotting.
    uvm_nano.add_net_iface()
    # Delegate the snapshot/restore round-trip and the MMDS version
    # validation to the shared helper, using the release binaries under test.
    _validate_mmds_snapshot(
        uvm_nano,
        microvm_factory,
        version,
        target_fc_version=firecracker_release.snapshot_version,
        fc_binary_path=firecracker_release.path,
        jailer_binary_path=firecracker_release.jailer,
    )
def test_mmds_older_snapshot(
    microvm_factory, guest_kernel, rootfs, firecracker_release
):
    """
    Test MMDS behavior restoring older snapshots in the current version.

    Ensures that the MMDS version is persisted or initialised with the default
    if the FC version does not support this feature.
    """
    # due to bug fixed in commit 8dab78b
    firecracker_version = firecracker_release.version_tuple
    if global_props.instance == "m6a.metal" and firecracker_version < (1, 3, 3):
        pytest.skip("incompatible with AMD and Firecracker <1.3.3")

    # Boot the microVM with the *older* release binaries; the snapshot it
    # produces is then restored by the helper on the current Firecracker.
    microvm = microvm_factory.build(
        guest_kernel,
        rootfs,
        fc_binary_path=firecracker_release.path,
        jailer_binary_path=firecracker_release.jailer,
    )
    microvm.spawn()
    microvm.basic_config()
    microvm.add_net_iface()

    # V2 exercises the session-token path across the snapshot boundary.
    mmds_version = "V2"
    _validate_mmds_snapshot(
        microvm,
        microvm_factory,
        mmds_version,
        target_fc_version=firecracker_release.snapshot_version,
    )
def test_mmds_v2_negative(test_microvm_with_api):
    """
    Test invalid MMDS GET/PUT requests when using V2.

    Covers: missing/invalid/expired session tokens, missing token TTL,
    out-of-range TTLs, a forbidden `X-Forwarded-For` header and a bad URI.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()

    # Attach network device.
    test_microvm.add_net_iface()
    # Configure MMDS version.
    configure_mmds(test_microvm, version="V2", iface_ids=["eth0"])

    data_store = {
        "latest": {
            "meta-data": {
                "ami-id": "ami-12345678",
                "reservation-id": "r-fea54097",
                "local-hostname": "ip-10-251-50-12.ec2.internal",
                "public-hostname": "ec2-203-0-113-25.compute-1.amazonaws.com",
            }
        }
    }
    populate_data_store(test_microvm, data_store)

    test_microvm.basic_config(vcpu_count=1)
    test_microvm.start()

    # Route MMDS traffic from the guest through eth0.
    ssh_connection = test_microvm.ssh
    run_guest_cmd(ssh_connection, f"ip route add {DEFAULT_IPV4} dev eth0", "")

    # Check `GET` request fails when token is not provided.
    cmd = generate_mmds_get_request(DEFAULT_IPV4)
    expected = (
        "No MMDS token provided. Use `X-metadata-token` header "
        "to specify the session token."
    )
    run_guest_cmd(ssh_connection, cmd, expected)

    # Generic `GET` request.
    # Check `GET` request fails when token is not valid.
    run_guest_cmd(
        ssh_connection,
        generate_mmds_get_request(DEFAULT_IPV4, token="foo"),
        "MMDS token not valid.",
    )

    # Check `PUT` request fails when token TTL is not provided.
    cmd = f"curl -m 2 -s -X PUT http://{DEFAULT_IPV4}/latest/api/token"
    expected = (
        "Token time to live value not found. Use "
        "`X-metadata-token-ttl-seconds` header to specify "
        "the token's lifetime."
    )
    run_guest_cmd(ssh_connection, cmd, expected)

    # Check `PUT` request fails when `X-Forwarded-For` header is provided.
    cmd = "curl -m 2 -s"
    cmd += " -X PUT"
    cmd += ' -H "X-Forwarded-For: foo"'
    cmd += f" http://{DEFAULT_IPV4}"
    expected = (
        "Invalid header. Reason: Unsupported header name. " "Key: X-Forwarded-For"
    )
    run_guest_cmd(ssh_connection, cmd, expected)

    # Generic `PUT` request; `{}` is filled with the TTL below via .format().
    put_cmd = "curl -m 2 -s"
    put_cmd += " -X PUT"
    put_cmd += ' -H "X-metadata-token-ttl-seconds: {}"'
    put_cmd += f" {DEFAULT_IPV4}/latest/api/token"

    # Check `PUT` request fails when path is invalid.
    # Path is invalid because we remove the last character
    # at the end of the valid uri.
    run_guest_cmd(
        ssh_connection, put_cmd[:-1].format(60), "Resource not found: /latest/api/toke."
    )

    # Check `PUT` request fails when token TTL is not valid.
    ttl_values = [MIN_TOKEN_TTL_SECONDS - 1, MAX_TOKEN_TTL_SECONDS + 1]
    for ttl in ttl_values:
        expected = (
            "Invalid time to live value provided for token: {}. "
            "Please provide a value between {} and {}.".format(
                ttl, MIN_TOKEN_TTL_SECONDS, MAX_TOKEN_TTL_SECONDS
            )
        )
        run_guest_cmd(ssh_connection, put_cmd.format(ttl), expected)

    # Valid `PUT` request to generate token with a 1-second lifetime.
    _, stdout, _ = ssh_connection.run(put_cmd.format(1))
    token = stdout
    assert len(token) > 0

    # Wait for token to expire.
    time.sleep(1)

    # Check `GET` request fails when expired token is provided.
    run_guest_cmd(
        ssh_connection,
        generate_mmds_get_request(DEFAULT_IPV4, token=token),
        "MMDS token not valid.",
    )
def test_deprecated_mmds_config(test_microvm_with_api):
    """
    Test deprecated Mmds configs.

    Configuring MMDS without a version (which defaults to V1 for backwards
    compatibility) or explicitly with V1 must return a deprecation header;
    V2 must not. The deprecated-call metric must count exactly the two
    deprecated configurations.
    """
    vm = test_microvm_with_api
    vm.spawn()
    vm.basic_config()
    # Attach network device.
    vm.add_net_iface()

    # (extra kwargs, expect a deprecation header?) for each configuration.
    cases = (
        ({}, True),  # default version is 1 for backwards compatibility
        ({"version": "V1"}, True),
        ({"version": "V2"}, False),
    )
    for extra_kwargs, deprecated in cases:
        response = configure_mmds(vm, iface_ids=["eth0"], **extra_kwargs)
        assert ("deprecation" in response.headers) == deprecated

    vm.start()

    # The two deprecated configurations above must be reflected in metrics.
    deprecated_calls = 0
    for datapoint in vm.get_all_metrics():
        deprecated_calls += datapoint["deprecated_api"]["deprecated_http_api_calls"]
    assert deprecated_calls == 2
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,915
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/.buildkite/pipeline_pr.py
|
#!/usr/bin/env python3
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

"""Generate Buildkite pipelines dynamically"""

from common import (
    COMMON_PARSER,
    get_changed_files,
    group,
    overlay_dict,
    pipeline_to_json,
    run_all_tests,
)

# Buildkite default job priority is 0. Setting this to 1 prioritizes PRs over
# scheduled jobs and other batch jobs.
DEFAULT_PRIORITY = 1

args = COMMON_PARSER.parse_args()

# The style group always runs, regardless of which files changed.
step_style = {
    "command": "./tools/devtool -y test -- ../tests/integration_tests/style/",
    "label": "🪶 Style",
    "priority": DEFAULT_PRIORITY,
}

# Parameters shared by every generated step; CLI overrides are applied below.
defaults = {
    "instances": args.instances,
    "platforms": args.platforms,
    # buildkite step parameters
    "priority": DEFAULT_PRIORITY,
    "timeout_in_minutes": 45,
    "artifacts": ["./test_results/**/*"],
}
defaults = overlay_dict(defaults, args.step_param)

devtool_build_grp = group(
    "📦 Devtool Sanity Build",
    "./tools/devtool -y build",
    **defaults,
)

build_grp = group(
    "📦 Build",
    "./tools/devtool -y test -- ../tests/integration_tests/build/",
    **defaults,
)

# Functional tests are split alphabetically into two roughly equal shards.
functional_1_grp = group(
    "⚙ Functional [a-n]",
    "./tools/devtool -y test -- `cd tests; ls integration_tests/functional/test_[a-n]*.py`",
    **defaults,
)

functional_2_grp = group(
    "⚙ Functional [o-z]",
    "./tools/devtool -y test -- `cd tests; ls integration_tests/functional/test_[o-z]*.py`",
    **defaults,
)

security_grp = group(
    "🔒 Security",
    "./tools/devtool -y test -- ../tests/integration_tests/security/",
    **defaults,
)

defaults_for_performance = overlay_dict(
    defaults,
    {
        # We specify higher priority so the ag=1 jobs get picked up before the ag=n
        # jobs in ag=1 agents
        "priority": DEFAULT_PRIORITY + 1,
        "agents": {"ag": 1},
    },
)

performance_grp = group(
    "⏱ Performance",
    "./tools/devtool -y test -- ../tests/integration_tests/performance/",
    **defaults_for_performance,
)

defaults_for_kani = overlay_dict(
    defaults_for_performance,
    {
        # Kani runs fastest on m6i.metal
        "instances": ["m6i.metal"],
        "platforms": [("al2", "linux_5.10")],
        "timeout_in_minutes": 300,
    },
)

kani_grp = group(
    "🔍 Kani",
    "./tools/devtool -y test -- ../tests/integration_tests/test_kani.py -n auto",
    **defaults_for_kani,
)
# Give every Kani step the same label (overriding what `group` generated).
for step in kani_grp["steps"]:
    step["label"] = "🔍 Kani"

steps = [step_style]
changed_files = get_changed_files("main")

# run sanity build of devtool if Dockerfile is changed
if any(x.parts[-1] == "Dockerfile" for x in changed_files):
    steps += [devtool_build_grp]

# The full suite runs only when the changed files warrant it.
if run_all_tests(changed_files):
    steps += [
        kani_grp,
        build_grp,
        functional_1_grp,
        functional_2_grp,
        security_grp,
        performance_grp,
    ]

pipeline = {"steps": steps}
print(pipeline_to_json(pipeline))
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,916
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_topology.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests for ensuring correctness of CPU and cache topology in the guest."""
import platform
import subprocess
import pytest
import framework.utils_cpuid as utils
TOPOLOGY_STR = {1: "0", 2: "0,1", 16: "0-15"}
PLATFORM = platform.machine()
def _check_cpu_topology(
    test_microvm, expected_cpu_count, expected_threads_per_core, expected_cpus_list
):
    """Assert that `lscpu` inside the guest reports the expected topology."""
    # Firecracker guests see one socket and one NUMA node, so the core count
    # follows directly from vCPU count and threads-per-core.
    cores_per_socket = int(expected_cpu_count / expected_threads_per_core)
    expected_cpu_topology = {
        "CPU(s)": str(expected_cpu_count),
        "On-line CPU(s) list": expected_cpus_list,
        "Thread(s) per core": str(expected_threads_per_core),
        "Core(s) per socket": str(cores_per_socket),
        "Socket(s)": "1",
        "NUMA node(s)": "1",
    }
    utils.check_guest_cpuid_output(
        test_microvm, "lscpu", None, ":", expected_cpu_topology
    )
def _check_cache_topology_x86(
    test_microvm, num_vcpus_on_lvl_1_cache, num_vcpus_on_lvl_3_cache
):
    """Assert x86 guest cache topology as reported by `cpuid`.

    Checks the cache-sharing field of cache leaves 0-2 against the expected
    L1 value and leaf 3 against the expected L3 value. AMD and Intel `cpuid`
    output use different key names for the same information.

    Raises:
        RuntimeError: if the host CPU vendor is neither AMD nor Intel.
    """
    vm = test_microvm
    # cpuid prints values as "0xN (N)".
    expected_lvl_1_str = "{} ({})".format(
        hex(num_vcpus_on_lvl_1_cache), num_vcpus_on_lvl_1_cache
    )
    expected_lvl_3_str = "{} ({})".format(
        hex(num_vcpus_on_lvl_3_cache), num_vcpus_on_lvl_3_cache
    )

    cpu_vendor = utils.get_cpu_vendor()
    if cpu_vendor == utils.CpuVendor.AMD:
        level_key = "level"
        key_share = "extra cores sharing this cache"
    elif cpu_vendor == utils.CpuVendor.INTEL:
        level_key = "cache level"
        key_share = "maximum IDs for CPUs sharing cache"
    else:
        # Previously an unknown vendor fell through and crashed later with an
        # UnboundLocalError; fail explicitly instead.
        raise RuntimeError(f"Unsupported CPU vendor: {cpu_vendor}")

    expected_level_1_topology = {
        level_key: "0x1 (1)",
        key_share: expected_lvl_1_str,
    }
    expected_level_3_topology = {
        level_key: "0x3 (3)",
        key_share: expected_lvl_3_str,
    }

    # Cache leaves 0-2 are compared against the L1 expectations and leaf 3
    # against the L3 expectations (mirrors the original per-leaf assertions).
    for cache_idx in range(3):
        utils.check_guest_cpuid_output(
            vm,
            "cpuid -1",
            f"--- cache {cache_idx} ---",
            "=",
            expected_level_1_topology,
        )
    utils.check_guest_cpuid_output(
        vm, "cpuid -1", "--- cache 3 ---", "=", expected_level_3_topology
    )
def _check_cache_topology_arm(test_microvm, no_cpus):
    """Assert that the guest's sysfs cache info matches the host's exactly."""
    # We will check the cache topology by looking at what each cpu
    # contains as far as cache info.
    # For that we are iterating through the hierarchy of folders inside:
    # /sys/devices/system/cpu/cpuX/cache/indexY/type - the type of the cache
    # (i.e Instruction, Data, Unified)
    # /sys/devices/system/cpu/cpuX/cache/indexY/size - size of the cache
    # /sys/devices/system/cpu/cpuX/cache/indexY/level - L1, L2 or L3 cache.
    # There are 2 types of L1 cache (instruction and data) that is why the
    # "cache_info" variable below has 4 items.
    sys_cpu = "/sys/devices/system/cpu"
    fields = ["level", "type", "size", "coherency_line_size", "number_of_sets"]

    # Bash brace expansion produces one grep over every cpu/index/field combo;
    # `sort` makes the guest and host outputs directly comparable.
    cmd = f"grep . {sys_cpu}/cpu{{0..{no_cpus-1}}}/cache/index*/{{{','.join(fields)}}} |sort"

    _, guest_stdout, guest_stderr = test_microvm.ssh.run(cmd)
    assert guest_stderr == ""

    # Run the identical command on the host (bash is needed for the brace
    # expansion) and require byte-identical output from the guest.
    res = subprocess.run(
        cmd,
        shell=True,
        executable="/bin/bash",
        capture_output=True,
        check=True,
        encoding="ascii",
    )
    assert res.stderr == ""
    assert res.stdout == guest_stdout
@pytest.mark.skipif(
    PLATFORM != "x86_64", reason="Firecracker supports CPU topology only on x86_64."
)
@pytest.mark.parametrize("num_vcpus", [1, 2, 16])
@pytest.mark.parametrize("htt", [True, False])
def test_cpu_topology(test_microvm_with_api, num_vcpus, htt):
    """
    Check the CPU topology for a microvm with the specified config.
    """
    vm = test_microvm_with_api
    vm.spawn()
    vm.basic_config(vcpu_count=num_vcpus, smt=htt)
    vm.add_net_iface()
    vm.start()

    # With SMT enabled and more than one vCPU the guest sees two threads per
    # core; otherwise every vCPU is its own core.
    _check_cpu_topology(
        vm, num_vcpus, 2 if htt and num_vcpus > 1 else 1, TOPOLOGY_STR[num_vcpus]
    )
@pytest.mark.parametrize("num_vcpus", [1, 2, 16])
@pytest.mark.parametrize("htt", [True, False])
def test_cache_topology(test_microvm_with_api, num_vcpus, htt):
    """
    Check the cache topology for a microvm with the specified config.
    """
    # Guard clause: SMT cannot be requested on ARM.
    if htt and PLATFORM == "aarch64":
        pytest.skip("SMT is configurable only on x86.")

    vm = test_microvm_with_api
    vm.spawn()
    vm.basic_config(vcpu_count=num_vcpus, smt=htt)
    vm.add_net_iface()
    vm.start()

    if PLATFORM == "x86_64":
        # Sharing counts: L1 is per-core (shared only by SMT siblings),
        # L3 is shared by all vCPUs.
        smt_active = htt and num_vcpus > 1
        _check_cache_topology_x86(vm, 1 if smt_active else 0, num_vcpus - 1)
    elif PLATFORM == "aarch64":
        _check_cache_topology_arm(vm, num_vcpus)
    else:
        raise Exception("This test is not run on this platform!")
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,917
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_error_code.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests scenarios for Firecracker kvm exit handling."""
import platform
import pytest
from framework.utils import wait_process_termination
@pytest.mark.skipif(
    platform.machine() != "aarch64",
    reason="The error code returned on aarch64 will not be returned on x86 "
    "under the same conditions.",
)
def test_enosys_error_code(uvm_plain):
    """
    Test that ENOSYS error is caught and firecracker exits gracefully.
    """
    # On aarch64 we trigger this error by running a C program that
    # maps a file into memory and then tries to load the content from an
    # offset in the file bigger than its length into a register asm volatile
    # ("ldr %0, [%1], 4" : "=r" (ret), "+r" (buf));
    vm = uvm_plain
    vm.spawn()
    # NOTE(review): monitor disabled, presumably because the VM is expected
    # to die mid-test — confirm against the fixture's teardown behavior.
    vm.memory_monitor = None
    # Boot straight into the fault-triggering binary as init.
    vm.basic_config(
        vcpu_count=1,
        boot_args="reboot=k panic=1 pci=off init=/usr/local/bin/devmemread",
    )
    vm.start()

    # Check if FC process is closed
    wait_process_termination(vm.jailer_clone_pid)

    # The log must show the graceful ENOSYS handling and a clean VMM stop.
    vm.check_log_message(
        "Received ENOSYS error because KVM failed to emulate an instruction."
    )
    vm.check_log_message("Vmm is stopping.")
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,918
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/style/test_swagger.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests ensuring codebase style compliance for the OpenAPI specification."""
from pathlib import Path
from openapi_spec_validator import validate_spec
from openapi_spec_validator.readers import read_from_filename
def validate_swagger(swagger_spec):
    """Fail if OpenAPI spec is not followed."""
    # Parse the spec file, then run the structural OpenAPI validation on it.
    parsed_spec = read_from_filename(swagger_spec)[0]
    validate_spec(parsed_spec)
def test_firecracker_swagger():
    """
    Test that Firecracker swagger specification is valid.
    """
    # The path is relative to the tests/ working directory.
    validate_swagger(Path("../src/api_server/swagger/firecracker.yaml"))
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,919
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/framework/defs.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Some common defines used in different modules of the testing framework."""

import platform
from pathlib import Path

# Firecracker's binary name
FC_BINARY_NAME = "firecracker"

# Jailer's binary name
JAILER_BINARY_NAME = "jailer"

# The Firecracker sources workspace dir (three levels above this file)
FC_WORKSPACE_DIR = Path(__file__).parent.parent.parent.resolve()

# Cargo target dir for the Firecracker workspace. Set via .cargo/config
FC_WORKSPACE_TARGET_DIR = FC_WORKSPACE_DIR / "build/cargo_target"

# Cargo build directory for seccompiler
SECCOMPILER_TARGET_DIR = FC_WORKSPACE_DIR / "build/seccompiler"

# Folder containing JSON seccomp filters
SECCOMP_JSON_DIR = FC_WORKSPACE_DIR / "resources/seccomp"

# Maximum accepted duration of an API call, in milliseconds
MAX_API_CALL_DURATION_MS = 700

# Default test session root directory path
DEFAULT_TEST_SESSION_ROOT_PATH = "/srv"

# Absolute path to the test results folder
TEST_RESULTS_DIR = FC_WORKSPACE_DIR / "test_results"

# Name of the file that stores firecracker's PID when launched by jailer with
# `--new-pid-ns`.
FC_PID_FILE_NAME = "firecracker.pid"

# The minimum required host kernel version for which io_uring is supported in
# Firecracker.
MIN_KERNEL_VERSION_FOR_IO_URING = "5.10.51"

# Host kernel series the test suite is expected to run against.
SUPPORTED_HOST_KERNELS = ["4.14", "5.10", "6.1"]

# Guest images are looked up under the test session root first.
IMG_DIR = Path(DEFAULT_TEST_SESSION_ROOT_PATH) / "img"

# fall-back to the local directory
if not IMG_DIR.exists():
    IMG_DIR = Path(__file__).joinpath("../../../build/img").resolve()

# Architecture-specific artifacts live in a subdirectory named after the
# host machine (e.g. x86_64, aarch64).
ARTIFACT_DIR = IMG_DIR / platform.machine()
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,920
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/framework/gitlint_rules.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""The user defined rules for gitlint."""
from gitlint.rules import CommitRule, RuleViolation
class EndsSigned(CommitRule):
    """Checks commit message body formatting.

    Makes sure each commit message body ends with
    1 or more signatures ("Signed-off-by"), followed by
    0 or more co-authors ("Co-authored-by").
    """

    # The name of the rule.
    name = "body-requires-signature"

    # The unique id of the rule.
    id = "UC2"

    def validate(self, commit):
        r"""Validate user defined gitlint rules.

        >>> from gitlint.tests.base import BaseTestCase
        >>> from gitlint.rules import RuleViolation
        ...
        >>> ends_signed = EndsSigned()
        ...
        >>> msg1 = (
        ... f"Title\n\nMessage.\n\n"
        ... f"Signed-off-by: name <email@domain>"
        ... )
        >>> commit1 = BaseTestCase.gitcommit(msg1)
        >>> ends_signed.validate(commit1)
        []
        >>> msg2 = (
        ... f"Title\n\nMessage.\n\n"
        ... f"Signed-off-by: name <email>\n\n"
        ... f"Co-authored-by: name <email>"
        ... )
        >>> commit2 = BaseTestCase.gitcommit(msg2)
        >>> ends_signed.validate(commit2)
        []
        >>> msg3 = (
        ... f"Title\n\nMessage.\n\n"
        ... )
        >>> commit3 = BaseTestCase.gitcommit(msg3)
        >>> vio3 = ends_signed.validate(commit3)
        >>> vio_msg3 = (
        ... f"'Signed-off-by:' not found "
        ... f"in commit message body"
        ... )
        >>> vio3 == [RuleViolation("UC2", vio_msg3)]
        True
        >>> msg4 = (
        ... f"Title\n\nMessage.\n\n"
        ... f"Signed-off-by: name <email@domain>\n\na sentence"
        ... )
        >>> commit4 = BaseTestCase.gitcommit(msg4)
        >>> vio4 = ends_signed.validate(commit4)
        >>> vio_msg4 = (
        ... f"Non 'Co-authored-by:' or 'Signed-off-by:'"
        ... f" string found following 1st 'Signed-off-by:'"
        ... )
        >>> vio4 == [RuleViolation("UC2", vio_msg4, None, 5)]
        True
        >>> msg5 = (
        ... f"Title\n\nMessage.\n\n"
        ... f"Co-authored-by: name <email@domain>\n\n"
        ... f"a sentence."
        ... )
        >>> commit5 = BaseTestCase.gitcommit(msg5)
        >>> vio5 = ends_signed.validate(commit5)
        >>> vio_msg5 = (
        ... f"'Co-authored-by:' found before 'Signed-off-by:'"
        ... )
        >>> vio5 == [RuleViolation("UC2", vio_msg5, None, 3)]
        True
        >>> msg6 = (
        ... f"Title\n\nMessage.\n\n"
        ... f"Signed-off-by: name <email@domain>\n\n"
        ... f"Co-authored-by: name <email@domain>\n\n"
        ... f"a sentence"
        ... )
        >>> commit6 = BaseTestCase.gitcommit(msg6)
        >>> vio6 = ends_signed.validate(commit6)
        >>> vio_msg6 = (
        ... f"Non 'Co-authored-by:' string found "
        ... f"after 1st 'Co-authored-by:'"
        ... )
        >>> vio6 == [RuleViolation("UC2", vio_msg6, None, 6)]
        True
        """
        # Utilities
        def rtn(stmt, i):
            return [RuleViolation(self.id, stmt, None, i)]

        co_auth = "Co-authored-by:"
        sig = "Signed-off-by:"

        # A single iterator shared by all three loops below: each loop
        # resumes where the previous one stopped.
        message_iter = enumerate(commit.message.original.split("\n"))

        # Checks commit message contains a `sig` string
        found = False
        for i, line in message_iter:
            # We check that no co-authors are declared before signatures.
            if line.startswith(co_auth):
                return rtn(f"'{co_auth}' found before '{sig}'", i)
            if line.startswith(sig):
                found = True
                break

        # If no signature was found in the message
        # (before `message_iter` ended)
        if not found:
            return rtn(f"'{sig}' not found in commit message body", None)

        # Checks lines following signature are
        # either signatures or co-authors
        for i, line in message_iter:
            if line.startswith(sig) or not line.strip():
                continue

            # Once we encounter the first co-author,
            # we no longer accept signatures
            if line.startswith(co_auth):
                break

            return rtn(
                f"Non '{co_auth}' or '{sig}' string found " f"following 1st '{sig}'",
                i,
            )

        # Checks lines following co-author are only additional co-authors.
        for i, line in message_iter:
            if line and not line.startswith(co_auth):
                return rtn(
                    f"Non '{co_auth}' string found after 1st '{co_auth}'",
                    i,
                )

        # Return no errors
        return []
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,921
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/security/test_jail.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests that verify the jailer's behavior."""
# pylint: disable=redefined-outer-name
import functools
import http.client as http_client
import os
import resource
import stat
import subprocess
import time
import psutil
import pytest
import requests
import urllib3
import host_tools.cargo_build as build_tools
from framework.defs import FC_BINARY_NAME
from framework.jailer import JailerContext
# These are the permissions that all files/dirs inside the jailer have.
REG_PERMS = (
stat.S_IRUSR
| stat.S_IWUSR
| stat.S_IXUSR
| stat.S_IRGRP
| stat.S_IXGRP
| stat.S_IROTH
| stat.S_IXOTH
)
DIR_STATS = stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
FILE_STATS = stat.S_IFREG | REG_PERMS
SOCK_STATS = stat.S_IFSOCK | REG_PERMS
# These are the stats of the devices created by tha jailer.
CHAR_STATS = stat.S_IFCHR | stat.S_IRUSR | stat.S_IWUSR
# Limit on file size in bytes.
FSIZE = 2097151
# Limit on number of file descriptors.
NOFILE = 1024
# Resource limits to be set by the jailer.
RESOURCE_LIMITS = [
"no-file={}".format(NOFILE),
"fsize={}".format(FSIZE),
]
def check_stats(filepath, stats, uid, gid):
    """Assert on uid, gid and expected stats for the given path."""
    path_info = os.stat(filepath)
    # Ownership must match the jailed uid/gid exactly.
    assert path_info.st_uid == uid
    assert path_info.st_gid == gid
    # XOR is zero only when every file-type and permission bit matches.
    assert path_info.st_mode ^ stats == 0
def test_default_chroot(test_microvm_with_api):
    """
    Test that the jailer assigns a default chroot if none is specified.
    """
    test_microvm = test_microvm_with_api

    # Start customizing arguments.
    # Test that firecracker's default chroot folder is indeed `/srv/jailer`.
    test_microvm.jailer.chroot_base = None

    test_microvm.spawn()

    # Test the expected outcome: the API socket exists inside the
    # default chroot, so the jailer must have created it there.
    assert os.path.exists(test_microvm.jailer.api_socket_path())
def test_empty_jailer_id(test_microvm_with_api):
    """
    Test that the jailer ID cannot be empty.
    """
    test_microvm = test_microvm_with_api
    fc_binary, _ = build_tools.get_firecracker_binaries()

    # Set the jailer ID to the empty string.
    test_microvm.jailer = JailerContext(
        jailer_id="",
        exec_file=fc_binary,
    )

    # Spawning must fail with the jailer's invalid-ID error. If no exception
    # is raised, Firecracker started successfully and there is a bug allowing
    # an empty ID. (Replaces the old try/`assert False`/except pattern.)
    expected_err = (
        "Jailer error: Invalid instance ID: Invalid len (0);"
        " the length must be between 1 and 64"
    )
    with pytest.raises(Exception) as exc_info:
        test_microvm.spawn()
    assert expected_err in str(exc_info.value)
def test_exec_file_not_exist(test_microvm_with_api, tmp_path):
    """
    Test the jailer option `--exec-file`

    Exercises the three rejection paths: missing path, path that is a
    directory, and a filename without "firecracker" in it.
    """
    test_microvm = test_microvm_with_api

    # Error case 1: No such file exists
    pseudo_exec_file_path = tmp_path / "pseudo_firecracker_exec_file"
    test_microvm.jailer.exec_file = pseudo_exec_file_path
    with pytest.raises(
        Exception,
        match=rf"Jailer error: Failed to canonicalize path {pseudo_exec_file_path}:"
        rf" No such file or directory \(os error 2\)",
    ):
        test_microvm.spawn()

    # Error case 2: Not a file
    pseudo_exec_dir_path = tmp_path / "firecracker_test_dir"
    pseudo_exec_dir_path.mkdir()
    test_microvm.jailer.exec_file = pseudo_exec_dir_path
    with pytest.raises(
        Exception,
        match=rf"Jailer error: {pseudo_exec_dir_path} is not a file",
    ):
        test_microvm.spawn()

    # Error case 3: Filename without "firecracker"
    pseudo_exec_file_path = tmp_path / "foobarbaz"
    pseudo_exec_file_path.touch()
    test_microvm.jailer.exec_file = pseudo_exec_file_path
    with pytest.raises(
        Exception,
        match=r"Jailer error: Invalid filename. The filename of `--exec-file` option"
        r' must contain "firecracker": foobarbaz',
    ):
        test_microvm.spawn()
def test_default_chroot_hierarchy(test_microvm_with_api):
    """
    Test the folder hierarchy created by default by the jailer.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()

    jailer = test_microvm.jailer
    chroot = jailer.chroot_path()

    # The chroot root and the directories the jailer creates inside it are
    # all owned by the jailed uid/gid with directory permissions.
    check_stats(chroot, DIR_STATS, jailer.uid, jailer.gid)
    for rel_dir in ("dev", "dev/net", "run"):
        check_stats(os.path.join(chroot, rel_dir), DIR_STATS, jailer.uid, jailer.gid)

    # Device nodes exposed inside the jail.
    for device in ("dev/net/tun", "dev/kvm"):
        check_stats(os.path.join(chroot, device), CHAR_STATS, jailer.uid, jailer.gid)

    # The firecracker binary inside the chroot stays root-owned.
    check_stats(os.path.join(chroot, "firecracker"), FILE_STATS, 0, 0)
def test_arbitrary_usocket_location(test_microvm_with_api):
    """
    Test arbitrary location scenario for the api socket.
    """
    test_microvm = test_microvm_with_api
    # Ask the jailer to place the API socket at a custom relative path
    # inside the chroot.
    test_microvm.jailer.extra_args = {"api-sock": "api.socket"}

    test_microvm.spawn()

    # The socket must exist at the requested location with socket stats and
    # the jailed uid/gid.
    check_stats(
        os.path.join(test_microvm.jailer.chroot_path(), "api.socket"),
        SOCK_STATS,
        test_microvm.jailer.uid,
        test_microvm.jailer.gid,
    )
@functools.lru_cache(maxsize=None)
def cgroup_v2_available():
    """Check if cgroup-v2 is enabled on the system."""
    # https://rootlesscontaine.rs/getting-started/common/cgroup2/#checking-whether-cgroup-v2-is-already-enabled
    # A unified (v2) hierarchy exposes `cgroup.controllers` at its root.
    cgroup2_marker = "/sys/fs/cgroup/cgroup.controllers"
    return os.path.isfile(cgroup2_marker)
@pytest.fixture(scope="session", autouse=True)
def sys_setup_cgroups():
    """Configure cgroupfs in order to run the tests.

    This fixture sets up the cgroups on the system to enable processes
    spawned by the tests be able to create cgroups successfully.

    This set-up is important to do when running from inside a Docker
    container while the system is using cgroup-v2.
    """
    # Hand the detected cgroup version to dependent tests; no teardown needed.
    if cgroup_v2_available():
        yield 2
    else:
        yield 1
def check_cgroups_v1(cgroups, cgroup_location, jailer_id, parent_cgroup=FC_BINARY_NAME):
    """Assert that every cgroupv1 in cgroups is correctly set.

    Each entry has the form `controller.file=value`; the value must have been
    written under `<location>/<controller>/<parent>/<id>/` and a task must be
    present in the cgroup's `tasks` file.
    """
    for cgroup in cgroups:
        controller = cgroup.split(".")[0]
        file_name, value = cgroup.split("=")
        location = cgroup_location + "/{}/{}/{}/".format(
            controller, parent_cgroup, jailer_id
        )
        tasks_file = location + "tasks"
        file = location + file_name

        # Use context managers so the file handles are closed promptly
        # instead of being leaked until garbage collection.
        with open(file, "r", encoding="utf-8") as value_handle:
            assert value_handle.readline().strip() == value
        with open(tasks_file, "r", encoding="utf-8") as tasks_handle:
            assert tasks_handle.readline().strip().isdigit()
def check_cgroups_v2(cgroups, cgroup_location, jailer_id, parent_cgroup=FC_BINARY_NAME):
    """Assert that every cgroupv2 in cgroups is correctly set.

    Walks the unified hierarchy root -> parent cgroup -> jail cgroup,
    checking that each requested controller is available at every level and
    delegated (subtree_control) at the root and parent levels, that the
    requested value was written, and that a process sits in the jail cgroup.
    """

    def _first_line(path):
        # Context manager keeps file handles from leaking (the original
        # opened each file without ever closing it).
        with open(path, "r", encoding="utf-8") as file_handle:
            return file_handle.readline().strip()

    cg_locations = {
        "root": f"{cgroup_location}",
        "fc": f"{cgroup_location}/{parent_cgroup}",
        "jail": f"{cgroup_location}/{parent_cgroup}/{jailer_id}",
    }
    for cgroup in cgroups:
        controller = cgroup.split(".")[0]
        file_name, value = cgroup.split("=")
        procs_file = f'{cg_locations["jail"]}/cgroup.procs'
        file = f'{cg_locations["jail"]}/{file_name}'

        # Controller availability and delegation at root and parent levels.
        for level in ("root", "fc"):
            assert controller in _first_line(
                f"{cg_locations[level]}/cgroup.controllers"
            )
            assert controller in _first_line(
                f"{cg_locations[level]}/cgroup.subtree_control"
            )
        # Availability at the jail level.
        assert controller in _first_line(f'{cg_locations["jail"]}/cgroup.controllers')

        # The configured value was applied and a PID is in the cgroup.
        assert _first_line(file) == value
        assert _first_line(procs_file).isdigit()
def get_cpus(node):
    """Retrieve the CPU list string of the given NUMA node.

    Reads `/sys/devices/system/node/node<N>/cpulist` and returns its first
    line stripped (e.g. "0-15").
    """
    sys_node = "/sys/devices/system/node/node" + str(node)
    assert os.path.isdir(sys_node)
    node_cpus_path = sys_node + "/cpulist"
    # Context manager closes the handle promptly (the original leaked it
    # until garbage collection).
    with open(node_cpus_path, "r", encoding="utf-8") as cpulist:
        return cpulist.readline().strip()
def check_limits(pid, no_file, fsize):
    """Verify that *pid*'s NOFILE and FSIZE rlimits match the expected values.

    Both the soft and the hard limit must equal the expected value.
    """
    for rlimit, expected in (
        (resource.RLIMIT_NOFILE, no_file),
        (resource.RLIMIT_FSIZE, fsize),
    ):
        soft, hard = resource.prlimit(pid, rlimit)
        assert soft == expected
        assert hard == expected
def test_cgroups(test_microvm_with_api, sys_setup_cgroups):
    """
    Test the cgroups are correctly set by the jailer.
    """
    vm = test_microvm_with_api
    vm.jailer.cgroup_ver = sys_setup_cgroups
    # Pick a controller setting matching the requested cgroup version.
    if vm.jailer.cgroup_ver == 2:
        vm.jailer.cgroups = ["cpu.weight.nice=10"]
    else:
        vm.jailer.cgroups = ["cpu.shares=2", "cpu.cfs_period_us=200000"]
    # Pin the microVM to NUMA node 0's memory and CPU set.
    node_cpus = get_cpus(0)
    vm.jailer.cgroups = vm.jailer.cgroups + [
        "cpuset.mems=0",
        f"cpuset.cpus={node_cpus}",
    ]
    vm.spawn()
    # We assume sysfs cgroups are mounted here.
    sys_cgroup = "/sys/fs/cgroup"
    assert os.path.isdir(sys_cgroup)
    # Dispatch to the checker matching the cgroup version in use.
    checker = check_cgroups_v1 if vm.jailer.cgroup_ver == 1 else check_cgroups_v2
    checker(vm.jailer.cgroups, sys_cgroup, vm.jailer.jailer_id)
def test_cgroups_custom_parent(test_microvm_with_api, sys_setup_cgroups):
    """
    Test cgroups when a custom parent cgroup is used.
    """
    vm = test_microvm_with_api
    vm.jailer.cgroup_ver = sys_setup_cgroups
    # Nest the jailer cgroup under a two-level custom parent.
    vm.jailer.parent_cgroup = "custom_cgroup/group2"
    if vm.jailer.cgroup_ver == 2:
        vm.jailer.cgroups = ["cpu.weight=2"]
    else:
        vm.jailer.cgroups = ["cpu.shares=2", "cpu.cfs_period_us=200000"]
    # Pin the microVM to NUMA node 0's memory and CPU set.
    node_cpus = get_cpus(0)
    vm.jailer.cgroups = vm.jailer.cgroups + [
        "cpuset.mems=0",
        f"cpuset.cpus={node_cpus}",
    ]
    vm.spawn()
    # We assume sysfs cgroups are mounted here.
    sys_cgroup = "/sys/fs/cgroup"
    assert os.path.isdir(sys_cgroup)
    # Dispatch to the checker matching the cgroup version in use, passing
    # the custom parent so paths are resolved under it.
    checker = check_cgroups_v1 if vm.jailer.cgroup_ver == 1 else check_cgroups_v2
    checker(
        vm.jailer.cgroups,
        sys_cgroup,
        vm.jailer.jailer_id,
        vm.jailer.parent_cgroup,
    )
def test_node_cgroups(test_microvm_with_api, sys_setup_cgroups):
    """
    Test the numa node cgroups are correctly set by the jailer.
    """
    vm = test_microvm_with_api
    vm.jailer.cgroup_ver = sys_setup_cgroups
    # Only the NUMA node 0 cgroups are requested for this test.
    node_cpus = get_cpus(0)
    vm.jailer.cgroups = ["cpuset.mems=0", f"cpuset.cpus={node_cpus}"]
    vm.spawn()
    # We assume sysfs cgroups are mounted here.
    sys_cgroup = "/sys/fs/cgroup"
    assert os.path.isdir(sys_cgroup)
    # Dispatch to the checker matching the cgroup version in use.
    checker = check_cgroups_v1 if vm.jailer.cgroup_ver == 1 else check_cgroups_v2
    checker(vm.jailer.cgroups, sys_cgroup, vm.jailer.jailer_id)
def test_cgroups_without_numa(test_microvm_with_api, sys_setup_cgroups):
    """
    Test the cgroups are correctly set by the jailer, without numa assignment.
    """
    vm = test_microvm_with_api
    vm.jailer.cgroup_ver = sys_setup_cgroups
    # Pick a controller setting matching the requested cgroup version;
    # no cpuset/NUMA cgroups this time.
    if vm.jailer.cgroup_ver == 2:
        vm.jailer.cgroups = ["cpu.weight=2"]
    else:
        vm.jailer.cgroups = ["cpu.shares=2", "cpu.cfs_period_us=200000"]
    vm.spawn()
    # We assume sysfs cgroups are mounted here.
    sys_cgroup = "/sys/fs/cgroup"
    assert os.path.isdir(sys_cgroup)
    # Dispatch to the checker matching the cgroup version in use.
    checker = check_cgroups_v1 if vm.jailer.cgroup_ver == 1 else check_cgroups_v2
    checker(vm.jailer.cgroups, sys_cgroup, vm.jailer.jailer_id)
@pytest.mark.skipif(
    cgroup_v2_available() is True, reason="Requires system with cgroup-v1 enabled."
)
def test_v1_default_cgroups(test_microvm_with_api):
    """
    Test if the jailer is using cgroup-v1 by default.
    """
    vm = test_microvm_with_api
    vm.jailer.cgroups = ["cpu.shares=2"]
    vm.spawn()
    # We assume sysfs cgroups are mounted here.
    sys_cgroup = "/sys/fs/cgroup"
    assert os.path.isdir(sys_cgroup)
    # No cgroup version was requested, so the jailer must have set up a
    # v1 hierarchy.
    check_cgroups_v1(vm.jailer.cgroups, sys_cgroup, vm.jailer.jailer_id)
def test_args_default_resource_limits(test_microvm_with_api):
    """
    Test the default resource limits are correctly set by the jailer.
    """
    vm = test_microvm_with_api
    vm.spawn()
    # The jailed process must exist before its limits can be queried.
    pid = int(vm.jailer_clone_pid)
    assert pid != 0
    # Default open-fd limit: 2048 for both soft and hard.
    soft, hard = resource.prlimit(pid, resource.RLIMIT_NOFILE)
    assert (soft, hard) == (2048, 2048)
    # No file-size limit is applied by default (-1 means unlimited).
    soft, hard = resource.prlimit(pid, resource.RLIMIT_FSIZE)
    assert (soft, hard) == (-1, -1)
def test_args_resource_limits(test_microvm_with_api):
    """
    Test the resource limits are correctly set by the jailer.
    """
    vm = test_microvm_with_api
    vm.jailer.resource_limits = RESOURCE_LIMITS
    vm.spawn()
    # The jailed process must exist before its limits can be queried.
    pid = int(vm.jailer_clone_pid)
    assert pid != 0
    # The limits requested above must have been applied to the process.
    check_limits(pid, NOFILE, FSIZE)
def test_negative_file_size_limit(uvm_plain):
    """
    Test creating snapshot file fails when size exceeds `fsize` limit.

    The jailer applies an `fsize` resource limit of 1 MiB; writing the
    snapshot memory file past that limit makes the VM shut down after
    intercepting signal 25 (see log-message check below).
    """
    test_microvm = uvm_plain
    # limit to 1MB, to account for logs and metrics
    test_microvm.jailer.resource_limits = [f"fsize={2**20}"]
    test_microvm.spawn()
    test_microvm.basic_config()
    test_microvm.start()
    test_microvm.pause()
    # Attempt to create a snapshot. The guest memory file alone exceeds the
    # 1 MiB limit, so the request is expected to kill the API server
    # mid-flight rather than return an error response.
    try:
        test_microvm.api.snapshot_create.put(
            mem_file_path="/vm.mem",
            snapshot_path="/vm.vmstate",
        )
    except (
        # Any of these connection-level errors may surface depending on
        # exactly when the server dies relative to the request.
        http_client.RemoteDisconnected,
        urllib3.exceptions.ProtocolError,
        requests.exceptions.ConnectionError,
    ) as _error:
        test_microvm.expect_kill_by_signal = True
        # Check the microVM received signal `SIGXFSZ` (25),
        # which corresponds to exceeding file size limit.
        msg = "Shutting down VM after intercepting signal 25, code 0"
        test_microvm.check_log_message(msg)
        time.sleep(1)
        # Check that the process was terminated.
        assert not psutil.pid_exists(test_microvm.jailer_clone_pid)
    else:
        # The request unexpectedly succeeded: the limit was not enforced.
        assert False, "Negative test failed"
def test_negative_no_file_limit(test_microvm_with_api):
    """
    Test microVM is killed when exceeding `no-file` limit.
    """
    vm = test_microvm_with_api
    # Three file descriptors are not enough for the jailer to even start.
    vm.jailer.resource_limits = ["no-file=3"]
    # pylint: disable=W0703
    try:
        vm.spawn()
    except Exception as error:
        # Spawning must fail with EMFILE and leave no jailed process behind.
        assert "No file descriptors available (os error 24)" in str(error)
        assert vm.jailer_clone_pid is None
    else:
        assert False, "Negative test failed"
def test_new_pid_ns_resource_limits(test_microvm_with_api):
    """
    Test that Firecracker process inherits jailer resource limits.
    """
    vm = test_microvm_with_api
    vm.jailer.new_pid_ns = True
    vm.jailer.resource_limits = RESOURCE_LIMITS
    vm.spawn()
    # Firecracker runs inside a fresh PID namespace; fetch its host-side
    # PID and verify the requested limits were inherited.
    fc_pid = vm.pid_in_new_ns
    check_limits(fc_pid, NOFILE, FSIZE)
def test_new_pid_namespace(test_microvm_with_api):
    """
    Test that Firecracker is spawned in a new PID namespace if requested.
    """
    test_microvm = test_microvm_with_api
    test_microvm.jailer.new_pid_ns = True
    test_microvm.spawn()
    # Check that the PID file exists.
    fc_pid = test_microvm.pid_in_new_ns
    # Validate the PID.
    stdout = subprocess.check_output("pidof firecracker", shell=True)
    assert str(fc_pid) in stdout.strip().decode()
    # Get the thread group IDs in each of the PID namespaces of which the
    # Firecracker process is a member. Read /proc directly instead of
    # shelling out to a `cat | grep` pipeline: no shell, no extra
    # processes, and no dependency on grep's exit status.
    with open(f"/proc/{fc_pid}/status", encoding="utf-8") as status_file:
        nstgid_line = next(
            line for line in status_file if line.startswith("NStgid")
        )
    nstgid_list = nstgid_line.strip().split("\t")[1:]
    # Check that Firecracker's PID namespace is nested. `NStgid` should
    # report two values and the last one should be 1, because Firecracker
    # becomes the init(1) process of the new PID namespace it is spawned in.
    assert len(nstgid_list) == 2
    assert int(nstgid_list[1]) == 1
    assert int(nstgid_list[0]) == fc_pid
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,922
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_cpu_features.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests for the CPU topology emulation feature."""
# pylint: disable=too-many-lines
import io
import os
import platform
import re
import shutil
import sys
import time
from difflib import unified_diff
from pathlib import Path
import pandas as pd
import pytest
import framework.utils_cpuid as cpuid_utils
from framework import utils
from framework.defs import SUPPORTED_HOST_KERNELS
from framework.properties import global_props
from framework.utils_cpu_templates import SUPPORTED_CPU_TEMPLATES
PLATFORM = platform.machine()
UNSUPPORTED_HOST_KERNEL = (
utils.get_kernel_version(level=1) not in SUPPORTED_HOST_KERNELS
)
DATA_FILES = Path("./data/msr")
def clean_and_mkdir(dir_path):
"""
Create a clean directory
"""
shutil.rmtree(dir_path, ignore_errors=True)
os.makedirs(dir_path)
def _check_cpuid_x86(test_microvm, expected_cpu_count, expected_htt):
    # Features `cpuid -1` is expected to report for the configured topology.
    expected = {
        "maximum IDs for CPUs in pkg": f"{expected_cpu_count:#x} ({expected_cpu_count})",
        "CLFLUSH line size": "0x8 (8)",
        "hypervisor guest status": "true",
        "hyper-threading / multi-core supported": expected_htt,
    }
    cpuid_utils.check_guest_cpuid_output(test_microvm, "cpuid -1", None, "=", expected)
def _check_extended_cache_features(vm):
    # CPUID leaf 0x80000006 EDX packs the L3 cache descriptor fields.
    l3_params = cpuid_utils.get_guest_cpuid(vm, "0x80000006")[(0x80000006, 0, "edx")]
    # Decode the packed bit fields.
    line_size = l3_params & 0xFF
    lines_per_tag = (l3_params >> 8) & 0xF
    assoc = (l3_params >> 12) & 0xF
    cache_size = (l3_params >> 18) & 0x3FFF
    assert line_size > 0
    assert lines_per_tag == 0x1  # This is hardcoded in the AMD spec
    assert assoc == 0x9  # This is hardcoded in the AMD spec
    assert cache_size > 0
def get_cpu_template_dir(cpu_template):
    """
    Utility function to return a valid string which will be used as
    name of the directory where snapshot artifacts are stored during
    snapshot test and loaded from during restore test.
    """
    # Falsy templates (None, "") map to the literal directory name "none".
    return cpu_template or "none"
def skip_test_based_on_artifacts(snapshot_artifacts_dir):
"""
It is possible that some X template is not supported on
the instance where the snapshots were created and,
snapshot is loaded on an instance where X is supported. This
results in error since restore doesn't find the file to load.
e.g. let's suppose snapshot is created on Skylake and restored
on Cascade Lake. So, the created artifacts could just be:
snapshot_artifacts/wrmsr/vmlinux-4.14/T2S
but the restore test would fail because the files in
snapshot_artifacts/wrmsr/vmlinux-4.14/T2CL won't be available.
To avoid this we make an assumption that if template directory
does not exist then snapshot was not created for that template
and we skip the test.
"""
if not Path.exists(snapshot_artifacts_dir):
reason = f"\n Since {snapshot_artifacts_dir} does not exist \
we skip the test assuming that snapshot was not"
pytest.skip(re.sub(" +", " ", reason))
@pytest.mark.skipif(PLATFORM != "x86_64", reason="CPUID is only supported on x86_64.")
@pytest.mark.parametrize(
    "num_vcpus",
    [1, 2, 16],
)
@pytest.mark.parametrize(
    "htt",
    [True, False],
)
def test_cpuid(test_microvm_with_api, num_vcpus, htt):
    """
    Check the CPUID for a microvm with the specified config.
    """
    microvm = test_microvm_with_api
    microvm.spawn()
    microvm.basic_config(vcpu_count=num_vcpus, smt=htt)
    microvm.add_net_iface()
    microvm.start()
    # With more than one vCPU the guest must report multi-core support.
    expected_htt = "true" if num_vcpus > 1 else "false"
    _check_cpuid_x86(microvm, num_vcpus, expected_htt)
@pytest.mark.skipif(PLATFORM != "x86_64", reason="CPUID is only supported on x86_64.")
@pytest.mark.skipif(
    cpuid_utils.get_cpu_vendor() != cpuid_utils.CpuVendor.AMD,
    reason="L3 cache info is only present in 0x80000006 for AMD",
)
def test_extended_cache_features(test_microvm_with_api):
    """
    Check extended cache features (leaf 0x80000006).
    """
    microvm = test_microvm_with_api
    microvm.spawn()
    microvm.basic_config()
    microvm.add_net_iface()
    microvm.start()
    # Validate the L3 descriptor exposed to the guest.
    _check_extended_cache_features(microvm)
@pytest.mark.skipif(
    PLATFORM != "x86_64", reason="The CPU brand string is masked only on x86_64."
)
def test_brand_string(test_microvm_with_api):
    """
    Ensure good formatting for the guest brand string.

    * For Intel CPUs, the guest brand string should be:
        Intel(R) Xeon(R) Processor @ {host frequency}
    where {host frequency} is the frequency reported by the host CPUID
    (e.g. 4.01GHz)
    * For AMD CPUs, the guest brand string should be:
        AMD EPYC
    * For other CPUs, the guest brand string should be:
        ""
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    test_microvm.basic_config(vcpu_count=1)
    test_microvm.add_net_iface()
    test_microvm.start()
    # Grab the first "model name" line from the guest's /proc/cpuinfo.
    guest_cmd = "cat /proc/cpuinfo | grep 'model name' | head -1"
    _, stdout, stderr = test_microvm.ssh.run(guest_cmd)
    assert stderr == ""
    line = stdout.rstrip()
    mo = re.search("^model name\\s+:\\s+(.+)$", line)
    assert mo
    guest_brand_string = mo.group(1)
    assert guest_brand_string
    cpu_vendor = cpuid_utils.get_cpu_vendor()
    if cpu_vendor == cpuid_utils.CpuVendor.AMD:
        # Assert the model name matches "AMD EPYC"
        mo = re.search("model name.*: AMD EPYC", stdout)
        assert mo
    elif cpu_vendor == cpuid_utils.CpuVendor.INTEL:
        # Get host frequency. Use a context manager so the handle is closed
        # promptly (the previous version leaked the open file object).
        with open("/proc/cpuinfo", "r", encoding="utf-8") as cif:
            cpu_info = cif.read()
        mo = re.search("model name.*:.* ([0-9]*.[0-9]*[G|M|T]Hz)", cpu_info)
        assert mo
        host_frequency = mo.group(1)
        # Assert the model name matches "Intel(R) Xeon(R) Processor @ "
        mo = re.search(
            "model name.*: Intel\\(R\\) Xeon\\(R\\) Processor @ ([0-9]*.[0-9]*[T|G|M]Hz)",
            stdout,
        )
        assert mo
        # Get the frequency
        guest_frequency = mo.group(1)
        # Assert the guest frequency matches the host frequency
        assert host_frequency == guest_frequency
    else:
        # Unexpected vendor: the masking rules above do not apply.
        assert False
# Some MSR values should not be checked since they can change at guest runtime
# and between different boots.
# Current exceptions:
# * FS and GS change on task switch and arch_prctl.
# * TSC is different for each guest.
# * MSR_{C, L}STAR used for SYSCALL/SYSRET; can be different between guests.
# * MSR_IA32_SYSENTER_E{SP, IP} used for SYSENTER/SYSEXIT; same as above.
# * MSR_KVM_{WALL, SYSTEM}_CLOCK addresses for struct pvclock_* can be different.
# * MSR_IA32_TSX_CTRL is not available to read/write via KVM (known limitation).
#
# More detailed information about MSRs can be found in the Intel® 64 and IA-32
# Architectures Software Developer’s Manual - Volume 4: Model-Specific Registers
# Check `arch_gen/src/x86/msr_idex.rs` and `msr-index.h` in upstream Linux
# for symbolic definitions.
# fmt: off
# MSR addresses (hex strings, as emitted by the guest reader script) that are
# excluded from before/after comparisons because their values legitimately
# change at runtime or between boots — see the rationale block above.
MSR_EXCEPTION_LIST = [
    "0x10", # MSR_IA32_TSC
    "0x11", # MSR_KVM_WALL_CLOCK
    "0x12", # MSR_KVM_SYSTEM_TIME
    "0x122", # MSR_IA32_TSX_CTRL
    "0x175", # MSR_IA32_SYSENTER_ESP
    "0x176", # MSR_IA32_SYSENTER_EIP
    "0x6e0", # MSR_IA32_TSC_DEADLINE
    "0xc0000082", # MSR_LSTAR
    "0xc0000083", # MSR_CSTAR
    "0xc0000100", # MSR_FS_BASE
    "0xc0000101", # MSR_GS_BASE
    # MSRs below are required only on T2A, however,
    # we are adding them to the common exception list to keep things simple
    "0x834" , # LVT Performance Monitor Interrupt Register
    "0xc0010007", # MSR_K7_PERFCTR3
    "0xc001020b", # Performance Event Counter MSR_F15H_PERF_CTR5
    "0xc0011029", # MSR_F10H_DECFG also referred to as MSR_AMD64_DE_CFG
    "0x830" , # IA32_X2APIC_ICR is interrupt command register and,
              # bit 0-7 represent interrupt vector that varies.
    "0x83f" , # IA32_X2APIC_SELF_IPI
              # A self IPI is semantically identical to an
              # inter-processor interrupt sent via the ICR,
              # with a Destination Shorthand of Self,
              # Trigger Mode equal to Edge,
              # and a Delivery Mode equal to Fixed.
              # bit 0-7 represent interrupt vector that varies.
]
# fmt: on
# CPU templates for which the MSR read/write tests below are run.
MSR_SUPPORTED_TEMPLATES = ["T2A", "T2CL", "T2S"]
@pytest.fixture(
    name="msr_cpu_template",
    params=sorted(set(SUPPORTED_CPU_TEMPLATES).intersection(MSR_SUPPORTED_TEMPLATES)),
)
def msr_cpu_template_fxt(request):
    """CPU template fixture for MSR read/write supported CPU templates"""
    # Parametrized over the templates that are both supported on this host
    # and listed in MSR_SUPPORTED_TEMPLATES; sorted for stable test IDs.
    return request.param
@pytest.mark.timeout(900)
@pytest.mark.nonci
def test_cpu_rdmsr(
    microvm_factory, msr_cpu_template, guest_kernel, rootfs_ubuntu_22, results_dir
):
    """
    Test MSRs that are available to the guest.

    This test boots a uVM and tries to read a set of MSRs from the guest.
    The guest MSR list is compared against a list of MSRs that are expected
    when running on a particular combination of host CPU model, host kernel,
    guest kernel and CPU template.

    The list is dependent on:
    * host CPU model, since some MSRs are passed through from the host in some
      CPU templates
    * host kernel version, since firecracker relies on MSR emulation provided
      by KVM
    * guest kernel version, since some MSRs are writable from guest uVMs and
      different guest kernels might set different values
    * CPU template, since enabled CPUIDs are different between CPU templates
      and some MSRs are not available if CPUID features are disabled

    This comparison helps validate that defaults have not changed due to
    emulation implementation changes by host kernel patches and CPU templates.

    TODO: This validates T2S, T2CL and T2A templates. Since T2 and C3 did not
    set the ARCH_CAPABILITIES MSR, the value of that MSR is different between
    different host CPU types (see Github PR #3066). So we can either:
    * add an exceptions for different template types when checking values
    * deprecate T2 and C3 since they are somewhat broken

    Testing matrix:
    - All supported guest kernels and rootfs
    - Microvm: 1vCPU with 1024 MB RAM
    """
    vcpus, guest_mem_mib = 1, 1024
    vm = microvm_factory.build(guest_kernel, rootfs_ubuntu_22, monitor_memory=False)
    vm.spawn()
    vm.add_net_iface()
    vm.basic_config(
        vcpu_count=vcpus, mem_size_mib=guest_mem_mib, cpu_template=msr_cpu_template
    )
    vm.start()
    # Run the MSR reader script in the guest; it emits CSV on stdout.
    vm.ssh.scp_put(DATA_FILES / "msr_reader.sh", "/tmp/msr_reader.sh")
    _, stdout, stderr = vm.ssh.run("/tmp/msr_reader.sh")
    assert stderr == ""
    # Load results read from the microvm
    microvm_df = pd.read_csv(io.StringIO(stdout))
    # Load baseline
    host_cpu = global_props.cpu_codename
    host_kv = global_props.host_linux_version
    # Extract the "X.Y" guest kernel version from the vmlinux file name.
    guest_kv = re.search(r"vmlinux-(\d+\.\d+)", guest_kernel.name).group(1)
    baseline_file_name = (
        f"msr_list_{msr_cpu_template}_{host_cpu}_{host_kv}host_{guest_kv}guest.csv"
    )
    # save it as an artifact, so we don't have to manually launch an instance to
    # get a baseline
    save_msrs = results_dir / baseline_file_name
    save_msrs.write_text(stdout)
    # Load baseline
    baseline_file_path = DATA_FILES / baseline_file_name
    # We can use the following line when regathering baselines.
    # microvm_df.to_csv(baseline_file_path, index=False, encoding="utf-8")
    baseline_df = pd.read_csv(baseline_file_path)
    # Fail if the guest's MSR set or any stable MSR value deviates from the
    # recorded baseline.
    check_msrs_are_equal(baseline_df, microvm_df)
# These names need to be consistent across the two parts of the snapshot-restore test
# that spans two instances (one that takes a snapshot and one that restores from it)
# fmt: off
SNAPSHOT_RESTORE_SHARED_NAMES = {
    # Root directories (relative) where snapshot artifacts are exchanged.
    "snapshot_artifacts_root_dir_wrmsr": "snapshot_artifacts/wrmsr",
    "snapshot_artifacts_root_dir_cpuid": "snapshot_artifacts/cpuid",
    # Host-side script and its in-guest destination path.
    "msr_reader_host_fname":             DATA_FILES / "msr_reader.sh",
    "msr_reader_guest_fname":            "/tmp/msr_reader.sh",
    # Dump file names produced before the snapshot / after the restore.
    "msrs_before_fname":                 "msrs_before.txt",
    "msrs_after_fname":                  "msrs_after.txt",
    "cpuid_before_fname":                "cpuid_before.txt",
    "cpuid_after_fname":                 "cpuid_after.txt",
}
# fmt: on
def dump_msr_state_to_file(dump_fname, ssh_conn, shared_names):
"""
Read MSR state via SSH and dump it into a file.
"""
ssh_conn.scp_put(
shared_names["msr_reader_host_fname"], shared_names["msr_reader_guest_fname"]
)
_, stdout, stderr = ssh_conn.run(shared_names["msr_reader_guest_fname"])
assert stderr == ""
with open(dump_fname, "w", encoding="UTF-8") as file:
file.write(stdout)
@pytest.mark.skipif(
    UNSUPPORTED_HOST_KERNEL,
    reason=f"Supported kernels are {SUPPORTED_HOST_KERNELS}",
)
@pytest.mark.timeout(900)
@pytest.mark.nonci
def test_cpu_wrmsr_snapshot(
    microvm_factory, guest_kernel, rootfs_ubuntu_22, msr_cpu_template
):
    """
    This is the first part of the test verifying
    that MSRs retain their values after restoring from a snapshot.

    This function makes MSR value modifications according to the
    ./data/msr/wrmsr_list.txt file.

    Before taking a snapshot, MSR values are dumped into a text file.
    After restoring from the snapshot on another instance, the MSRs are
    dumped again and their values are compared to previous.
    Some MSRs are not inherently supposed to retain their values, so they
    form an MSR exception list.

    This part of the test is responsible for taking a snapshot and publishing
    its files along with the `before` MSR dump.
    """
    shared_names = SNAPSHOT_RESTORE_SHARED_NAMES
    vcpus, guest_mem_mib = 1, 1024
    vm = microvm_factory.build(guest_kernel, rootfs_ubuntu_22, monitor_memory=False)
    vm.spawn()
    vm.add_net_iface()
    # track_dirty_pages is required so a diff snapshot can be taken below.
    vm.basic_config(
        vcpu_count=vcpus,
        mem_size_mib=guest_mem_mib,
        cpu_template=msr_cpu_template,
        track_dirty_pages=True,
    )
    vm.start()
    # Make MSR modifications
    msr_writer_host_fname = DATA_FILES / "msr_writer.sh"
    msr_writer_guest_fname = "/tmp/msr_writer.sh"
    vm.ssh.scp_put(msr_writer_host_fname, msr_writer_guest_fname)
    wrmsr_input_host_fname = DATA_FILES / "wrmsr_list.txt"
    wrmsr_input_guest_fname = "/tmp/wrmsr_input.txt"
    vm.ssh.scp_put(wrmsr_input_host_fname, wrmsr_input_guest_fname)
    _, _, stderr = vm.ssh.run(f"{msr_writer_guest_fname} {wrmsr_input_guest_fname}")
    assert stderr == ""
    # Dump MSR state to a file that will be published to S3 for the 2nd part of the test
    snapshot_artifacts_dir = (
        Path(shared_names["snapshot_artifacts_root_dir_wrmsr"])
        / guest_kernel.name
        / (msr_cpu_template if msr_cpu_template else "none")
    )
    clean_and_mkdir(snapshot_artifacts_dir)
    msrs_before_fname = snapshot_artifacts_dir / shared_names["msrs_before_fname"]
    dump_msr_state_to_file(msrs_before_fname, vm.ssh, shared_names)
    # On T2A, the restore test fails with error "cannot allocate memory" so,
    # adding delay below as a workaround to unblock the tests for now.
    # TODO: Debug the issue and remove this delay. Create below issue to track this:
    # https://github.com/firecracker-microvm/firecracker/issues/3453
    time.sleep(0.25)
    # Take a snapshot
    snapshot = vm.snapshot_diff()
    # Copy snapshot files to be published to S3 for the 2nd part of the test
    snapshot.save_to(snapshot_artifacts_dir)
def check_msrs_are_equal(before_df, after_df):
    """
    Checks that reported MSRs and their values in the files are equal.
    """
    # Step 1: compare only the set of exposed MSRs, ignoring VALUE. Rows
    # appearing in exactly one of the two frames survive drop_duplicates
    # with keep=False and constitute the difference.
    addr_rows = pd.concat(
        [before_df.drop(columns="VALUE"), after_df.drop(columns="VALUE")],
        keys=["before", "after"],
    )
    impl_diff = addr_rows.drop_duplicates(keep=False)
    assert impl_diff.empty, f"\n {impl_diff.to_string()}"
    # Step 2: drop MSRs whose values legitimately change at runtime.
    before_df = before_df[~before_df["MSR_ADDR"].isin(MSR_EXCEPTION_LIST)]
    after_df = after_df[~after_df["MSR_ADDR"].isin(MSR_EXCEPTION_LIST)]
    # Step 3: compare the remaining rows, values included.
    val_rows = pd.concat([before_df, after_df], keys=["before", "after"])
    val_diff = val_rows.drop_duplicates(keep=False)
    assert val_diff.empty, f"\n {val_diff.to_string()}"
@pytest.mark.skipif(
    UNSUPPORTED_HOST_KERNEL,
    reason=f"Supported kernels are {SUPPORTED_HOST_KERNELS}",
)
@pytest.mark.timeout(900)
@pytest.mark.nonci
def test_cpu_wrmsr_restore(microvm_factory, msr_cpu_template, guest_kernel):
    """
    This is the second part of the test verifying
    that MSRs retain their values after restoring from a snapshot.

    Before taking a snapshot, MSR values are dumped into a text file.
    After restoring from the snapshot on another instance, the MSRs are
    dumped again and their values are compared to previous.
    Some MSRs are not inherently supposed to retain their values, so they
    form an MSR exception list.

    This part of the test is responsible for restoring from a snapshot and
    comparing two sets of MSR values.
    """
    shared_names = SNAPSHOT_RESTORE_SHARED_NAMES
    cpu_template_dir = msr_cpu_template if msr_cpu_template else "none"
    snapshot_artifacts_dir = (
        Path(shared_names["snapshot_artifacts_root_dir_wrmsr"])
        / guest_kernel.name
        / cpu_template_dir
    )
    # Skip when the snapshot half of the test never produced artifacts for
    # this template (e.g. unsupported on the source instance).
    skip_test_based_on_artifacts(snapshot_artifacts_dir)
    vm = microvm_factory.build()
    vm.spawn()
    vm.restore_from_path(snapshot_artifacts_dir, resume=True)
    # Dump MSR state to a file for further comparison
    msrs_after_fname = snapshot_artifacts_dir / shared_names["msrs_after_fname"]
    dump_msr_state_to_file(msrs_after_fname, vm.ssh, shared_names)
    # Compare the two lists of MSR values and assert they are equal
    before_df = pd.read_csv(snapshot_artifacts_dir / shared_names["msrs_before_fname"])
    after_df = pd.read_csv(snapshot_artifacts_dir / shared_names["msrs_after_fname"])
    check_msrs_are_equal(before_df, after_df)
def dump_cpuid_to_file(dump_fname, ssh_conn):
"""
Read CPUID via SSH and dump it into a file.
"""
_, stdout, stderr = ssh_conn.run("cpuid --one-cpu")
assert stderr == ""
dump_fname.write_text(stdout, encoding="UTF-8")
@pytest.mark.skipif(
    UNSUPPORTED_HOST_KERNEL,
    reason=f"Supported kernels are {SUPPORTED_HOST_KERNELS}",
)
@pytest.mark.timeout(900)
@pytest.mark.nonci
def test_cpu_cpuid_snapshot(
    microvm_factory, guest_kernel, rootfs_ubuntu_22, msr_cpu_template
):
    """
    This is the first part of the test verifying
    that CPUID remains the same after restoring from a snapshot.

    Before taking a snapshot, CPUID is dumped into a text file.
    After restoring from the snapshot on another instance, the CPUID is
    dumped again and its content is compared to previous.

    This part of the test is responsible for taking a snapshot and publishing
    its files along with the `before` CPUID dump.
    """
    shared_names = SNAPSHOT_RESTORE_SHARED_NAMES
    vm = microvm_factory.build(
        kernel=guest_kernel,
        rootfs=rootfs_ubuntu_22,
    )
    vm.spawn()
    vm.add_net_iface()
    # track_dirty_pages is required so a diff snapshot can be taken below.
    vm.basic_config(
        vcpu_count=1,
        mem_size_mib=1024,
        cpu_template=msr_cpu_template,
        track_dirty_pages=True,
    )
    vm.start()
    # Dump CPUID to a file that will be published to S3 for the 2nd part of the test
    cpu_template_dir = get_cpu_template_dir(msr_cpu_template)
    snapshot_artifacts_dir = (
        Path(shared_names["snapshot_artifacts_root_dir_cpuid"])
        / guest_kernel.name
        / cpu_template_dir
    )
    clean_and_mkdir(snapshot_artifacts_dir)
    cpuid_before_fname = snapshot_artifacts_dir / shared_names["cpuid_before_fname"]
    dump_cpuid_to_file(cpuid_before_fname, vm.ssh)
    # Take a snapshot
    snapshot = vm.snapshot_diff()
    # Copy snapshot files to be published to S3 for the 2nd part of the test
    snapshot.save_to(snapshot_artifacts_dir)
def check_cpuid_is_equal(before_cpuid_fname, after_cpuid_fname):
"""
Checks that CPUID dumps in the files are equal.
"""
with open(before_cpuid_fname, "r", encoding="UTF-8") as file:
before = file.readlines()
with open(after_cpuid_fname, "r", encoding="UTF-8") as file:
after = file.readlines()
diff = sys.stdout.writelines(unified_diff(before, after))
assert not diff, f"\n\n{diff}"
@pytest.mark.skipif(
    UNSUPPORTED_HOST_KERNEL,
    reason=f"Supported kernels are {SUPPORTED_HOST_KERNELS}",
)
@pytest.mark.timeout(900)
@pytest.mark.nonci
def test_cpu_cpuid_restore(microvm_factory, guest_kernel, msr_cpu_template):
    """
    This is the second part of the test verifying
    that CPUID remains the same after restoring from a snapshot.

    Before taking a snapshot, CPUID is dumped into a text file.
    After restoring from the snapshot on another instance, the CPUID is
    dumped again and compared to previous.

    This part of the test is responsible for restoring from a snapshot and
    comparing two CPUIDs.
    """
    shared_names = SNAPSHOT_RESTORE_SHARED_NAMES
    cpu_template_dir = get_cpu_template_dir(msr_cpu_template)
    snapshot_artifacts_dir = (
        Path(shared_names["snapshot_artifacts_root_dir_cpuid"])
        / guest_kernel.name
        / cpu_template_dir
    )
    # Skip when the snapshot half of the test never produced artifacts for
    # this template (e.g. unsupported on the source instance).
    skip_test_based_on_artifacts(snapshot_artifacts_dir)
    vm = microvm_factory.build()
    vm.spawn()
    vm.restore_from_path(snapshot_artifacts_dir, resume=True)
    # Dump CPUID to a file for further comparison
    cpuid_after_fname = snapshot_artifacts_dir / shared_names["cpuid_after_fname"]
    dump_cpuid_to_file(cpuid_after_fname, vm.ssh)
    # Compare the two CPUID dumps and assert they are equal
    check_cpuid_is_equal(
        snapshot_artifacts_dir / shared_names["cpuid_before_fname"],
        snapshot_artifacts_dir / shared_names["cpuid_after_fname"],
    )
@pytest.mark.skipif(
    PLATFORM != "x86_64", reason="CPU features are masked only on x86_64."
)
@pytest.mark.parametrize("cpu_template", ["T2", "T2S", "C3"])
def test_cpu_template(test_microvm_with_api, cpu_template):
    """
    Test masked and enabled cpu features against the expected template.

    This test checks that all expected masked features are not present in the
    guest and that expected enabled features are present for each of the
    supported CPU templates.
    """
    vm = test_microvm_with_api
    vm.spawn()
    # Set template as specified in the `cpu_template` parameter.
    vm.basic_config(
        vcpu_count=1,
        mem_size_mib=256,
        cpu_template=cpu_template,
    )
    vm.add_net_iface()
    if cpuid_utils.get_cpu_vendor() != cpuid_utils.CpuVendor.INTEL:
        # We shouldn't be able to apply Intel templates on AMD hosts
        with pytest.raises(RuntimeError):
            vm.start()
        return
    vm.start()
    check_masked_features(vm, cpu_template)
    check_enabled_features(vm, cpu_template)
def check_masked_features(test_microvm, cpu_template):
    """Verify the masked features of the given template.

    Builds the per-template list of CPUID feature bits that MUST be unset in
    the guest and delegates the check to `cpuid_utils.check_cpuid_feat_flags`.

    Raises:
        ValueError: if `cpu_template` is not one of "C3", "T2" or "T2S".
    """
    # fmt: off
    if cpu_template == "C3":
        must_be_unset = [
            (0x1, 0x0, "ecx",
                (1 << 2) | # DTES64
                (1 << 3) | # MONITOR
                (1 << 4) | # DS_CPL_SHIFT
                (1 << 5) | # VMX
                (1 << 8) | # TM2
                (1 << 10) | # CNXT_ID
                (1 << 11) | # SDBG
                (1 << 12) | # FMA
                (1 << 14) | # XTPR_UPDATE
                (1 << 15) | # PDCM
                (1 << 22) # MOVBE
            ),
            (0x1, 0x0, "edx",
                (1 << 18) | # PSN
                (1 << 20) | # DS
                (1 << 22) | # ACPI
                (1 << 27) | # SS
                (1 << 29) | # TM
                (1 << 31) # PBE
            ),
            (0x7, 0x0, "ebx",
                (1 << 2) | # SGX
                (1 << 3) | # BMI1
                (1 << 4) | # HLE
                (1 << 5) | # AVX2
                (1 << 8) | # BMI2
                (1 << 10) | # INVPCID
                (1 << 11) | # RTM
                (1 << 12) | # RDT_M
                (1 << 14) | # MPX
                (1 << 15) | # RDT_A
                (1 << 16) | # AVX512F
                (1 << 17) | # AVX512DQ
                (1 << 18) | # RDSEED
                (1 << 19) | # ADX
                (1 << 21) | # AVX512IFMA
                (1 << 23) | # CLFLUSHOPT
                (1 << 24) | # CLWB
                (1 << 25) | # PT
                (1 << 26) | # AVX512PF
                (1 << 27) | # AVX512ER
                (1 << 28) | # AVX512CD
                (1 << 29) | # SHA
                (1 << 30) | # AVX512BW
                (1 << 31) # AVX512VL
            ),
            (0x7, 0x0, "ecx",
                (1 << 1) | # AVX512_VBMI
                (1 << 2) | # UMIP
                (1 << 3) | # PKU
                (1 << 4) | # OSPKE
                (1 << 11) | # AVX512_VNNI
                (1 << 14) | # AVX512_VPOPCNTDQ
                (1 << 16) | # LA57
                (1 << 22) | # RDPID
                (1 << 30) # SGX_LC
            ),
            (0x7, 0x0, "edx",
                (1 << 2) | # AVX512_4VNNIW
                (1 << 3) # AVX512_4FMAPS
            ),
            (0xd, 0x0, "eax",
                (1 << 3) | # MPX_STATE bit 0
                (1 << 4) | # MPX_STATE bit 1
                (1 << 5) | # AVX512_STATE bit 0
                (1 << 6) | # AVX512_STATE bit 1
                (1 << 7) | # AVX512_STATE bit 2
                (1 << 9) # PKRU
            ),
            (0xd, 0x1, "eax",
                (1 << 1) | # XSAVEC_SHIFT
                (1 << 2) | # XGETBV_SHIFT
                (1 << 3) # XSAVES_SHIFT
            ),
            (0x80000001, 0x0, "ecx",
                (1 << 5) | # LZCNT
                (1 << 8) # PREFETCH
            ),
            (0x80000001, 0x0, "edx",
                (1 << 26) # PDPE1GB
            ),
        ]
    elif cpu_template in ("T2", "T2S"):
        must_be_unset = [
            (0x1, 0x0, "ecx",
                (1 << 2) | # DTES64
                (1 << 3) | # MONITOR
                (1 << 4) | # DS_CPL_SHIFT
                (1 << 5) | # VMX
                (1 << 6) | # SMX
                (1 << 7) | # EIST
                (1 << 8) | # TM2
                (1 << 10) | # CNXT_ID
                (1 << 11) | # SDBG
                (1 << 14) | # XTPR_UPDATE
                (1 << 15) | # PDCM
                (1 << 18) # DCA
            ),
            (0x1, 0x0, "edx",
                (1 << 18) | # PSN
                (1 << 20) | # DS
                (1 << 22) | # ACPI
                (1 << 27) | # SS
                (1 << 29) | # TM
                (1 << 30) | # IA64
                (1 << 31) # PBE
            ),
            (0x7, 0x0, "ebx",
                (1 << 2) | # SGX
                (1 << 4) | # HLE
                (1 << 11) | # RTM
                (1 << 12) | # RDT_M
                (1 << 14) | # MPX
                (1 << 15) | # RDT_A
                (1 << 16) | # AVX512F
                (1 << 17) | # AVX512DQ
                (1 << 18) | # RDSEED
                (1 << 19) | # ADX
                (1 << 21) | # AVX512IFMA
                (1 << 22) | # PCOMMIT
                (1 << 23) | # CLFLUSHOPT
                (1 << 24) | # CLWB
                (1 << 25) | # PT
                (1 << 26) | # AVX512PF
                (1 << 27) | # AVX512ER
                (1 << 28) | # AVX512CD
                (1 << 29) | # SHA
                (1 << 30) | # AVX512BW
                (1 << 31) # AVX512VL
            ),
            (0x7, 0x0, "ecx",
                (1 << 1) | # AVX512_VBMI
                (1 << 2) | # UMIP
                (1 << 3) | # PKU
                (1 << 4) | # OSPKE
                (1 << 6) | # AVX512_VBMI2
                (1 << 8) | # GFNI
                (1 << 9) | # VAES
                (1 << 10) | # VPCLMULQDQ
                (1 << 11) | # AVX512_VNNI
                (1 << 12) | # AVX512_BITALG
                (1 << 14) | # AVX512_VPOPCNTDQ
                (1 << 16) | # LA57
                (1 << 22) | # RDPID
                (1 << 30) # SGX_LC
            ),
            (0x7, 0x0, "edx",
                (1 << 2) | # AVX512_4VNNIW
                (1 << 3) | # AVX512_4FMAPS
                (1 << 4) | # FSRM
                (1 << 8) # AVX512_VP2INTERSECT
            ),
            (0xd, 0x0, "eax",
                (1 << 3) | # MPX_STATE bit 0
                (1 << 4) | # MPX_STATE bit 1
                (1 << 5) | # AVX512_STATE bit 0
                (1 << 6) | # AVX512_STATE bit 1
                (1 << 7) | # AVX512_STATE bit 2
                (1 << 9) # PKRU
            ),
            (0xd, 0x1, "eax",
                (1 << 1) | # XSAVEC_SHIFT
                (1 << 2) | # XGETBV_SHIFT
                (1 << 3) # XSAVES_SHIFT
            ),
            (0x80000001, 0x0, "ecx",
                (1 << 8) | # PREFETCH
                (1 << 29) # MWAIT_EXTENDED
            ),
            (0x80000001, 0x0, "edx",
                (1 << 26) # PDPE1GB
            ),
            (0x80000008, 0x0, "ebx",
                (1 << 9) # WBNOINVD
            )
        ]
    else:
        # Previously an unknown template fell through to a NameError on
        # `must_be_unset` below; fail with an explicit error instead.
        raise ValueError(f"Unsupported CPU template: {cpu_template}")
    # fmt: on
    cpuid_utils.check_cpuid_feat_flags(
        test_microvm,
        [],
        must_be_unset,
    )
def check_enabled_features(test_microvm, cpu_template):
    """Test for checking that all expected features are enabled in guest."""
    # Features that every supported configuration must expose to the guest.
    always_on = (
        # feature_info_1_edx
        "x87 FPU on chip",
        "CMPXCHG8B inst.",
        "VME: virtual-8086 mode enhancement",
        "SSE extensions",
        "SSE2 extensions",
        "DE: debugging extensions",
        "PSE: page size extensions",
        "TSC: time stamp counter",
        "RDMSR and WRMSR support",
        "PAE: physical address extensions",
        "MCE: machine check exception",
        "APIC on chip",
        "MMX Technology",
        "SYSENTER and SYSEXIT",
        "MTRR: memory type range registers",
        "PTE global bit",
        "FXSAVE/FXRSTOR",
        "MCA: machine check architecture",
        "CMOV: conditional move/compare instr",
        "PAT: page attribute table",
        "PSE-36: page size extension",
        "CLFLUSH instruction",
        # feature_info_1_ecx
        "PNI/SSE3: Prescott New Instructions",
        "PCLMULDQ instruction",
        "SSSE3 extensions",
        "AES instruction",
        "CMPXCHG16B instruction",
        "PCID: process context identifiers",
        "SSE4.1 extensions",
        "SSE4.2 extensions",
        "x2APIC: extended xAPIC support",
        "POPCNT instruction",
        "time stamp counter deadline",
        "XSAVE/XSTOR states",
        "OS-enabled XSAVE/XSTOR",
        "AVX: advanced vector extensions",
        "F16C half-precision convert instruction",
        "RDRAND instruction",
        "hypervisor guest status",
        # thermal_and_power_mgmt
        "ARAT always running APIC timer",
        # extended_features
        "FSGSBASE instructions",
        "IA32_TSC_ADJUST MSR supported",
        "SMEP supervisor mode exec protection",
        "enhanced REP MOVSB/STOSB",
        "SMAP: supervisor mode access prevention",
        # xsave_0xd_0
        "XCR0 supported: x87 state",
        "XCR0 supported: SSE state",
        "XCR0 supported: AVX state",
        # xsave_0xd_1
        "XSAVEOPT instruction",
        # extended_080000001_edx
        "SYSCALL and SYSRET instructions",
        "64-bit extensions technology available",
        "execution disable",
        "RDTSCP",
        # intel_080000001_ecx
        "LAHF/SAHF supported in 64-bit mode",
        # adv_pwr_mgmt
        "TscInvariant",
    )
    cpuid_utils.check_guest_cpuid_output(
        test_microvm, "cpuid -1", None, "=", {feat: "true" for feat in always_on}
    )
    if cpu_template == "T2":
        # Features guaranteed only by the T2 template.
        t2_only = (
            "FMA instruction",
            "BMI1 instructions",
            "BMI2 instructions",
            "AVX2: advanced vector extensions 2",
            "MOVBE instruction",
            "INVPCID instruction",
        )
        cpuid_utils.check_guest_cpuid_output(
            test_microvm, "cpuid -1", None, "=", {feat: "true" for feat in t2_only}
        )
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,923
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/build/test_dependencies.py
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Enforces controls over dependencies."""
import os
import pytest
from host_tools import proc
from host_tools.cargo_build import cargo
# NOTE(review): license checking is host-arch independent, so presumably it is
# gated to Intel hosts just to run once in CI — confirm.
pytestmark = pytest.mark.skipif(
    "Intel" not in proc.proc_type(), reason="test only runs on Intel"
)
def test_licenses():
    """Ensure license compatibility for Firecracker.

    For a list of currently allowed licenses checkout deny.toml in
    the root directory.
    """
    # This file lives three directories below the repo root.
    tests_dir = os.path.dirname(os.path.realpath(__file__))
    toml_file = os.path.normpath(os.path.join(tests_dir, "../../../Cargo.toml"))
    cargo("deny", f"--manifest-path {toml_file} check licenses bans")
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,924
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/build/test_unittests.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""A test that ensures that all unit tests pass at integration time."""
import platform
import pytest
import host_tools.cargo_build as host # pylint:disable=import-error
# Host architecture (e.g. "x86_64" / "aarch64"), used to pick the Rust target.
MACHINE = platform.machine()
# Currently profiling with `aarch64-unknown-linux-musl` is unsupported (see
# https://github.com/rust-lang/rustup/issues/3095#issuecomment-1280705619) therefore we profile and
# run coverage with the `gnu` toolchains and run unit tests with the `musl` toolchains.
TARGET = "{}-unknown-linux-musl".format(MACHINE)
@pytest.mark.timeout(600)
def test_unittests(test_fc_session_root_path):
    """
    Run unit and doc tests for all supported targets.
    """
    target_flag = "--target {} ".format(TARGET)
    host.cargo_test(test_fc_session_root_path, extra_args=target_flag)
def test_benchmarks_compile():
    """Checks that all benchmarks compile"""
    bench_args = f"--all --no-run --target {TARGET}"
    host.cargo("bench", bench_args)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,925
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/framework/state_machine.py
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Defines a stream based string matcher and a generic state object."""
# Too few public methods (1/2) (too-few-public-methods)
# pylint: disable=R0903
class MatchStaticString:
    """Match a static string versus input."""

    # Prevent state objects from being collected by pytest.
    __test__ = False

    def __init__(self, match_string):
        """Initialize using specified match string."""
        self._string = match_string
        self._input = ""

    def match(self, input_char) -> bool:
        """
        Check if `_input` matches the match `_string`.

        Process one char at a time and build `_input` string.
        Preserve built `_input` if partially matches `_string`.
        Return True when `_input` is the same as `_string`.
        """
        if input_char == "":
            return False
        buffered = self._input + str(input_char)
        if not self._string.startswith(buffered):
            # Mismatch: drop the oldest buffered char and keep scanning.
            self._input = buffered[1:]
            return False
        if len(buffered) == len(self._string):
            # Full match: reset the buffer for the next search.
            self._input = ""
            return True
        self._input = buffered
        return False
class TestState(MatchStaticString):
    """Generic test state object."""

    # Prevent state objects from being collected by pytest.
    __test__ = False

    def __init__(self, match_string=""):
        """Initialize state fields."""
        super().__init__(match_string)
        print("\n*** Current test state: ", str(self), end="")

    def handle_input(self, serial, input_char):
        """Handle input event and return next state."""

    def __str__(self):
        """Return state name."""
        return self.__class__.__name__

    def __repr__(self):
        """Leverages the __str__ method to describe the TestState."""
        return str(self)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,926
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/build/test_binary_size.py
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests that check if the release binary sizes fall within expected size.
This is not representative of the actual memory overhead of Firecracker.
A more representative test is file:../performance/test_memory_overhead.py
"""
import platform
import pytest
import host_tools.cargo_build as host
# Host CPU architecture, emitted as a metrics dimension below.
MACHINE = platform.machine()
@pytest.mark.timeout(500)
def test_firecracker_binary_size(record_property, metrics):
    """
    Test if the size of the firecracker binary is within expected ranges.
    """
    binary_size = host.get_binary("firecracker").stat().st_size
    record_property("firecracker_binary_size", f"{binary_size}B")
    metrics.set_dimensions({"cpu_arch": MACHINE})
    metrics.put_metric("firecracker_binary_size", binary_size, unit="Bytes")
@pytest.mark.timeout(500)
def test_jailer_binary_size(record_property, metrics):
    """
    Test if the size of the jailer binary is within expected ranges.
    """
    binary_size = host.get_binary("jailer").stat().st_size
    record_property("jailer_binary_size", f"{binary_size}B")
    metrics.set_dimensions({"cpu_arch": MACHINE})
    metrics.put_metric("jailer_binary_size", binary_size, unit="Bytes")
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,927
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_uffd.py
|
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Test UFFD related functionality when resuming from snapshot."""
import os
import re
import stat
from subprocess import TimeoutExpired
import pytest
import requests
from framework.utils import Timeout, UffdHandler, run_cmd
# Path (as seen from inside the jail) of the UFFD handler's Unix socket.
SOCKET_PATH = "/firecracker-uffd.sock"
@pytest.fixture(scope="function", name="snapshot")
def snapshot_fxt(microvm_factory, guest_kernel_linux_5_10, rootfs_ubuntu_22):
    """Create a full snapshot of a booted microVM.

    The base VM is killed before yielding, so each test restores from the
    snapshot files rather than from a live VM.
    """
    basevm = microvm_factory.build(guest_kernel_linux_5_10, rootfs_ubuntu_22)
    basevm.spawn()
    basevm.basic_config(vcpu_count=2, mem_size_mib=256)
    basevm.add_net_iface()
    # Add a memory balloon.
    basevm.api.balloon.put(
    amount_mib=0, deflate_on_oom=True, stats_polling_interval_s=0
    )
    basevm.start()
    # Verify if guest can run commands.
    exit_code, _, _ = basevm.ssh.run("sync")
    assert exit_code == 0
    # Create base snapshot.
    snapshot = basevm.snapshot_full()
    basevm.kill()
    yield snapshot
def spawn_pf_handler(vm, handler_path, mem_path):
    """Spawn page fault handler process.

    Copies the snapshot memory file and handler binary into the microVM's
    chroot, then launches the handler from inside the chroot so the paths it
    observes match the jailed Firecracker's view.

    Returns the `UffdHandler` wrapping the running process.

    Fix over the previous version: the saved root fd was leaked, and an
    exception raised while chrooted left the whole test process stuck inside
    the jail. The escape + fd close now happen in a `finally` block.
    """
    # Copy snapshot memory file into chroot of microVM.
    jailed_mem = vm.create_jailed_resource(mem_path)
    # Copy the valid page fault binary into chroot of microVM.
    jailed_handler = vm.create_jailed_resource(handler_path)
    handler_name = os.path.basename(jailed_handler)
    uffd_handler = UffdHandler(handler_name, [SOCKET_PATH, jailed_mem])

    # Remember the real root and cwd so we can escape the chroot afterwards.
    real_root = os.open("/", os.O_RDONLY)
    working_dir = os.getcwd()
    try:
        os.chroot(vm.chroot())
        os.chdir("/")
        st = os.stat(handler_name)
        os.chmod(handler_name, st.st_mode | stat.S_IEXEC)
        uffd_handler.spawn()
        try:
            outs, errs = uffd_handler.proc().communicate(timeout=1)
            print(outs)
            print(errs)
            assert False, "Could not start PF handler!"
        except TimeoutExpired:
            # Surviving the 1s timeout means the handler is up and serving.
            print("This is the good case!")
        # The page fault handler will create the socket path with root rights.
        # Change rights to the jailer's.
        os.chown(SOCKET_PATH, vm.jailer.uid, vm.jailer.gid)
    finally:
        # Escape the chroot and release the saved root fd.
        os.fchdir(real_root)
        os.chroot(".")
        os.chdir(working_dir)
        os.close(real_root)
    return uffd_handler
def test_bad_socket_path(uvm_plain, snapshot):
    """
    Test error scenario when socket path does not exist.
    """
    microvm = uvm_plain
    microvm.spawn()
    vmstate_in_jail = microvm.create_jailed_resource(snapshot.vmstate)
    # Point the Uffd backend at a path that was never created.
    backend = {"backend_type": "Uffd", "backend_path": "inexistent"}
    expected_msg = re.escape(
        "Load microVM snapshot error: Failed to restore from snapshot: Failed to load guest "
        "memory: Error creating guest memory from uffd: Failed to connect to UDS Unix stream: No "
        "such file or directory (os error 2)"
    )
    with pytest.raises(RuntimeError, match=expected_msg):
        microvm.api.snapshot_load.put(
            snapshot_path=vmstate_in_jail,
            mem_backend=backend,
        )
def test_unbinded_socket(uvm_plain, snapshot):
    """
    Test error scenario when PF handler has not yet called bind on socket.
    """
    microvm = uvm_plain
    microvm.spawn()
    vmstate_in_jail = microvm.create_jailed_resource(snapshot.vmstate)
    # Create the socket file, but with nothing listening on it.
    socket_path = os.path.join(microvm.path, "firecracker-uffd.sock")
    run_cmd("touch {}".format(socket_path))
    sock_in_jail = microvm.create_jailed_resource(socket_path)
    expected_msg = re.escape(
        "Load microVM snapshot error: Failed to restore from snapshot: Failed to load guest "
        "memory: Error creating guest memory from uffd: Failed to connect to UDS Unix stream: "
        "Connection refused (os error 111)"
    )
    with pytest.raises(RuntimeError, match=expected_msg):
        microvm.api.snapshot_load.put(
            snapshot_path=vmstate_in_jail,
            mem_backend={"backend_type": "Uffd", "backend_path": sock_in_jail},
        )
def test_valid_handler(uvm_plain, snapshot, uffd_handler_paths):
    """
    Test valid uffd handler scenario.
    """
    microvm = uvm_plain
    microvm.memory_monitor = None
    microvm.spawn()

    # Spawn page fault handler process.
    _pf_handler = spawn_pf_handler(
        microvm, uffd_handler_paths["valid_handler"], snapshot.mem
    )

    microvm.restore_from_snapshot(snapshot, resume=True, uffd_path=SOCKET_PATH)

    # Inflate, then deflate, the balloon.
    microvm.api.balloon.patch(amount_mib=200)
    microvm.api.balloon.patch(amount_mib=0)

    # Verify if guest can run commands.
    exit_code, _, _ = microvm.ssh.run("sync")
    assert exit_code == 0
def test_malicious_handler(uvm_plain, snapshot, uffd_handler_paths):
    """
    Test malicious uffd handler scenario.

    The page fault handler panics when receiving a page fault,
    so no events are handled and snapshot memory regions cannot be
    loaded into memory. In this case, Firecracker is designed to freeze,
    instead of silently switching to having the kernel handle page
    faults, so that it becomes obvious that something went wrong.
    """
    microvm = uvm_plain
    microvm.memory_monitor = None
    microvm.spawn()

    # Spawn page fault handler process.
    _pf_handler = spawn_pf_handler(
        microvm, uffd_handler_paths["malicious_handler"], snapshot.mem
    )

    # We expect Firecracker to freeze while resuming from a snapshot
    # due to the malicious handler's unavailability.
    try:
        with Timeout(seconds=30):
            microvm.restore_from_snapshot(snapshot, resume=True, uffd_path=SOCKET_PATH)
            assert False, "Firecracker should freeze"
    except (TimeoutError, requests.exceptions.ReadTimeout):
        pass
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,928
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/host_tools/ip_generator.py
|
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# pylint:disable=redefined-outer-name
"""
Each microVM needs to have a unique IP on the host network, or there will be
conflicts.
Helper classes to hand out IPs.
"""
import math
import os
from ipaddress import ip_network
import pytest
class SubnetGenerator:
    """Simple subnet allocator"""

    def __init__(self, network_str, subnet_netmask_len=24):
        """Carve `network_str` into /`subnet_netmask_len` subnets."""
        self.network = ip_network(network_str)
        prefix_diff = subnet_netmask_len - self.network.prefixlen
        self._subnets = self.network.subnets(prefix_diff)
        self._returned_subnets = []

    def borrow_subnet(self):
        """Borrow a subnet from the pool"""
        # Prefer recycling a previously returned subnet.
        if self._returned_subnets:
            return self._returned_subnets.pop(0)
        return next(self._subnets)

    def return_subnet(self, subnet):
        """Return a subnet to the pool"""
        self._returned_subnets.append(subnet)
class IPv4Generator:
    """Simple IPv4 allocator"""

    def __init__(self, network):
        self.network = network
        self._hosts = enumerate(self.network)

    def next_ip(self):
        """Get the next ip"""
        return next(self._hosts)

    def get_next_available_ips_aligned(self, count, netmask_len=30):
        """
        Allocate `count` contiguous IPs within the same `netmask_len` network.
        """
        block = 2 ** (32 - netmask_len)
        if count > block:
            raise ValueError("Cannot give IPs in the same subnet")
        allocated = []
        while len(allocated) < count:
            idx, addr = self.next_ip()
            boundary = math.ceil(idx / block) * block
            remaining = count - len(allocated)
            # Not enough room before the next aligned boundary: burn the
            # intervening addresses and retry from the boundary.
            if idx + remaining >= boundary:
                for _ in range(boundary - idx):
                    self.next_ip()
                continue
            # Never hand out the network address itself.
            if idx % block == 0:
                continue
            allocated.append(addr)
        return [str(a) for a in allocated]

    get_next_available_ips = get_next_available_ips_aligned
@pytest.fixture(scope="session")
def subnet_generator(worker_id):
    """
    Yield a SubnetGenerator per pytest worker

    We use the 16-bit block 192.168.0.0/16 as it's (empirically) the least
    likely to conflict with a cloud provider private IPs.

    https://en.wikipedia.org/wiki/Private_network
    """
    # Example worker_id = gw4
    worker_num = 0 if worker_id == "master" else int(worker_id[2:])
    # We use the worker id to carve separate networks, as large as possible
    worker_count = int(os.environ.get("PYTEST_XDIST_WORKER_COUNT", 1))
    bits = math.ceil(math.log2(worker_count))
    # Narrow 192.168.0.0/16 by `bits` so each worker gets a disjoint slice.
    netmask = 16 + bits
    o3 = 2 ** (8 - bits) * worker_num
    # Most tests just need a /30, but some tests may want more IPs, so we give
    # each single test a whole /24. This is OK since those /24s are returned at
    # the end of the test.
    return SubnetGenerator(f"192.168.{o3}.0/{netmask}", 24)
@pytest.fixture
def network_config(subnet_generator):
    """Yield an IPv4Generator per test"""
    # Borrow a subnet for the duration of the test, return it afterwards.
    borrowed = subnet_generator.borrow_subnet()
    yield IPv4Generator(borrowed)
    subnet_generator.return_subnet(borrowed)
if __name__ == "__main__":
    # Manual smoke test: exercise aligned allocation twice on a /16.
    from ipaddress import IPv4Network
    ipgen = IPv4Generator(IPv4Network("192.168.0.0/16"))
    ipgen.get_next_available_ips(2, netmask_len=30)
    ipgen.get_next_available_ips(2, netmask_len=30)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,929
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/framework/microvm.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Classes for working with microVMs.
This module defines `Microvm`, which can be used to create, test drive, and
destroy microvms.
- Use the Firecracker Open API spec to populate Microvm API resource URLs.
"""
# pylint:disable=too-many-lines
import json
import logging
import os
import re
import select
import shutil
import time
import uuid
from collections import namedtuple
from dataclasses import dataclass
from enum import Enum
from functools import lru_cache
from pathlib import Path
from typing import Optional
from retry import retry
import host_tools.cargo_build as build_tools
import host_tools.memory as mem_tools
import host_tools.network as net_tools
from framework import utils
from framework.artifacts import NetIfaceConfig
from framework.defs import FC_PID_FILE_NAME, MAX_API_CALL_DURATION_MS
from framework.http_api import Api
from framework.jailer import JailerContext
from framework.properties import global_props
# Module-level logger for microVM lifecycle events.
LOG = logging.getLogger("microvm")
class SnapshotType(Enum):
    """Supported snapshot types."""

    FULL = "Full"
    DIFF = "Diff"

    def __repr__(self):
        """Render as e.g. ``SnapshotType.FULL``."""
        return "{}.{}".format(type(self).__name__, self.name)
def hardlink_or_copy(src, dst):
"""If src and dst are in the same device, hardlink. Otherwise, copy."""
dst.touch(exist_ok=False)
if dst.stat().st_dev == src.stat().st_dev:
dst.unlink()
dst.hardlink_to(src)
else:
shutil.copyfile(src, dst)
@dataclass(frozen=True, repr=True)
class Snapshot:
    """A Firecracker snapshot"""

    vmstate: Path
    mem: Path
    net_ifaces: list
    disks: dict
    ssh_key: Path
    snapshot_type: SnapshotType

    @property
    def is_diff(self) -> bool:
        """Is this a DIFF snapshot?"""
        return self.snapshot_type == SnapshotType.DIFF

    def rebase_snapshot(self, base):
        """Rebases current incremental snapshot onto a specified base layer."""
        if not self.is_diff:
            raise ValueError("Can only rebase DIFF snapshots")
        build_tools.run_rebase_snap_bin(base.mem, self.mem)
        # Same snapshot, now backed by the rebased base memory file.
        merged = {**self.__dict__, "mem": base.mem}
        return Snapshot(**merged)

    @classmethod
    # TBD when Python 3.11: -> Self
    def load_from(cls, src: Path) -> "Snapshot":
        """Load a snapshot saved with `save_to`"""
        data = json.loads((src / "snapshot.json").read_text())
        return cls(
            vmstate=src / data["vmstate"],
            mem=src / data["mem"],
            net_ifaces=[NetIfaceConfig(**d) for d in data["net_ifaces"]],
            disks={dsk: src / p for dsk, p in data["disks"].items()},
            ssh_key=src / data["ssh_key"],
            snapshot_type=SnapshotType(data["snapshot_type"]),
        )

    def save_to(self, dst: Path):
        """Serialize snapshot details to `dst`

        Deserialize the snapshot with `load_from`
        """
        # Link/copy the flat files next to the metadata.
        for src_path in (self.vmstate, self.mem, self.ssh_key):
            hardlink_or_copy(src_path, dst / src_path.name)
        new_disks = {}
        for disk_id, disk_path in self.disks.items():
            hardlink_or_copy(disk_path, dst / disk_path.name)
            new_disks[disk_id] = disk_path.name
        manifest = {
            "vmstate": self.vmstate.name,
            "mem": self.mem.name,
            "net_ifaces": [x.__dict__ for x in self.net_ifaces],
            "disks": new_disks,
            "ssh_key": self.ssh_key.name,
            "snapshot_type": self.snapshot_type.value,
        }
        (dst / "snapshot.json").write_text(json.dumps(manifest))

    def delete(self):
        """Delete the backing files from disk."""
        self.mem.unlink()
        self.vmstate.unlink()
# pylint: disable=R0904
class Microvm:
"""Class to represent a Firecracker microvm.
A microvm is described by a unique identifier, a path to all the resources
it needs in order to be able to start and the binaries used to spawn it.
Besides keeping track of microvm resources and exposing microvm API
methods, `spawn()` and `kill()` can be used to start/end the microvm
process.
"""
    def __init__(
        self,
        resource_path,
        fc_binary_path=None,
        jailer_binary_path=None,
        microvm_id=None,
        monitor_memory=True,
        bin_cloner_path=None,
    ):
        """Set up microVM attributes, paths, and data structures.

        :param resource_path: directory under which this VM's working dir
            (named after the microVM id) is created
        :param fc_binary_path: Firecracker binary; defaults to the built one
        :param jailer_binary_path: jailer binary; defaults to the built one
        :param microvm_id: unique id; a random UUID4 when not given
        :param monitor_memory: when True, attach a MemoryMonitor
        :param bin_cloner_path: external clone/exec helper binary
        """
        # pylint: disable=too-many-statements
        # Unique identifier for this machine.
        if microvm_id is None:
            microvm_id = str(uuid.uuid4())
        self._microvm_id = microvm_id
        # Compose the paths to the resources specific to this microvm.
        self._path = os.path.join(resource_path, microvm_id)
        os.makedirs(self._path, exist_ok=True)
        self.kernel_file = None
        self.rootfs_file = None
        self.ssh_key = None
        self.initrd_file = None
        # The binaries this microvm will use to start.
        if fc_binary_path is None:
            fc_binary_path, _ = build_tools.get_firecracker_binaries()
        if jailer_binary_path is None:
            _, jailer_binary_path = build_tools.get_firecracker_binaries()
        self._fc_binary_path = str(fc_binary_path)
        assert fc_binary_path.exists()
        self._jailer_binary_path = str(jailer_binary_path)
        assert jailer_binary_path.exists()
        # Create the jailer context associated with this microvm.
        self.jailer = JailerContext(
            jailer_id=self._microvm_id,
            exec_file=self._fc_binary_path,
        )
        self.jailer_clone_pid = None
        # Copy the /etc/localtime file in the jailer root
        self.jailer.jailed_path("/etc/localtime", subdir="etc")
        # Initialize the logging subsystem.
        self._screen_pid = None
        # API timing checks are skipped on 6.1 hosts; see kill() and
        # _validate_api_response_times().
        self.time_api_requests = global_props.host_linux_version != "6.1"
        # Initalize memory monitor
        self.memory_monitor = None
        if monitor_memory:
            self.memory_monitor = mem_tools.MemoryMonitor()
        # Populated by spawn().
        self.api = None
        self.log_file = None
        self.metrics_file = None
        # device dictionaries
        self.iface = {}
        self.disks = {}
        self.vcpus_count = None
        # External clone/exec tool, because Python can't into clone
        self.bin_cloner_path = bin_cloner_path
        # Flag checked in destructor to see abnormal signal-induced crashes.
        self.expect_kill_by_signal = False
        # MMDS content from file
        self.metadata_file = None
def __repr__(self):
return f"<Microvm id={self.id}>"
    def kill(self):
        """All clean up associated with this microVM should go here.

        Kills the jailer/Firecracker processes, validates API timing logs
        when enabled, and stops the memory monitor.
        """
        # pylint: disable=subprocess-run-check
        if (
            self.expect_kill_by_signal is False
            and "Shutting down VM after intercepting signal" in self.log_data
        ):
            # Too late to assert at this point, pytest will still report the
            # test as passed. BUT we can dump full logs for debugging,
            # as well as an intentional eye-sore in the test report.
            LOG.error(self.log_data)
        if self.jailer.daemonize:
            if self.jailer_clone_pid:
                utils.run_cmd(
                    "kill -9 {}".format(self.jailer_clone_pid), ignore_return_code=True
                )
        else:
            # Killing screen will send SIGHUP to underlying Firecracker.
            # Needed to avoid false positives in case kill() is called again.
            self.expect_kill_by_signal = True
            utils.run_cmd("kill -9 {} || true".format(self.screen_pid))
        if self.time_api_requests:
            self._validate_api_response_times()
        # Check if Firecracker was launched by the jailer in a new pid ns.
        if self.jailer.new_pid_ns:
            # We need to explicitly kill the Firecracker pid, since it's
            # different from the jailer pid that was previously killed.
            utils.run_cmd(f"kill -9 {self.pid_in_new_ns}", ignore_return_code=True)
        if self.memory_monitor:
            if self.memory_monitor.is_alive():
                self.memory_monitor.signal_stop()
                self.memory_monitor.join(timeout=1)
            # NOTE(review): presumably validates collected memory samples —
            # see MemoryMonitor for its failure behavior.
            self.memory_monitor.check_samples()
    def _validate_api_response_times(self):
        """
        Parses the firecracker logs for information regarding api server request processing times, and asserts they
        are within acceptable bounds.

        Raises if a request entry is missing its duration entry (or a
        duration entry appears without a preceding request entry).
        """
        # Log messages are either
        # 2023-06-16T07:45:41.767987318 [fc44b23e-ce47-4635-9549-5779a6bd9cee:fc_api] The API server received a Get request on "/mmds".
        # or
        # 2023-06-16T07:47:31.204704732 [2f2427c7-e4de-4226-90e6-e3556402be84:fc_api] The API server received a Put request on "/actions" with body "{\"action_type\": \"InstanceStart\"}".
        api_request_regex = re.compile(
            r"\] The API server received a (?P<method>\w+) request on \"(?P<url>(/(\w|-)*)+)\"( with body (?P<body>.*))?\."
        )
        api_request_times_regex = re.compile(
            r"\] Total previous API call duration: (?P<execution_time>\d+) us.$"
        )
        # Note: Processing of api requests is synchronous, so these messages cannot be torn by concurrency effects
        log_lines = self.log_data.split("\n")
        ApiCall = namedtuple("ApiCall", "method url body")
        # Request currently awaiting its matching duration entry, if any.
        current_call = None
        for log_line in log_lines:
            match = api_request_regex.search(log_line)
            if match:
                if current_call is not None:
                    raise Exception(
                        f"API call duration log entry for {current_call.method} {current_call.url} with body {current_call.body} is missing!"
                    )
                current_call = ApiCall(
                    match.group("method"), match.group("url"), match.group("body")
                )
            match = api_request_times_regex.search(log_line)
            if match:
                if current_call is None:
                    raise Exception(
                        "Got API call duration log entry before request entry"
                    )
                # /snapshot/create is exempt from the duration bound.
                if current_call.url != "/snapshot/create":
                    # Durations are logged in microseconds; convert to ms.
                    exec_time = float(match.group("execution_time")) / 1000.0
                    assert (
                        exec_time <= MAX_API_CALL_DURATION_MS
                    ), f"{current_call.method} {current_call.url} API call exceeded maximum duration: {exec_time} ms. Body: {current_call.body}"
                current_call = None
@property
def firecracker_version(self):
"""Return the version of the Firecracker executable."""
_, stdout, _ = utils.run_cmd(f"{self._fc_binary_path} --version")
return re.match(r"^Firecracker v(.+)", stdout.partition("\n")[0]).group(1)
    @property
    def path(self):
        """Return the path on disk used that represents this microVM."""
        return self._path
    # Alias: some functions refer to the microVM path as `fsfiles`.
    fsfiles = path
    @property
    def id(self):
        """Return the unique identifier of this microVM (a UUID4 string by default)."""
        return self._microvm_id
@property
def log_data(self):
"""Return the log data."""
if self.log_file is None:
return ""
return self.log_file.read_text()
@property
def state(self):
"""Get the InstanceInfo property and return the state field."""
return self.api.describe.get().json()["state"]
@property
@retry(delay=0.1, tries=5)
def pid_in_new_ns(self):
"""Get the pid of the Firecracker process in the new namespace.
Reads the pid from a file created by jailer with `--new-pid-ns` flag.
"""
# Check if the pid file exists.
pid_file_path = Path(f"{self.jailer.chroot_path()}/{FC_PID_FILE_NAME}")
assert pid_file_path.exists()
# Read the PID stored inside the file.
return int(pid_file_path.read_text(encoding="ascii"))
def flush_metrics(self):
"""Flush the microvm metrics and get the latest datapoint"""
self.api.actions.put(action_type="FlushMetrics")
# get the latest metrics
return self.get_all_metrics()[-1]
def get_all_metrics(self):
"""Return all metric data points written by FC."""
return [json.loads(line) for line in self.metrics_file.read_text().splitlines()]
    def create_jailed_resource(self, path):
        """Create a hard link to some resource inside this microvm.

        Returns the jailed path to the linked resource.
        """
        return self.jailer.jailed_path(path, create=True)
    def get_jailed_resource(self, path):
        """Get the relative jailed path to a resource (no hard link is created)."""
        return self.jailer.jailed_path(path, create=False)
    def chroot(self):
        """Get the chroot of this microVM.

        Delegates to the jailer context, which owns the chroot directory.
        """
        return self.jailer.chroot_path()
    @property
    def screen_session(self):
        """The name of the screen session running this microVM.

        Equal to the id of this microVM, which should be unique.
        """
        return self.id
@property
def screen_log(self):
"""Get the screen log file."""
return f"/tmp/screen-{self.screen_session}.log"
    @property
    def screen_pid(self):
        """Get the PID of the screen session (None until it is recorded)."""
        return self._screen_pid
def pin_vmm(self, cpu_id: int) -> bool:
"""Pin the firecracker process VMM thread to a cpu list."""
if self.jailer_clone_pid:
for thread_name, thread_pids in utils.ProcessManager.get_threads(
self.jailer_clone_pid
).items():
# the firecracker thread should start with firecracker...
if thread_name.startswith("firecracker"):
for pid in thread_pids:
utils.ProcessManager.set_cpu_affinity(pid, [cpu_id])
return True
return False
def pin_vcpu(self, vcpu_id: int, cpu_id: int):
"""Pin the firecracker vcpu thread to a cpu list."""
if self.jailer_clone_pid:
for thread in utils.ProcessManager.get_threads(self.jailer_clone_pid)[
f"fc_vcpu {vcpu_id}"
]:
utils.ProcessManager.set_cpu_affinity(thread, [cpu_id])
return True
return False
def pin_api(self, cpu_id: int):
    """Pin the Firecracker API-server thread to host CPU `cpu_id`.

    Returns True when the pinning was applied, False if the process
    is not running.
    """
    if not self.jailer_clone_pid:
        return False
    api_tids = utils.ProcessManager.get_threads(self.jailer_clone_pid)[
        "fc_api"
    ]
    for tid in api_tids:
        utils.ProcessManager.set_cpu_affinity(tid, [cpu_id])
    return True
def spawn(
    self,
    log_file="fc.log",
    log_level="Debug",
    metrics_path="fc.ndjson",
):
    """Start a microVM as a daemon or in a screen session.

    Sets up the jailer chroot, links the log/metrics/metadata files
    into the jail, then launches Firecracker either daemonized or
    inside a `screen` session. Unless started with `no-api`, blocks
    until the API socket exists and "Running Firecracker" is logged.
    """
    # pylint: disable=subprocess-run-check
    self.jailer.setup()
    self.api = Api(self.jailer.api_socket_path())
    if log_file is not None:
        self.log_file = Path(self.path) / log_file
        self.log_file.touch()
        self.create_jailed_resource(self.log_file)
        # The default value for `level`, when configuring the
        # logger via cmd line, is `Warning`. We set the level
        # to `Debug` to also have the boot time printed in fifo.
        self.jailer.extra_args.update({"log-path": log_file, "level": log_level})
    if metrics_path is not None:
        self.metrics_file = Path(self.path) / metrics_path
        self.metrics_file.touch()
        self.create_jailed_resource(self.metrics_file)
        self.jailer.extra_args.update({"metrics-path": self.metrics_file.name})
    if self.metadata_file:
        if os.path.exists(self.metadata_file):
            LOG.debug("metadata file exists, adding as a jailed resource")
            self.create_jailed_resource(self.metadata_file)
        self.jailer.extra_args.update(
            {"metadata": os.path.basename(self.metadata_file)}
        )
    jailer_param_list = self.jailer.construct_param_list()
    if log_level != "Debug":
        # Checking the timings requires DEBUG level log messages
        self.time_api_requests = False
    # When the daemonize flag is on, we want to clone-exec into the
    # jailer rather than executing it via spawning a shell. Going
    # forward, we'll probably switch to this method for running
    # Firecracker in general, because it represents the way it's meant
    # to be run by customers (together with CLONE_NEWPID flag).
    #
    # We have to use an external tool for CLONE_NEWPID, because
    # 1) Python doesn't provide os.clone() interface, and
    # 2) Python's ctypes libc interface appears to be broken, causing
    # our clone / exec to deadlock at some point.
    if self.jailer.daemonize:
        self.daemonize_jailer(jailer_param_list)
    else:
        # This file will collect any output from 'screen'ed Firecracker.
        screen_pid, binary_pid = utils.start_screen_process(
            self.screen_log,
            self.screen_session,
            self._jailer_binary_path,
            jailer_param_list,
        )
        self._screen_pid = screen_pid
        self.jailer_clone_pid = binary_pid
    # Wait for the jailer to create resources needed, and Firecracker to
    # create its API socket.
    # We expect the jailer to start within 80 ms. However, we wait for
    # 1 sec since we are rechecking the existence of the socket 5 times
    # and leave 0.2 delay between them.
    if "no-api" not in self.jailer.extra_args:
        self._wait_create()
    if self.log_file:
        self.check_log_message("Running Firecracker")
@retry(delay=0.2, tries=5)
def _wait_create(self):
    """Wait until the API socket and chroot folder are available.

    os.stat raises while the socket does not exist yet; @retry
    re-polls up to 5 times with a 0.2 s delay between attempts.
    """
    os.stat(self.jailer.api_socket_path())
@retry(delay=0.2, tries=5)
def check_log_message(self, message):
    """Keep polling the log (via @retry) until `message` appears in it."""
    found = message in self.log_data
    assert found, f'Message ("{message}") not found in log data ("{self.log_data}").'
@retry(delay=0.2, tries=5)
def check_any_log_message(self, messages):
    """Keep polling the log (via @retry) until any of `messages` appears."""
    if any(candidate in self.log_data for candidate in messages):
        return
    raise AssertionError(
        f"`{messages}` were not found in this log: {self.log_data}"
    )
def serial_input(self, input_string):
    """Type `input_string` into the serial console through the screen session."""
    cmd = f'screen -S {self.screen_session} -p 0 -X stuff "{input_string}"'
    return utils.run_cmd(cmd)
def basic_config(
    self,
    vcpu_count: int = 2,
    smt: bool = None,
    mem_size_mib: int = 256,
    add_root_device: bool = True,
    boot_args: str = None,
    use_initrd: bool = False,
    track_dirty_pages: bool = False,
    rootfs_io_engine=None,
    cpu_template: Optional[str] = None,
):
    """Shortcut for quickly configuring a microVM.

    It handles:
    - CPU and memory.
    - Kernel image (will load the one in the microVM allocated path).
    - Root File System (will use the one in the microVM allocated path).
    - Does not start the microvm.

    The function checks the response status code and asserts that
    the response is within the interval [200, 300).

    If boot_args is None, the default boot_args in Firecracker is
    reboot=k panic=1 pci=off nomodules 8250.nr_uarts=0
    i8042.noaux i8042.nomux i8042.nopnp i8042.dumbkbd

    Reference: file:../../src/vmm/src/vmm_config/boot_source.rs::DEFAULT_KERNEL_CMDLINE
    """
    self.api.machine_config.put(
        vcpu_count=vcpu_count,
        smt=smt,
        mem_size_mib=mem_size_mib,
        track_dirty_pages=track_dirty_pages,
        cpu_template=cpu_template,
    )
    self.vcpus_count = vcpu_count
    # If a memory monitor is attached, point it at this VM's process.
    if self.memory_monitor:
        self.memory_monitor.guest_mem_mib = mem_size_mib
        self.memory_monitor.pid = self.jailer_clone_pid
        self.memory_monitor.start()
    boot_source_args = {
        "kernel_image_path": self.create_jailed_resource(self.kernel_file),
        "boot_args": boot_args,
    }
    # Attach the initrd only when requested AND one was provided.
    if use_initrd and self.initrd_file is not None:
        boot_source_args.update(
            initrd_path=self.create_jailed_resource(self.initrd_file)
        )
    self.api.boot.put(**boot_source_args)
    if add_root_device and self.rootfs_file is not None:
        # squashfs images are immutable, so mount them read-only.
        read_only = self.rootfs_file.suffix == ".squashfs"
        # Add the root file system
        self.add_drive(
            drive_id="rootfs",
            path_on_host=self.rootfs_file,
            is_root_device=True,
            is_read_only=read_only,
            io_engine=rootfs_io_engine,
        )
def daemonize_jailer(self, jailer_param_list):
    """Daemonize the jailer.

    Prefers clone-exec through the external cloner binary (which
    provides CLONE_NEWPID); falls back to fork+exec when PID
    namespacing is offloaded to the jailer itself.
    """
    if self.bin_cloner_path and self.jailer.new_pid_ns is not True:
        cmd = (
            [self.bin_cloner_path] + [self._jailer_binary_path] + jailer_param_list
        )
        _p = utils.run_cmd(cmd)
        # Terrible hack to make the tests fail when starting the
        # jailer fails with a panic. This is needed because we can't
        # get the exit code of the jailer. In newpid_clone.c we are
        # not waiting for the process and we always return 0 if the
        # clone was successful (which in most cases will be) and we
        # don't do anything if the jailer was not started
        # successfully.
        if _p.stderr.strip():
            raise Exception(_p.stderr)
        # The cloner prints the spawned PID on stdout.
        self.jailer_clone_pid = int(_p.stdout.rstrip())
    else:
        # Fallback mechanism for when we offload PID namespacing
        # to the jailer.
        _pid = os.fork()
        if _pid == 0:
            # Child: replace this process image with the jailer.
            os.execv(
                self._jailer_binary_path,
                [self._jailer_binary_path] + jailer_param_list,
            )
        # Parent: remember the child's PID.
        self.jailer_clone_pid = _pid
def add_drive(
    self,
    drive_id,
    path_on_host,
    is_root_device=False,
    is_read_only=False,
    partuuid=None,
    cache_type=None,
    io_engine=None,
):
    """Attach a block device, jailing its backing file first."""
    jailed_path = self.create_jailed_resource(path_on_host)
    self.api.drive.put(
        drive_id=drive_id,
        path_on_host=jailed_path,
        is_root_device=is_root_device,
        is_read_only=is_read_only,
        partuuid=partuuid,
        cache_type=cache_type,
        io_engine=io_engine,
    )
    # Remember the host-side path so snapshots can re-link the disk later.
    self.disks[drive_id] = path_on_host
def patch_drive(self, drive_id, file):
    """Point an existing block device at a new backing file."""
    jailed_path = self.create_jailed_resource(file.path)
    self.api.drive.patch(drive_id=drive_id, path_on_host=jailed_path)
    self.disks[drive_id] = Path(file.path)
def add_net_iface(self, iface=None, api=True, **kwargs):
    """Create a host tap device and (optionally) register it via the API."""
    if iface is None:
        iface = NetIfaceConfig.with_id(len(self.iface))
    tap = net_tools.Tap(
        iface.tap_name, self.jailer.netns, ip=f"{iface.host_ip}/{iface.netmask}"
    )
    self.iface[iface.dev_name] = {"iface": iface, "tap": tap}
    # Skipping the API call is useful e.g. during snapshot restore, when
    # the interface already exists in the saved VM state.
    if api:
        self.api.network.put(
            iface_id=iface.dev_name,
            host_dev_name=iface.tap_name,
            guest_mac=iface.guest_mac,
            **kwargs,
        )
    return iface
def start(self):
    """Boot the configured microVM and verify that it came up.

    Asserts the VM was not already started, and that it reports
    "Running" after the InstanceStart action.
    """
    # Refuse to double-start.
    assert self.state == "Not started"
    self.api.actions.put(action_type="InstanceStart")
    assert self.state == "Running"
def pause(self):
    """Transition the microVM into the Paused state."""
    self.api.vm.patch(state="Paused")
def resume(self):
    """Transition the microVM back into the running state."""
    self.api.vm.patch(state="Resumed")
def make_snapshot(
    self, snapshot_type: SnapshotType | str, target_version: str = None
):
    """Create a Snapshot object from a microvm.

    It pauses the microvm before taking the snapshot.
    """
    vmstate_path = "vmstate"
    mem_path = "mem"
    # Accept both the enum and its string form ("Full" / "Diff").
    snapshot_type = SnapshotType(snapshot_type)
    # The VM must be paused for a consistent snapshot.
    self.pause()
    self.api.snapshot_create.put(
        mem_file_path=str(mem_path),
        snapshot_path=str(vmstate_path),
        snapshot_type=snapshot_type.value,
        version=target_version,
    )
    # Paths given to the API are chroot-relative; resolve them to
    # host-side absolute paths for the returned Snapshot.
    root = Path(self.chroot())
    return Snapshot(
        vmstate=root / vmstate_path,
        mem=root / mem_path,
        disks=self.disks,
        net_ifaces=[x["iface"] for ifname, x in self.iface.items()],
        ssh_key=self.ssh_key,
        snapshot_type=snapshot_type,
    )
def snapshot_diff(self, target_version: str = None):
    """Take a Diff snapshot of this microVM."""
    return self.make_snapshot("Diff", target_version)
def snapshot_full(self, target_version: str = None):
    """Take a Full snapshot of this microVM."""
    return self.make_snapshot("Full", target_version)
def restore_from_snapshot(
    self,
    snapshot: Snapshot,
    resume: bool = False,
    uffd_path: Path = None,
):
    """Restore a snapshot into this microVM.

    Links the snapshot's memory/vmstate/disk files into the jail,
    re-creates the tap devices, and issues the snapshot-load API call.
    """
    # Move all the snapshot files into the microvm jail.
    # Use different names so a snapshot doesn't overwrite our original snapshot.
    chroot = Path(self.chroot())
    mem_src = chroot / snapshot.mem.with_suffix(".src").name
    hardlink_or_copy(snapshot.mem, mem_src)
    vmstate_src = chroot / snapshot.vmstate.with_suffix(".src").name
    hardlink_or_copy(snapshot.vmstate, vmstate_src)
    # Paths handed to the API must be relative to the jail root.
    jailed_mem = Path("/") / mem_src.name
    jailed_vmstate = Path("/") / vmstate_src.name
    snapshot_disks = [v for k, v in snapshot.disks.items()]
    assert len(snapshot_disks) > 0, "Snapshot requires at least one disk."
    jailed_disks = []
    for disk in snapshot_disks:
        jailed_disks.append(self.create_jailed_resource(disk))
    self.disks = snapshot.disks
    self.ssh_key = snapshot.ssh_key
    # Create network interfaces. api=False: the interfaces are already
    # part of the saved VM state, so only the host-side taps are needed.
    for iface in snapshot.net_ifaces:
        self.add_net_iface(iface, api=False)
    # Guest memory is either mapped from a file, or served on demand by
    # a userfaultfd handler when `uffd_path` is given.
    mem_backend = {"backend_type": "File", "backend_path": str(jailed_mem)}
    if uffd_path is not None:
        mem_backend = {"backend_type": "Uffd", "backend_path": str(uffd_path)}
    self.api.snapshot_load.put(
        mem_backend=mem_backend,
        snapshot_path=str(jailed_vmstate),
        enable_diff_snapshots=snapshot.is_diff,
        resume_vm=resume,
    )
    return True
def restore_from_path(self, snap_dir: Path, **kwargs):
    """Load a snapshot from `snap_dir` and restore this microVM from it."""
    snapshot = Snapshot.load_from(snap_dir)
    return self.restore_from_snapshot(snapshot, **kwargs)
@lru_cache
def ssh_iface(self, iface_idx=0):
    """Return a cached SSH connection on a given interface id.

    NOTE(review): `lru_cache` on an instance method keys the cache on
    `self` and keeps every instance alive for the cache's lifetime
    (flake8-bugbear B019) — presumably acceptable for short-lived test
    VMs, but worth confirming.
    """
    guest_ip = list(self.iface.values())[iface_idx]["iface"].guest_ip
    # Normalize to Path in case the key was stored as a plain string.
    self.ssh_key = Path(self.ssh_key)
    return net_tools.SSHConnection(
        netns_path=self.jailer.netns_file_path(),
        ssh_key=self.ssh_key,
        user="root",
        host=guest_ip,
    )
@property
def ssh(self):
    """Cached SSH connection over the first network interface."""
    return self.ssh_iface(0)
class MicroVMFactory:
    """MicroVM factory.

    Builds Microvm instances under a shared base path and tracks them
    so they can all be torn down with a single kill() call.
    """

    def __init__(self, base_path, bin_cloner):
        # Directory under which per-VM resources are created.
        self.base_path = Path(base_path)
        # External clone-exec helper handed to every built VM.
        self.bin_cloner_path = bin_cloner
        # Every VM built here, so kill() can clean them all up.
        self.vms = []

    def build(self, kernel=None, rootfs=None, microvm_id=None, **kwargs):
        """Build a microvm"""
        vm = Microvm(
            resource_path=self.base_path,
            microvm_id=microvm_id or str(uuid.uuid4()),
            bin_cloner_path=self.bin_cloner_path,
            **kwargs,
        )
        self.vms.append(vm)
        if kernel is not None:
            vm.kernel_file = kernel
        if rootfs is not None:
            # The SSH key is expected to live next to the rootfs image.
            ssh_key = rootfs.with_suffix(".id_rsa")
            # copy only iff not a read-only rootfs
            rootfs_path = rootfs
            if rootfs_path.suffix != ".squashfs":
                rootfs_path = Path(vm.path) / rootfs.name
                shutil.copyfile(rootfs, rootfs_path)
            vm.rootfs_file = rootfs_path
            vm.ssh_key = ssh_key
        return vm

    def kill(self):
        """Clean up all built VMs"""
        for vm in self.vms:
            vm.kill()
            vm.jailer.cleanup()
            # Only remove the chroot when the jailer id is non-empty,
            # guarding against deleting an unintended directory.
            if len(vm.jailer.jailer_id) > 0:
                shutil.rmtree(vm.jailer.chroot_base_with_id())
class Serial:
    """Class for serial console communication with a Microvm.

    Reads console output from the screen session's log file and sends
    input through `screen`'s `stuff` command.
    """

    # Give up on rx() after this many seconds without the expected token.
    RX_TIMEOUT_S = 20

    def __init__(self, vm):
        """Initialize a new Serial object."""
        self._poller = None
        self._vm = vm

    def open(self):
        """Open a serial connection."""
        # Open the screen log file.
        if self._poller is not None:
            # serial already opened
            return
        screen_log_fd = os.open(self._vm.screen_log, os.O_RDONLY)
        self._poller = select.poll()
        self._poller.register(screen_log_fd, select.POLLIN | select.POLLHUP)

    def tx(self, input_string, end="\n"):
        # pylint: disable=invalid-name
        # No need to have a snake_case naming style for a single word.
        r"""Send a string terminated by an end token (defaulting to "\n")."""
        self._vm.serial_input(input_string + end)

    def rx_char(self):
        """Read a single character, or return "" if none is ready."""
        # NOTE(review): select.poll.poll() takes its timeout in
        # *milliseconds*, so 0.1 here is a sub-millisecond wait —
        # effectively a non-blocking poll. Confirm whether 100 ms
        # was intended.
        result = self._poller.poll(0.1)
        for fd, flag in result:
            if flag & select.POLLHUP:
                assert False, "Oh! The console vanished before test completed."
            if flag & select.POLLIN:
                output_char = str(os.read(fd, 1), encoding="utf-8", errors="ignore")
                return output_char
        # Nothing ready yet: let rx() keep spinning.
        return ""

    def rx(self, token="\n"):
        # pylint: disable=invalid-name
        # No need to have a snake_case naming style for a single word.
        r"""Read a string delimited by an end token (defaults to "\n")."""
        rx_str = ""
        start = time.time()
        while True:
            rx_str += self.rx_char()
            if rx_str.endswith(token):
                break
            if (time.time() - start) >= self.RX_TIMEOUT_S:
                # Timed out waiting for the token: tear the VM down
                # and fail the test.
                self._vm.kill()
                assert False
        return rx_str
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,930
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/build/test_clippy.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests ensuring codebase style compliance for Rust and Python."""
import platform
import pytest
from host_tools.cargo_build import cargo
# Exit code `cargo` returns on success.
SUCCESS_CODE = 0
# Host architecture, e.g. "x86_64" or "aarch64".
MACHINE = platform.machine()
# Rust target triples linted on this host.
# Idiom: f-strings instead of str.format (values unchanged).
TARGETS = [
    f"{MACHINE}-unknown-linux-gnu",
    f"{MACHINE}-unknown-linux-musl",
]
@pytest.mark.parametrize("target", TARGETS)
def test_rust_clippy(target):
    """
    Test that clippy does not generate any errors/warnings.
    """
    build_flags = f"--target {target} --all --profile test"
    cargo("clippy", build_flags, "-D warnings")
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,931
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_api.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests that ensure the correctness of the Firecracker API."""
# Disable pylint C0302: Too many lines in module
# pylint: disable=C0302
import os
import platform
import re
import resource
import time
import packaging.version
import pytest
import host_tools.drive as drive_tools
import host_tools.network as net_tools
from framework import utils_cpuid
from framework.utils import get_firecracker_version_from_toml, is_io_uring_supported
from framework.utils_cpu_templates import nonci_on_arm
# Address-space limit (bytes) applied to the Firecracker process in
# test_api_machine_config so an over-sized guest memory request fails
# fast instead of exhausting host memory.
MEM_LIMIT = 1000000000
# Error messages Firecracker returns for operations issued at the wrong
# time relative to boot.
NOT_SUPPORTED_BEFORE_START = (
    "The requested operation is not supported before starting the microVM."
)
NOT_SUPPORTED_AFTER_START = (
    "The requested operation is not supported after starting the microVM"
)
def test_api_happy_start(test_microvm_with_api):
    """
    Test that a regular microvm API config and boot sequence works.
    """
    vm = test_microvm_with_api
    vm.spawn()
    # Default config: 2 vCPUs, 256 MiB RAM and a rw root file system.
    vm.basic_config()
    vm.start()
def test_drive_io_engine(test_microvm_with_api):
    """
    Test io_engine configuration.

    Test that the io_engine can be configured via the API on kernels that
    support the given type and that FC returns an error otherwise.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    test_microvm.basic_config(add_root_device=False)
    test_microvm.add_net_iface()
    supports_io_uring = is_io_uring_supported()
    kwargs = {
        "drive_id": "rootfs",
        "path_on_host": test_microvm.create_jailed_resource(test_microvm.rootfs_file),
        "is_root_device": True,
        "is_read_only": True,
    }
    # Test the opposite of the default backend type.
    if supports_io_uring:
        test_microvm.api.drive.put(io_engine="Sync", **kwargs)
    if not supports_io_uring:
        with pytest.raises(RuntimeError):
            test_microvm.api.drive.put(io_engine="Async", **kwargs)
        # The Async engine is not supported for older kernels.
        test_microvm.check_log_message(
            "Received Error. Status code: 400 Bad Request. Message: Unable"
            " to create the block device: FileEngine(UnsupportedEngine(Async))"
        )
        # Now configure the default engine type and check that it works.
        test_microvm.api.drive.put(**kwargs)
    test_microvm.start()
    # Execute a simple command to check that the guest booted successfully.
    rc, _, stderr = test_microvm.ssh.run("true")
    assert rc == 0
    assert stderr == ""
    # Either path above leaves the rootfs on the Sync engine.
    assert test_microvm.api.vm_config.get().json()["drives"][0]["io_engine"] == "Sync"
def test_api_put_update_pre_boot(test_microvm_with_api):
    """
    Test that PUT updates are allowed before the microvm boots.

    Tests updates on drives, boot source and machine config.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    # Set up the microVM with 2 vCPUs, 256 MiB of RAM and
    # a root file system with the rw permission.
    test_microvm.basic_config()
    fs1 = drive_tools.FilesystemFile(os.path.join(test_microvm.fsfiles, "scratch"))
    # Fix: the response of this PUT was assigned to a local that was never
    # read before being overwritten below; drop the dead assignment.
    test_microvm.api.drive.put(
        drive_id="scratch",
        path_on_host=test_microvm.create_jailed_resource(fs1.path),
        is_root_device=False,
        is_read_only=False,
    )
    # Updates to `kernel_image_path` with an invalid path are not allowed.
    expected_msg = re.escape(
        "The kernel file cannot be opened: No such file or directory (os error 2)"
    )
    with pytest.raises(RuntimeError, match=expected_msg):
        test_microvm.api.boot.put(kernel_image_path="foo.bar")
    # Updates to `kernel_image_path` with a valid path are allowed.
    test_microvm.api.boot.put(
        kernel_image_path=test_microvm.get_jailed_resource(test_microvm.kernel_file)
    )
    # Updates to `path_on_host` with an invalid path are not allowed.
    with pytest.raises(RuntimeError, match="Invalid block device path"):
        test_microvm.api.drive.put(
            drive_id="rootfs",
            path_on_host="foo.bar",
            is_read_only=True,
            is_root_device=True,
        )
    # Updates to `is_root_device` that result in two root block devices are not
    # allowed.
    with pytest.raises(RuntimeError, match="A root block device already exists"):
        test_microvm.api.drive.put(
            drive_id="scratch",
            path_on_host=test_microvm.get_jailed_resource(fs1.path),
            is_read_only=False,
            is_root_device=True,
        )
    # Valid updates to `path_on_host` and `is_read_only` are allowed.
    fs2 = drive_tools.FilesystemFile(os.path.join(test_microvm.fsfiles, "otherscratch"))
    test_microvm.api.drive.put(
        drive_id="scratch",
        path_on_host=test_microvm.create_jailed_resource(fs2.path),
        is_read_only=True,
        is_root_device=False,
    )
    # Valid updates to all fields in the machine configuration are allowed.
    # The machine configuration has a default value, so all PUTs are updates.
    microvm_config_json = {
        "vcpu_count": 4,
        "smt": platform.machine() == "x86_64",
        "mem_size_mib": 256,
        "track_dirty_pages": True,
    }
    if platform.machine() == "x86_64":
        microvm_config_json["cpu_template"] = "C3"
    test_microvm.api.machine_config.put(**microvm_config_json)
    # Read the configuration back and check every field round-tripped.
    response = test_microvm.api.machine_config.get()
    response_json = response.json()
    vcpu_count = microvm_config_json["vcpu_count"]
    assert response_json["vcpu_count"] == vcpu_count
    smt = microvm_config_json["smt"]
    assert response_json["smt"] == smt
    mem_size_mib = microvm_config_json["mem_size_mib"]
    assert response_json["mem_size_mib"] == mem_size_mib
    if platform.machine() == "x86_64":
        cpu_template = str(microvm_config_json["cpu_template"])
        assert response_json["cpu_template"] == cpu_template
    track_dirty_pages = microvm_config_json["track_dirty_pages"]
    assert response_json["track_dirty_pages"] == track_dirty_pages
def test_net_api_put_update_pre_boot(test_microvm_with_api):
    """
    Test PUT updates on network configurations before the microvm boots.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    first_if_name = "first_tap"
    tap1 = net_tools.Tap(first_if_name, test_microvm.jailer.netns)
    test_microvm.api.network.put(
        iface_id="1", guest_mac="06:00:00:00:00:01", host_dev_name=tap1.name
    )
    # Adding new network interfaces is allowed.
    second_if_name = "second_tap"
    tap2 = net_tools.Tap(second_if_name, test_microvm.jailer.netns)
    test_microvm.api.network.put(
        iface_id="2", guest_mac="07:00:00:00:00:01", host_dev_name=tap2.name
    )
    # Updates to a network interface with an unavailable MAC are not allowed.
    guest_mac = "06:00:00:00:00:01"
    expected_msg = f"The MAC address is already in use: {guest_mac}"
    with pytest.raises(RuntimeError, match=expected_msg):
        test_microvm.api.network.put(
            iface_id="2", host_dev_name=second_if_name, guest_mac=guest_mac
        )
    # Updates to a network interface with an available MAC are allowed.
    test_microvm.api.network.put(
        iface_id="2", host_dev_name=second_if_name, guest_mac="08:00:00:00:00:01"
    )
    # Updates to a network interface with an unavailable name are not allowed
    # (the tap is already attached to iface 2).
    expected_msg = "Could not create the network device"
    with pytest.raises(RuntimeError, match=expected_msg):
        test_microvm.api.network.put(
            iface_id="1", host_dev_name=second_if_name, guest_mac="06:00:00:00:00:01"
        )
    # Updates to a network interface with an available name are allowed.
    iface_id = "1"
    # Tap names are prefixed with the first 8 chars of the VM id to stay unique.
    tapname = test_microvm.id[:8] + "tap" + iface_id
    tap3 = net_tools.Tap(tapname, test_microvm.jailer.netns)
    test_microvm.api.network.put(
        iface_id=iface_id, host_dev_name=tap3.name, guest_mac="06:00:00:00:00:01"
    )
def test_api_mmds_config(test_microvm_with_api):
    """
    Test /mmds/config PUT scenarios that unit tests can't cover.

    Tests updates on MMDS config before and after attaching a network device.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    # Set up the microVM with 2 vCPUs, 256 MiB of RAM and
    # a root file system with the rw permission.
    test_microvm.basic_config()
    # Setting MMDS config with empty network interface IDs list is not allowed.
    err_msg = (
        "The list of network interface IDs that allow "
        "forwarding MMDS requests is empty."
    )
    # Fix: err_msg was defined but never passed to pytest.raises, so the
    # specific failure reason was not being asserted.
    with pytest.raises(RuntimeError, match=err_msg):
        test_microvm.api.mmds_config.put(network_interfaces=[])
    # Setting MMDS config when no network device has been attached
    # is not allowed.
    err_msg = (
        "The list of network interface IDs provided contains "
        "at least one ID that does not correspond to any "
        "existing network interface."
    )
    with pytest.raises(RuntimeError, match=err_msg):
        test_microvm.api.mmds_config.put(network_interfaces=["foo"])
    # Attach network interface.
    tap = net_tools.Tap("tap1", test_microvm.jailer.netns)
    test_microvm.api.network.put(
        iface_id="1", guest_mac="06:00:00:00:00:01", host_dev_name=tap.name
    )
    # Setting MMDS config with an ID that does not correspond to an already
    # attached network device is not allowed.
    err_msg = (
        "The list of network interface IDs provided contains"
        " at least one ID that does not correspond to any "
        "existing network interface."
    )
    with pytest.raises(RuntimeError, match=err_msg):
        test_microvm.api.mmds_config.put(network_interfaces=["1", "foo"])
    # Updates to MMDS version with invalid value are not allowed.
    err_msg = (
        "An error occurred when deserializing the json body of a "
        "request: unknown variant `foo`, expected `V1` or `V2`"
    )
    with pytest.raises(RuntimeError, match=err_msg):
        test_microvm.api.mmds_config.put(version="foo", network_interfaces=["1"])
    # Valid MMDS config not specifying version or IPv4 address:
    # the version defaults to V1.
    test_microvm.api.mmds_config.put(network_interfaces=["1"])
    assert test_microvm.api.vm_config.get().json()["mmds-config"]["version"] == "V1"
    # Valid MMDS config not specifying version.
    mmds_config = {"ipv4_address": "169.254.169.250", "network_interfaces": ["1"]}
    test_microvm.api.mmds_config.put(**mmds_config)
    assert (
        test_microvm.api.vm_config.get().json()["mmds-config"]["ipv4_address"]
        == "169.254.169.250"
    )
    # Valid MMDS config with every field specified.
    mmds_config = {
        "version": "V2",
        "ipv4_address": "169.254.169.250",
        "network_interfaces": ["1"],
    }
    test_microvm.api.mmds_config.put(**mmds_config)
    assert test_microvm.api.vm_config.get().json()["mmds-config"]["version"] == "V2"
# pylint: disable=too-many-statements
def test_api_machine_config(test_microvm_with_api):
    """
    Test /machine_config PUT/PATCH scenarios that unit tests can't cover.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    # Test invalid vcpu count < 0.
    with pytest.raises(RuntimeError):
        test_microvm.api.machine_config.put(vcpu_count="-2")
    # Test invalid type for smt flag.
    with pytest.raises(RuntimeError):
        test_microvm.api.machine_config.put(smt="random_string")
    # Test invalid CPU template.
    with pytest.raises(RuntimeError):
        test_microvm.api.machine_config.put(cpu_template="random_string")
    test_microvm.api.machine_config.patch(track_dirty_pages=True)
    # Test missing vcpu_count.
    with pytest.raises(
        RuntimeError, match="missing field `vcpu_count` at line 1 column 21."
    ):
        test_microvm.api.machine_config.put(mem_size_mib=128)
    # Test missing mem_size_mib.
    with pytest.raises(
        RuntimeError, match="missing field `mem_size_mib` at line 1 column 17."
    ):
        test_microvm.api.machine_config.put(vcpu_count=2)
    # Test default smt value.
    test_microvm.api.machine_config.put(mem_size_mib=128, vcpu_count=1)
    response = test_microvm.api.machine_config.get()
    assert response.json()["smt"] is False
    # Test that smt=True errors on ARM.
    if platform.machine() == "x86_64":
        test_microvm.api.machine_config.patch(smt=True)
    elif platform.machine() == "aarch64":
        expected_msg = (
            "Enabling simultaneous multithreading is not supported on aarch64"
        )
        with pytest.raises(RuntimeError, match=expected_msg):
            test_microvm.api.machine_config.patch(smt=True)
    # Test invalid mem_size_mib < 0.
    with pytest.raises(RuntimeError):
        test_microvm.api.machine_config.put(mem_size_mib="-2")
    # Test invalid mem_size_mib > usize::MAX.
    bad_size = 1 << 64
    fail_msg = (
        "error occurred when deserializing the json body of a request: invalid type"
    )
    with pytest.raises(RuntimeError, match=fail_msg):
        test_microvm.api.machine_config.put(mem_size_mib=bad_size)
    # Reset the configuration of the microvm
    # This will explicitly set vcpu_num = 2, mem_size_mib = 256
    # track_dirty_pages = false. All other parameters are
    # unspecified so will revert to default values.
    test_microvm.basic_config()
    # Test mem_size_mib of valid type, but too large.
    # Cap the process' address space so the oversized mmap fails fast
    # instead of exhausting host memory.
    firecracker_pid = int(test_microvm.jailer_clone_pid)
    resource.prlimit(
        firecracker_pid, resource.RLIMIT_AS, (MEM_LIMIT, resource.RLIM_INFINITY)
    )
    bad_size = (1 << 64) - 1
    test_microvm.api.machine_config.patch(mem_size_mib=bad_size)
    fail_msg = re.escape(
        "Invalid Memory Configuration: MmapRegion(Mmap(Os { code: "
        "12, kind: OutOfMemory, message: Out of memory }))"
    )
    with pytest.raises(RuntimeError, match=fail_msg):
        test_microvm.start()
    # Test invalid mem_size_mib = 0.
    with pytest.raises(
        RuntimeError, match=re.escape("The memory size (MiB) is invalid.")
    ):
        test_microvm.api.machine_config.patch(mem_size_mib=0)
    # Test valid mem_size_mib.
    test_microvm.api.machine_config.patch(mem_size_mib=256)
    # Set the cpu template
    if platform.machine() == "x86_64":
        test_microvm.api.machine_config.patch(cpu_template="C3")
    else:
        # We test with "None" because this is the only option supported on
        # all aarch64 instances. It still tests setting `cpu_template`,
        # even though the values we set is "None".
        test_microvm.api.machine_config.patch(cpu_template="None")
    if utils_cpuid.get_cpu_vendor() == utils_cpuid.CpuVendor.AMD:
        # We shouldn't be able to apply Intel templates on AMD hosts
        fail_msg = "CPU vendor mismatched between actual CPU and CPU template"
        with pytest.raises(RuntimeError, match=fail_msg):
            test_microvm.start()
    else:
        test_microvm.start()
    # Validate full vm configuration after patching machine config.
    # NOTE(review): the local `json` shadows the stdlib module name.
    json = test_microvm.api.vm_config.get().json()
    assert json["machine-config"]["vcpu_count"] == 2
    assert json["machine-config"]["mem_size_mib"] == 256
    assert json["machine-config"]["smt"] is False
@nonci_on_arm
def test_api_cpu_config(test_microvm_with_api, custom_cpu_template):
    """
    Test /cpu-config PUT scenarios.
    """
    vm = test_microvm_with_api
    vm.spawn()
    # Unknown fields in the CPU config must be rejected.
    with pytest.raises(RuntimeError):
        vm.api.cpu_config.put(foo=False)
    # A well-formed custom template is accepted.
    vm.api.cpu_config.put(**custom_cpu_template["template"])
def test_api_put_update_post_boot(test_microvm_with_api):
    """
    Test that PUT updates are rejected after the microvm boots.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    # Set up the microVM with 2 vCPUs, 256 MiB of RAM and
    # a root file system with the rw permission.
    test_microvm.basic_config()
    iface_id = "1"
    # Tap names are prefixed with the first 8 chars of the VM id to stay unique.
    tapname = test_microvm.id[:8] + "tap" + iface_id
    tap1 = net_tools.Tap(tapname, test_microvm.jailer.netns)
    test_microvm.api.network.put(
        iface_id=iface_id, host_dev_name=tap1.name, guest_mac="06:00:00:00:00:01"
    )
    test_microvm.start()
    # Valid updates to `kernel_image_path` are not allowed after boot.
    with pytest.raises(RuntimeError, match=NOT_SUPPORTED_AFTER_START):
        test_microvm.api.boot.put(
            kernel_image_path=test_microvm.get_jailed_resource(test_microvm.kernel_file)
        )
    # Valid updates to the machine configuration are not allowed after boot.
    with pytest.raises(RuntimeError, match=NOT_SUPPORTED_AFTER_START):
        test_microvm.api.machine_config.patch(vcpu_count=4)
    with pytest.raises(RuntimeError, match=NOT_SUPPORTED_AFTER_START):
        test_microvm.api.machine_config.put(vcpu_count=4, mem_size_mib=128)
    # Network interface update is not allowed after boot.
    with pytest.raises(RuntimeError, match=NOT_SUPPORTED_AFTER_START):
        test_microvm.api.network.put(
            iface_id="1", host_dev_name=tap1.name, guest_mac="06:00:00:00:00:02"
        )
    # Block device update is not allowed after boot.
    with pytest.raises(RuntimeError, match=NOT_SUPPORTED_AFTER_START):
        test_microvm.api.drive.put(
            drive_id="rootfs",
            path_on_host=test_microvm.jailer.jailed_path(test_microvm.rootfs_file),
            is_read_only=False,
            is_root_device=True,
        )
    # MMDS config is not allowed post-boot.
    mmds_config = {
        "version": "V2",
        "ipv4_address": "169.254.169.250",
        "network_interfaces": ["1"],
    }
    with pytest.raises(RuntimeError, match=NOT_SUPPORTED_AFTER_START):
        test_microvm.api.mmds_config.put(**mmds_config)
def test_rate_limiters_api_config(test_microvm_with_api):
    """
    Test the IO rate limiter API config.

    Exercises every accepted shape of the `rate_limiter` object on drives
    (bandwidth only, ops only, both, empty), on network interfaces
    (tx only, rx only, both directions with both buckets) and on the
    entropy device. All PUTs happen pre-boot and must succeed.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    # Test the DRIVE rate limiting API.
    # Test drive with bw rate-limiting.
    fs1 = drive_tools.FilesystemFile(os.path.join(test_microvm.fsfiles, "bw"))
    test_microvm.api.drive.put(
        drive_id="bw",
        path_on_host=test_microvm.create_jailed_resource(fs1.path),
        is_read_only=False,
        is_root_device=False,
        rate_limiter={"bandwidth": {"size": 1000000, "refill_time": 100}},
    )
    # Test drive with ops rate-limiting.
    fs2 = drive_tools.FilesystemFile(os.path.join(test_microvm.fsfiles, "ops"))
    test_microvm.api.drive.put(
        drive_id="ops",
        path_on_host=test_microvm.create_jailed_resource(fs2.path),
        is_read_only=False,
        is_root_device=False,
        rate_limiter={"ops": {"size": 1, "refill_time": 100}},
    )
    # Test drive with bw and ops rate-limiting.
    fs3 = drive_tools.FilesystemFile(os.path.join(test_microvm.fsfiles, "bwops"))
    test_microvm.api.drive.put(
        drive_id="bwops",
        path_on_host=test_microvm.create_jailed_resource(fs3.path),
        is_read_only=False,
        is_root_device=False,
        rate_limiter={
            "bandwidth": {"size": 1000000, "refill_time": 100},
            "ops": {"size": 1, "refill_time": 100},
        },
    )
    # Test drive with 'empty' rate-limiting (same as not specifying the field)
    fs4 = drive_tools.FilesystemFile(os.path.join(test_microvm.fsfiles, "nada"))
    test_microvm.api.drive.put(
        drive_id="nada",
        path_on_host=test_microvm.create_jailed_resource(fs4.path),
        is_read_only=False,
        is_root_device=False,
        rate_limiter={},
    )
    # Test the NET rate limiting API.
    # Test network with tx bw rate-limiting.
    iface_id = "1"
    tapname = test_microvm.id[:8] + "tap" + iface_id
    tap1 = net_tools.Tap(tapname, test_microvm.jailer.netns)
    test_microvm.api.network.put(
        iface_id=iface_id,
        guest_mac="06:00:00:00:00:01",
        host_dev_name=tap1.name,
        tx_rate_limiter={"bandwidth": {"size": 1000000, "refill_time": 100}},
    )
    # Test network with rx bw rate-limiting.
    iface_id = "2"
    tapname = test_microvm.id[:8] + "tap" + iface_id
    tap2 = net_tools.Tap(tapname, test_microvm.jailer.netns)
    test_microvm.api.network.put(
        iface_id=iface_id,
        guest_mac="06:00:00:00:00:02",
        host_dev_name=tap2.name,
        rx_rate_limiter={"bandwidth": {"size": 1000000, "refill_time": 100}},
    )
    # Test network with tx and rx bw and ops rate-limiting.
    iface_id = "3"
    tapname = test_microvm.id[:8] + "tap" + iface_id
    tap3 = net_tools.Tap(tapname, test_microvm.jailer.netns)
    test_microvm.api.network.put(
        iface_id=iface_id,
        guest_mac="06:00:00:00:00:03",
        host_dev_name=tap3.name,
        rx_rate_limiter={
            "bandwidth": {"size": 1000000, "refill_time": 100},
            "ops": {"size": 1, "refill_time": 100},
        },
        tx_rate_limiter={
            "bandwidth": {"size": 1000000, "refill_time": 100},
            "ops": {"size": 1, "refill_time": 100},
        },
    )
    # Test entropy device bw and ops rate-limiting.
    test_microvm.api.entropy.put(
        rate_limiter={
            "bandwidth": {"size": 1000000, "refill_time": 100},
            "ops": {"size": 1, "refill_time": 100},
        },
    )
def test_api_patch_pre_boot(test_microvm_with_api):
    """
    Test that PATCH updates are not allowed before the microvm boots.

    Exception: the machine configuration, which explicitly supports
    pre-boot PATCH and is verified to apply here.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    # Sets up the microVM with 2 vCPUs, 256 MiB of RAM, 1 network interface
    # and a root file system with the rw permission.
    test_microvm.basic_config()
    fs1 = drive_tools.FilesystemFile(os.path.join(test_microvm.fsfiles, "scratch"))
    drive_id = "scratch"
    test_microvm.api.drive.put(
        drive_id=drive_id,
        path_on_host=test_microvm.create_jailed_resource(fs1.path),
        is_root_device=False,
        is_read_only=False,
    )
    iface_id = "1"
    tapname = test_microvm.id[:8] + "tap" + iface_id
    tap1 = net_tools.Tap(tapname, test_microvm.jailer.netns)
    test_microvm.api.network.put(
        iface_id=iface_id, host_dev_name=tap1.name, guest_mac="06:00:00:00:00:01"
    )
    # Partial updates to the boot source are not allowed.
    with pytest.raises(RuntimeError, match="Invalid request method"):
        test_microvm.api.boot.patch(kernel_image_path="otherfile")
    # Partial updates to the machine configuration are allowed before boot.
    test_microvm.api.machine_config.patch(vcpu_count=4)
    response_json = test_microvm.api.machine_config.get().json()
    assert response_json["vcpu_count"] == 4
    # Partial updates to the logger configuration are not allowed.
    with pytest.raises(RuntimeError, match="Invalid request method"):
        test_microvm.api.logger.patch(level="Error")
    # Patching drive before boot is not allowed.
    with pytest.raises(RuntimeError, match=NOT_SUPPORTED_BEFORE_START):
        test_microvm.api.drive.patch(drive_id=drive_id, path_on_host="foo.bar")
    # Patching net before boot is not allowed.
    with pytest.raises(RuntimeError, match=NOT_SUPPORTED_BEFORE_START):
        test_microvm.api.network.patch(iface_id=iface_id)
def test_negative_api_patch_post_boot(test_microvm_with_api):
    """
    Test PATCH updates that are not allowed after the microvm boots.

    Boot-source and logger PATCH are rejected as invalid methods at any
    time; machine-config PATCH is additionally forbidden post-boot.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    # Sets up the microVM with 2 vCPUs, 256 MiB of RAM, 1 network iface and
    # a root file system with the rw permission.
    test_microvm.basic_config()
    fs1 = drive_tools.FilesystemFile(os.path.join(test_microvm.fsfiles, "scratch"))
    test_microvm.api.drive.put(
        drive_id="scratch",
        path_on_host=test_microvm.create_jailed_resource(fs1.path),
        is_root_device=False,
        is_read_only=False,
    )
    iface_id = "1"
    tapname = test_microvm.id[:8] + "tap" + iface_id
    tap1 = net_tools.Tap(tapname, test_microvm.jailer.netns)
    test_microvm.api.network.put(
        iface_id=iface_id, host_dev_name=tap1.name, guest_mac="06:00:00:00:00:01"
    )
    test_microvm.start()
    # Partial updates to the boot source are not allowed.
    with pytest.raises(RuntimeError, match="Invalid request method"):
        test_microvm.api.boot.patch(kernel_image_path="otherfile")
    # Partial updates to the machine configuration are not allowed after boot.
    with pytest.raises(RuntimeError, match=NOT_SUPPORTED_AFTER_START):
        test_microvm.api.machine_config.patch(vcpu_count=4)
    # Partial updates to the logger configuration are not allowed.
    with pytest.raises(RuntimeError, match="Invalid request method"):
        test_microvm.api.logger.patch(level="Error")
def test_drive_patch(test_microvm_with_api):
    """
    Extensively test drive PATCH scenarios before and after boot.

    Pre-boot patching must be rejected; the post-boot scenarios are
    delegated to the `_drive_patch` helper.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    # Sets up the microVM with 2 vCPUs, 256 MiB of RAM and
    # a root file system with the rw permission.
    test_microvm.basic_config(rootfs_io_engine="Sync")
    fs = drive_tools.FilesystemFile(os.path.join(test_microvm.fsfiles, "scratch"))
    # Prefer the io_uring engine when the host kernel supports it; the
    # engine choice is re-checked against GET /vm/config in _drive_patch.
    test_microvm.add_drive(
        drive_id="scratch",
        path_on_host=fs.path,
        is_root_device=False,
        is_read_only=False,
        io_engine="Async" if is_io_uring_supported() else "Sync",
    )
    # Patching drive before boot is not allowed.
    with pytest.raises(RuntimeError, match=NOT_SUPPORTED_BEFORE_START):
        test_microvm.api.drive.patch(drive_id="scratch", path_on_host="foo.bar")
    test_microvm.start()
    _drive_patch(test_microvm)
@pytest.mark.skipif(
    platform.machine() != "x86_64", reason="not yet implemented on aarch64"
)
def test_send_ctrl_alt_del(test_microvm_with_api):
    """
    Test shutting down the microVM gracefully on x86, by sending CTRL+ALT+DEL.
    """
    # This relies on the i8042 device and AT Keyboard support being present in
    # the guest kernel.
    vm = test_microvm_with_api
    vm.spawn()
    vm.basic_config()
    vm.start()
    # Give the guest a moment to boot and bring its user space up before
    # injecting the key sequence.
    time.sleep(2)
    vm.api.actions.put(action_type="SendCtrlAltDel")
    firecracker_pid = vm.jailer_clone_pid
    # If everything goes as expected, the guest OS issues a reboot, which
    # makes Firecracker exit. Poll the process (signal 0 is an existence
    # check) for at most 30 seconds, waiting for it to die.
    deadline = time.time() + 30
    shutdown_ok = False
    while time.time() < deadline:
        try:
            os.kill(firecracker_pid, 0)
        except OSError:
            # Process is gone: Firecracker exited as expected.
            shutdown_ok = True
            break
        time.sleep(0.01)
    assert shutdown_ok
def _drive_patch(test_microvm):
    """Exercise drive patch test scenarios.

    Runs post-boot on a VM that has a "rootfs" drive plus a "scratch"
    drive. Checks rejected patches (missing fields, immutable fields,
    missing backing file), then accepted patches (path, rate limiter),
    and finally validates the drives section of GET /vm/config.
    """
    # Patches without mandatory fields are not allowed.
    expected_msg = "at least one property to patch: path_on_host, rate_limiter"
    with pytest.raises(RuntimeError, match=expected_msg):
        test_microvm.api.drive.patch(drive_id="scratch")
    drive_path = "foo.bar"
    # Cannot patch drive permissions post boot.
    with pytest.raises(RuntimeError, match="unknown field `is_read_only`"):
        test_microvm.api.drive.patch(
            drive_id="scratch", path_on_host=drive_path, is_read_only=True
        )
    # Cannot patch io_engine post boot.
    with pytest.raises(RuntimeError, match="unknown field `io_engine`"):
        test_microvm.api.drive.patch(
            drive_id="scratch", path_on_host=drive_path, io_engine="Sync"
        )
    # Updates to `is_root_device` with a valid value are not allowed.
    with pytest.raises(RuntimeError, match="unknown field `is_root_device`"):
        test_microvm.api.drive.patch(
            drive_id="scratch", path_on_host=drive_path, is_root_device=False
        )
    # Updates to `path_on_host` with an invalid path are not allowed.
    # The message embeds a Rust Debug-formatted io::Error, hence the
    # re.escape below (it contains regex metacharacters).
    expected_msg = (
        "Unable to patch the block device: BackingFile(Os { code: 2, "
        f'kind: NotFound, message: "No such file or directory" }}, "{drive_path}")'
    )
    with pytest.raises(RuntimeError, match=re.escape(expected_msg)):
        test_microvm.api.drive.patch(drive_id="scratch", path_on_host=drive_path)
    fs = drive_tools.FilesystemFile(os.path.join(test_microvm.fsfiles, "scratch_new"))
    # Updates to `path_on_host` with a valid path are allowed.
    test_microvm.api.drive.patch(
        drive_id="scratch", path_on_host=test_microvm.create_jailed_resource(fs.path)
    )
    # Updates to valid `path_on_host` and `rate_limiter` are allowed.
    test_microvm.api.drive.patch(
        drive_id="scratch",
        path_on_host=test_microvm.create_jailed_resource(fs.path),
        rate_limiter={
            "bandwidth": {"size": 1000000, "refill_time": 100},
            "ops": {"size": 1, "refill_time": 100},
        },
    )
    # Updates to `rate_limiter` only are allowed.
    test_microvm.api.drive.patch(
        drive_id="scratch",
        rate_limiter={
            "bandwidth": {"size": 5000, "refill_time": 100},
            "ops": {"size": 500, "refill_time": 100},
        },
    )
    # Updates to `rate_limiter` and invalid path fail.
    with pytest.raises(RuntimeError, match="No such file or directory"):
        test_microvm.api.drive.patch(
            drive_id="scratch",
            path_on_host="foo.bar",
            rate_limiter={
                "bandwidth": {"size": 5000, "refill_time": 100},
                "ops": {"size": 500, "refill_time": 100},
            },
        )
    # Validate full vm configuration after patching drives.
    response = test_microvm.api.vm_config.get().json()
    assert response["drives"] == [
        {
            "drive_id": "rootfs",
            "path_on_host": "/ubuntu-22.04.squashfs",
            "is_root_device": True,
            "partuuid": None,
            "is_read_only": True,
            "cache_type": "Unsafe",
            "io_engine": "Sync",
            "rate_limiter": None,
        },
        {
            "drive_id": "scratch",
            "path_on_host": "/scratch_new.ext4",
            "is_root_device": False,
            "partuuid": None,
            "is_read_only": False,
            "cache_type": "Unsafe",
            "io_engine": "Async" if is_io_uring_supported() else "Sync",
            "rate_limiter": {
                "bandwidth": {"size": 5000, "one_time_burst": None, "refill_time": 100},
                "ops": {"size": 500, "one_time_burst": None, "refill_time": 100},
            },
        },
    ]
def test_api_version(test_microvm_with_api):
    """
    Test the permanent VM version endpoint.

    The version must be readable pre- and post-boot, be identical in both
    cases, and agree with both the Cargo.toml version and the version
    reported by the Firecracker binary itself.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    test_microvm.basic_config()
    # Getting the VM version should be available pre-boot.
    preboot_response = test_microvm.api.version.get()
    # Check that the response contains the version.
    assert "firecracker_version" in preboot_response.json()
    # Start the microvm.
    test_microvm.start()
    # Getting the VM version should be available post-boot.
    postboot_response = test_microvm.api.version.get()
    # Check that the response contains the version.
    assert "firecracker_version" in postboot_response.json()
    # Validate VM version post-boot is the same as pre-boot.
    assert preboot_response.json() == postboot_response.json()
    cargo_version = get_firecracker_version_from_toml()
    # Parse through packaging.version so the comparison is semantic
    # rather than a raw string compare.
    api_version = packaging.version.parse(
        preboot_response.json()["firecracker_version"]
    )
    # Cargo version should match FC API version
    assert cargo_version == api_version
    binary_version = packaging.version.parse(test_microvm.firecracker_version)
    assert api_version == binary_version
def test_api_vsock(uvm_nano):
    """
    Test vsock related API commands.

    Pre-boot: creating and overwriting the vsock device must work, both
    with and without the deprecated `vsock_id` field (which must produce
    a deprecation response header). Post-boot: any vsock PUT must fail.
    """
    vm = uvm_nano
    # Create a vsock device.
    vm.api.vsock.put(guest_cid=15, uds_path="vsock.sock")
    # Updating an existing vsock is currently fine.
    vm.api.vsock.put(guest_cid=166, uds_path="vsock.sock")
    # Check PUT request. Although vsock_id is deprecated, it must still work.
    response = vm.api.vsock.put(vsock_id="vsock1", guest_cid=15, uds_path="vsock.sock")
    assert response.headers["deprecation"]
    # Updating an existing vsock is currently fine even with deprecated
    # `vsock_id`.
    response = vm.api.vsock.put(vsock_id="vsock1", guest_cid=166, uds_path="vsock.sock")
    assert response.headers["deprecation"]
    # No other vsock action is allowed after booting the VM.
    vm.start()
    # Updating an existing vsock should not be fine at this point.
    with pytest.raises(RuntimeError):
        vm.api.vsock.put(guest_cid=17, uds_path="vsock.sock")
def test_api_entropy(uvm_plain):
    """
    Test entropy related API commands.

    Creating and overwriting the entropy device must work pre-boot and
    fail post-boot.
    """
    test_microvm = uvm_plain
    test_microvm.spawn()
    test_microvm.basic_config()
    # Create a new entropy device should be OK.
    test_microvm.api.entropy.put()
    # Overwriting an existing should be OK.
    test_microvm.api.entropy.put()
    # Start the microvm
    test_microvm.start()
    # Attaching an entropy device after boot must be rejected.
    with pytest.raises(RuntimeError):
        test_microvm.api.entropy.put()
def test_api_balloon(uvm_nano):
    """
    Test balloon related API commands.

    Covers pre-boot creation/overwrite/validation, the post-boot window
    before the guest balloon driver initializes, and post-init PATCH of
    both the balloon size and its statistics polling interval.
    """
    test_microvm = uvm_nano
    # Updating an inexistent balloon device should give an error.
    with pytest.raises(RuntimeError):
        test_microvm.api.balloon.patch(amount_mib=0)
    # Adding a memory balloon should be OK.
    test_microvm.api.balloon.put(amount_mib=1, deflate_on_oom=True)
    # As is overwriting one.
    test_microvm.api.balloon.put(
        amount_mib=0, deflate_on_oom=False, stats_polling_interval_s=5
    )
    # Getting the device configuration should be available pre-boot.
    response = test_microvm.api.balloon.get()
    assert response.json()["amount_mib"] == 0
    assert response.json()["deflate_on_oom"] is False
    assert response.json()["stats_polling_interval_s"] == 5
    # Updating an existing balloon device is forbidden before boot.
    with pytest.raises(RuntimeError):
        test_microvm.api.balloon.patch(amount_mib=2)
    # We can't have a balloon device with a target size greater than
    # the available amount of memory.
    with pytest.raises(RuntimeError):
        test_microvm.api.balloon.put(
            amount_mib=1024, deflate_on_oom=False, stats_polling_interval_s=5
        )
    # Start the microvm.
    test_microvm.start()
    # Updating should fail as driver didn't have time to initialize.
    with pytest.raises(RuntimeError):
        test_microvm.api.balloon.patch(amount_mib=4)
    # Overwriting the existing device should give an error now.
    with pytest.raises(RuntimeError):
        test_microvm.api.balloon.put(
            amount_mib=3, deflate_on_oom=False, stats_polling_interval_s=3
        )
    # Give the balloon driver time to initialize.
    # 500 ms is the maximum acceptable boot time.
    time.sleep(0.5)
    # But updating should be OK.
    test_microvm.api.balloon.patch(amount_mib=4)
    # Check we can't request more than the total amount of VM memory.
    with pytest.raises(RuntimeError):
        test_microvm.api.balloon.patch(amount_mib=300)
    # Check we can't disable statistics as they were enabled at boot.
    # We can, however, change the interval to a non-zero value.
    test_microvm.api.balloon_stats.patch(stats_polling_interval_s=5)
    # Getting the device configuration should be available post-boot.
    response = test_microvm.api.balloon.get()
    assert response.json()["amount_mib"] == 4
    assert response.json()["deflate_on_oom"] is False
    assert response.json()["stats_polling_interval_s"] == 5
    # Check we can't overflow the `num_pages` field in the config space by
    # requesting too many MB. There are 256 4K pages in a MB. Here, we are
    # requesting u32::MAX / 128.
    with pytest.raises(RuntimeError):
        test_microvm.api.balloon.patch(amount_mib=33554432)
def test_get_full_config_after_restoring_snapshot(microvm_factory, uvm_nano):
    """
    Test the configuration of a microVM after restoring from a snapshot.

    Configure a microVM (machine config, balloon, vsock, MMDS, a post-boot
    net tx rate limiter), snapshot it, restore the snapshot into a fresh
    VM, and verify that GET /vm/config on the restored VM reports the
    expected configuration (including server-side defaults such as the
    MMDS ipv4 address).
    """
    net_iface = uvm_nano.add_net_iface()
    cpu_vendor = utils_cpuid.get_cpu_vendor()
    setup_cfg = {}
    # Basic config also implies a root block device.
    setup_cfg["machine-config"] = {
        "vcpu_count": 2,
        "mem_size_mib": 256,
        "smt": True,
        "track_dirty_pages": False,
    }
    # SMT is not supported on ARM; Intel additionally exercises a static
    # CPU template.
    if cpu_vendor == utils_cpuid.CpuVendor.ARM:
        setup_cfg["machine-config"]["smt"] = False
    if cpu_vendor == utils_cpuid.CpuVendor.INTEL:
        setup_cfg["machine-config"]["cpu_template"] = "C3"
    uvm_nano.api.machine_config.patch(**setup_cfg["machine-config"])
    setup_cfg["cpu-config"] = None
    setup_cfg["drives"] = [
        {
            "drive_id": "rootfs",
            "path_on_host": f"/{uvm_nano.rootfs_file.name}",
            "is_root_device": True,
            "partuuid": None,
            "is_read_only": True,
            "cache_type": "Unsafe",
            "rate_limiter": None,
            "io_engine": "Sync",
        }
    ]
    # Add a memory balloon device.
    uvm_nano.api.balloon.put(amount_mib=1, deflate_on_oom=True)
    setup_cfg["balloon"] = {
        "amount_mib": 1,
        "deflate_on_oom": True,
        "stats_polling_interval_s": 0,
    }
    # Add a vsock device.
    uvm_nano.api.vsock.put(guest_cid=15, uds_path="vsock.sock")
    setup_cfg["vsock"] = {"guest_cid": 15, "uds_path": "vsock.sock"}
    setup_cfg["logger"] = None
    setup_cfg["metrics"] = None
    setup_cfg["mmds-config"] = {
        "version": "V1",
        "network_interfaces": [net_iface.dev_name],
    }
    uvm_nano.api.mmds_config.put(**setup_cfg["mmds-config"])
    # Start the microvm.
    uvm_nano.start()
    # Add a tx rate limiter to the net device. The response body is not
    # needed: the API helper raises on failure.
    tx_rl = {
        "bandwidth": {"size": 1000000, "refill_time": 100, "one_time_burst": None},
        "ops": None,
    }
    uvm_nano.api.network.patch(iface_id=net_iface.dev_name, tx_rate_limiter=tx_rl)
    setup_cfg["network-interfaces"] = [
        {
            "guest_mac": net_tools.mac_from_ip(net_iface.guest_ip),
            "iface_id": net_iface.dev_name,
            "host_dev_name": net_iface.tap_name,
            "rx_rate_limiter": None,
            "tx_rate_limiter": tx_rl,
        }
    ]
    snapshot = uvm_nano.snapshot_full()
    uvm2 = microvm_factory.build()
    uvm2.spawn()
    uvm2.restore_from_snapshot(snapshot)
    uvm2.resume()
    expected_cfg = setup_cfg.copy()
    # We expect boot-source to be set with the following values
    expected_cfg["boot-source"] = {
        "kernel_image_path": uvm_nano.get_jailed_resource(uvm_nano.kernel_file),
        "initrd_path": None,
    }
    # no ipv4 specified during PUT /mmds/config so we expect the default
    expected_cfg["mmds-config"] = {
        "version": "V1",
        "ipv4_address": "169.254.169.254",
        "network_interfaces": [net_iface.dev_name],
    }
    # We should expect a null entropy device
    expected_cfg["entropy"] = None
    # Validate full vm configuration post-restore.
    response = uvm2.api.vm_config.get().json()
    # Sanity check: the expected config diverges from the raw setup config
    # (boot-source, mmds defaults, entropy), so the two must differ.
    assert response != setup_cfg
    assert response == expected_cfg
def test_get_full_config(test_microvm_with_api):
    """
    Test the reported configuration of a microVM configured with all resources.

    Configures machine config, root drive, balloon, vsock, a rate-limited
    net device and MMDS, then checks that GET /vm/config reports the same
    configuration both before and after boot.
    """
    test_microvm = test_microvm_with_api
    expected_cfg = {}
    test_microvm.spawn()
    # Basic config also implies a root block device.
    test_microvm.basic_config(boot_args="", rootfs_io_engine="Sync")
    expected_cfg["machine-config"] = {
        "vcpu_count": 2,
        "mem_size_mib": 256,
        "smt": False,
        "track_dirty_pages": False,
    }
    expected_cfg["cpu-config"] = None
    expected_cfg["boot-source"] = {
        "boot_args": "",
        "kernel_image_path": f"/{test_microvm.kernel_file.name}",
        "initrd_path": None,
    }
    expected_cfg["drives"] = [
        {
            "drive_id": "rootfs",
            "path_on_host": "/ubuntu-22.04.squashfs",
            "is_root_device": True,
            "partuuid": None,
            "is_read_only": True,
            "cache_type": "Unsafe",
            "rate_limiter": None,
            "io_engine": "Sync",
        }
    ]
    # Add a memory balloon device.
    test_microvm.api.balloon.put(amount_mib=1, deflate_on_oom=True)
    expected_cfg["balloon"] = {
        "amount_mib": 1,
        "deflate_on_oom": True,
        "stats_polling_interval_s": 0,
    }
    # Add a vsock device. The response bodies of these setup PUTs are not
    # inspected: the API helper raises on failure.
    test_microvm.api.vsock.put(guest_cid=15, uds_path="vsock.sock")
    expected_cfg["vsock"] = {"guest_cid": 15, "uds_path": "vsock.sock"}
    # Add a net device.
    iface_id = "1"
    tapname = test_microvm.id[:8] + "tap" + iface_id
    tap1 = net_tools.Tap(tapname, test_microvm.jailer.netns)
    guest_mac = "06:00:00:00:00:01"
    tx_rl = {
        "bandwidth": {"size": 1000000, "refill_time": 100, "one_time_burst": None},
        "ops": None,
    }
    test_microvm.api.network.put(
        iface_id=iface_id,
        guest_mac=guest_mac,
        host_dev_name=tap1.name,
        tx_rate_limiter=tx_rl,
    )
    expected_cfg["network-interfaces"] = [
        {
            "iface_id": iface_id,
            "host_dev_name": tap1.name,
            "guest_mac": "06:00:00:00:00:01",
            "rx_rate_limiter": None,
            "tx_rate_limiter": tx_rl,
        }
    ]
    # Update MMDS config.
    mmds_config = {
        "version": "V2",
        "ipv4_address": "169.254.169.250",
        "network_interfaces": ["1"],
    }
    test_microvm.api.mmds_config.put(**mmds_config)
    expected_cfg["logger"] = None
    expected_cfg["metrics"] = None
    expected_cfg["mmds-config"] = {
        "version": "V2",
        "ipv4_address": "169.254.169.250",
        "network_interfaces": ["1"],
    }
    # We should expect a null entropy device
    expected_cfg["entropy"] = None
    # Getting full vm configuration should be available pre-boot.
    response = test_microvm.api.vm_config.get()
    assert response.json() == expected_cfg
    # Start the microvm.
    test_microvm.start()
    # Validate full vm configuration post-boot as well.
    response = test_microvm.api.vm_config.get()
    assert response.json() == expected_cfg
def test_map_private_seccomp_regression(test_microvm_with_api):
    """
    Seccomp mmap MAP_PRIVATE regression test.
    When sending large buffer to an api endpoint there will be an attempt to
    call mmap with MAP_PRIVATE|MAP_ANONYMOUS. This would result in vmm being
    killed by the seccomp filter before this PR.
    """
    test_microvm = test_microvm_with_api
    # Raise the API payload limit so the ~1 MiB MMDS body below is accepted.
    test_microvm.jailer.extra_args.update(
        {"http-api-max-payload-size": str(1024 * 1024 * 2)}
    )
    test_microvm.spawn()
    # Disable per-request timing; presumably the oversized request would
    # otherwise trip a latency check — TODO confirm against the framework.
    test_microvm.time_api_request = False
    response = test_microvm.api.mmds.get()
    assert response.json() == {}
    # A 1 MiB value forces the large-buffer mmap path under test.
    data_store = {"latest": {"meta-data": {"ami-id": "b" * (1024 * 1024)}}}
    test_microvm.api.mmds.put(**data_store)
# pylint: disable=protected-access
def test_negative_snapshot_load_api(microvm_factory):
    """
    Test snapshot load API.

    Exercises the request-validation failures of PUT /snapshot/load:
    mutually exclusive memory backend fields, missing required fields,
    invalid backend type, and the deprecation header on `mem_file_path`.
    """
    vm = microvm_factory.build()
    vm.spawn()
    # Specifying both `mem_backend` and 'mem_file_path` should fail.
    err_msg = (
        "too many fields: either `mem_backend` or "
        "`mem_file_path` exclusively is required."
    )
    with pytest.raises(RuntimeError, match=err_msg):
        vm.api.snapshot_load.put(
            snapshot_path="foo",
            mem_backend={"backend_type": "File", "backend_path": "bar"},
            mem_file_path="bar",
        )
    # API request with `mem_backend` but no `backend_type` should fail.
    with pytest.raises(RuntimeError, match="missing field `backend_type`"):
        vm.api.snapshot_load.put(
            snapshot_path="foo",
            mem_backend={"backend_path": "bar"},
        )
    # API request with `mem_backend` but no `backend_path` should fail.
    with pytest.raises(RuntimeError, match="missing field `backend_path`"):
        vm.api.snapshot_load.put(
            snapshot_path="foo",
            mem_backend={"backend_type": "File"},
        )
    # API request with invalid `backend_type` should fail.
    with pytest.raises(
        RuntimeError, match="unknown variant `foo`, expected `File` or `Uffd`"
    ):
        vm.api.snapshot_load.put(
            snapshot_path="foo",
            mem_backend={"backend_type": "foo", "backend_path": "bar"},
        )
    # API request without `snapshot_path` should fail.
    with pytest.raises(RuntimeError, match="missing field `snapshot_path`"):
        vm.api.snapshot_load.put(
            mem_backend={"backend_type": "File", "backend_path": "bar"},
        )
    # API request without `mem_backend` or `mem_file_path` should fail.
    err_msg = "missing field: either `mem_backend` or " "`mem_file_path` is required"
    with pytest.raises(RuntimeError, match=err_msg):
        vm.api.snapshot_load.put(snapshot_path="foo")
    # Deprecated API should return deprecation response header.
    # The request still fails (no such snapshot file), but the header must
    # be present on the error response carried by the exception.
    with pytest.raises(RuntimeError) as exc_info:
        vm.api.snapshot_load.put(
            snapshot_path="foo",
            mem_file_path="bar",
        )
    assert exc_info.value.args[2].headers["deprecation"]
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,932
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/security/test_seccomp.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests that the seccomp filters don't let denied syscalls through."""
import json as json_lib
import os
import platform
import tempfile
from framework import utils
from host_tools.cargo_build import run_seccompiler_bin
def _get_basic_syscall_list():
"""Return the JSON list of syscalls that the demo jailer needs."""
if platform.machine() == "x86_64":
sys_list = [
"rt_sigprocmask",
"rt_sigaction",
"execve",
"mmap",
"mprotect",
"arch_prctl",
"set_tid_address",
"readlink",
"open",
"read",
"close",
"brk",
"sched_getaffinity",
"sigaltstack",
"munmap",
"exit_group",
"poll",
]
else:
# platform.machine() == "aarch64"
sys_list = [
"rt_sigprocmask",
"rt_sigaction",
"execve",
"mmap",
"mprotect",
"set_tid_address",
"read",
"close",
"brk",
"sched_getaffinity",
"sigaltstack",
"munmap",
"exit_group",
"ppoll",
]
json = ""
for syscall in sys_list[0:-1]:
json += """
{{
"syscall": \"{}\"
}},
""".format(
syscall
)
json += """
{{
"syscall": \"{}\"
}}
""".format(
sys_list[-1]
)
return json
def _run_seccompiler_bin(json_data, basic=False):
    """Compile a JSON seccomp filter with seccompiler-bin.

    Writes `json_data` to a temporary file, invokes seccompiler-bin on it
    and deletes the JSON temp file afterwards.

    :param json_data: the seccomp filter document, as a JSON string
    :param basic: pass seccompiler-bin's `--basic` flag (disables
        syscall-argument checks)
    :return: path to the compiled BPF file; the caller must unlink it
    """
    # Use context managers so both temp-file descriptors are closed before
    # seccompiler-bin runs; the original leaked both open handles.
    with tempfile.NamedTemporaryFile(delete=False) as json_temp:
        json_temp.write(json_data.encode("utf-8"))
        json_path = json_temp.name
    with tempfile.NamedTemporaryFile(delete=False) as bpf_temp:
        bpf_path = bpf_temp.name
    run_seccompiler_bin(bpf_path=bpf_path, json_path=json_path, basic=basic)
    os.unlink(json_path)
    return bpf_path
def test_seccomp_ls(bin_seccomp_paths):
    """
    Assert that the seccomp filter denies an unallowed syscall.

    Runs `/bin/ls` under the demo jailer with a filter containing only the
    basic syscall list; `ls` needs syscalls outside that list, so it must
    be killed and exit non-zero.
    """
    # pylint: disable=redefined-outer-name
    # pylint: disable=subprocess-run-check
    # The fixture pattern causes a pylint false positive for that rule.
    # Path to the `ls` binary, which attempts to execute the forbidden
    # `SYS_access`.
    ls_command_path = "/bin/ls"
    demo_jailer = bin_seccomp_paths["demo_jailer"]
    assert os.path.exists(demo_jailer)
    # The doubled braces are literal braces; .format only fills in the
    # syscall list.
    json_filter = """{{
        "main": {{
            "default_action": "trap",
            "filter_action": "allow",
            "filter": [
                {}
            ]
        }}
    }}""".format(
        _get_basic_syscall_list()
    )
    # Run seccompiler-bin.
    bpf_path = _run_seccompiler_bin(json_filter)
    # Run the mini jailer.
    outcome = utils.run_cmd(
        [demo_jailer, ls_command_path, bpf_path], no_shell=True, ignore_return_code=True
    )
    os.unlink(bpf_path)
    # The seccomp filters should send SIGSYS (31) to the binary. `ls` doesn't
    # handle it, so it will exit with error.
    assert outcome.returncode != 0
def test_advanced_seccomp(bin_seccomp_paths):
    """
    Test seccompiler-bin with `demo_jailer`.
    Test that the demo jailer (with advanced seccomp) allows the harmless demo
    binary, denies the malicious demo binary and that an empty allowlist
    denies everything.

    Also checks that the `--basic` compilation mode strips the
    syscall-argument conditions, letting the malicious binary through.
    """
    # pylint: disable=redefined-outer-name
    # pylint: disable=subprocess-run-check
    # The fixture pattern causes a pylint false positive for that rule.
    demo_jailer = bin_seccomp_paths["demo_jailer"]
    demo_harmless = bin_seccomp_paths["demo_harmless"]
    demo_malicious = bin_seccomp_paths["demo_malicious"]
    assert os.path.exists(demo_jailer)
    assert os.path.exists(demo_harmless)
    assert os.path.exists(demo_malicious)
    # Basic syscall list plus a conditioned `write`: only 14 bytes to
    # stdout are allowed, which the harmless binary satisfies and the
    # malicious one violates.
    json_filter = """{{
        "main": {{
            "default_action": "trap",
            "filter_action": "allow",
            "filter": [
                {},
                {{
                    "syscall": "write",
                    "args": [
                        {{
                            "index": 0,
                            "type": "dword",
                            "op": "eq",
                            "val": 1,
                            "comment": "stdout fd"
                        }},
                        {{
                            "index": 2,
                            "type": "qword",
                            "op": "eq",
                            "val": 14,
                            "comment": "nr of bytes"
                        }}
                    ]
                }}
            ]
        }}
    }}""".format(
        _get_basic_syscall_list()
    )
    # Run seccompiler-bin.
    bpf_path = _run_seccompiler_bin(json_filter)
    # Run the mini jailer for harmless binary.
    outcome = utils.run_cmd(
        [demo_jailer, demo_harmless, bpf_path], no_shell=True, ignore_return_code=True
    )
    # The demo harmless binary should have terminated gracefully.
    assert outcome.returncode == 0
    # Run the mini jailer for malicious binary.
    outcome = utils.run_cmd(
        [demo_jailer, demo_malicious, bpf_path], no_shell=True, ignore_return_code=True
    )
    # The demo malicious binary should have received `SIGSYS`.
    assert outcome.returncode == -31
    os.unlink(bpf_path)
    # Run seccompiler-bin with `--basic` flag.
    bpf_path = _run_seccompiler_bin(json_filter, basic=True)
    # Run the mini jailer for malicious binary.
    outcome = utils.run_cmd(
        [demo_jailer, demo_malicious, bpf_path], no_shell=True, ignore_return_code=True
    )
    # The malicious binary also terminates gracefully, since the --basic option
    # disables all argument checks.
    assert outcome.returncode == 0
    os.unlink(bpf_path)
    # Run the mini jailer with an empty allowlist. It should trap on any
    # syscall.
    json_filter = """{
        "main": {
            "default_action": "trap",
            "filter_action": "allow",
            "filter": []
        }
    }"""
    # Run seccompiler-bin.
    bpf_path = _run_seccompiler_bin(json_filter)
    outcome = utils.run_cmd(
        [demo_jailer, demo_harmless, bpf_path], no_shell=True, ignore_return_code=True
    )
    # The demo binary should have received `SIGSYS`.
    assert outcome.returncode == -31
    os.unlink(bpf_path)
def test_no_seccomp(test_microvm_with_api):
    """
    Test that Firecracker --no-seccomp installs no filter.

    With the flag, /proc/<pid>/status must report seccomp level 0
    (disabled).
    """
    test_microvm = test_microvm_with_api
    test_microvm.jailer.extra_args.update({"no-seccomp": None})
    test_microvm.spawn()
    test_microvm.basic_config()
    test_microvm.start()
    utils.assert_seccomp_level(test_microvm.jailer_clone_pid, "0")
def test_default_seccomp_level(test_microvm_with_api):
    """
    Test that Firecracker installs a seccomp filter by default.

    Without extra flags, /proc/<pid>/status must report seccomp level 2
    (filter mode).
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    test_microvm.basic_config()
    test_microvm.start()
    utils.assert_seccomp_level(test_microvm.jailer_clone_pid, "2")
def test_seccomp_rust_panic(bin_seccomp_paths):
    """
    Test seccompiler-bin with `demo_panic`.
    Test that the Firecracker filters allow a Rust panic to run its
    course without triggering a seccomp violation.
    """
    # pylint: disable=redefined-outer-name
    # pylint: disable=subprocess-run-check
    # The fixture pattern causes a pylint false positive for that rule.
    demo_panic = bin_seccomp_paths["demo_panic"]
    assert os.path.exists(demo_panic)
    fc_filters_path = "../resources/seccomp/{}-unknown-linux-musl.json".format(
        platform.machine()
    )
    with open(fc_filters_path, "r", encoding="utf-8") as fc_filters:
        # Top-level keys of the filter document are the per-thread filter
        # names the demo binary can install.
        filter_threads = list(json_lib.loads(fc_filters.read()))
    # Only the temp file's path is needed; close the handle immediately.
    # The original kept the descriptor open for the test's lifetime.
    with tempfile.NamedTemporaryFile(delete=False) as bpf_temp:
        bpf_path = bpf_temp.name
    run_seccompiler_bin(bpf_path=bpf_path, json_path=fc_filters_path)
    # Run the panic binary with all filters.
    for thread in filter_threads:
        code, _, _ = utils.run_cmd(
            [demo_panic, bpf_path, thread], no_shell=True, ignore_return_code=True
        )
        # The demo panic binary should have terminated with SIGABRT
        # and not with a seccomp violation.
        # On a seccomp violation, the program exits with code -31 for
        # SIGSYS. Here, we make sure the program exits with -6, which
        # is for SIGABRT.
        assert (
            code == -6
        ), "Panic binary failed with exit code {} on {} " "filters.".format(
            code, thread
        )
    os.unlink(bpf_path)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,933
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/style/test_gitlint.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests ensuring desired style for commit messages."""
import os
from framework import utils
def test_gitlint():
    """Check every commit between origin/main and HEAD against gitlint."""
    # gitlint needs a UTF-8 locale to read commit messages reliably.
    for var in ("LC_ALL", "LANG"):
        os.environ[var] = "C.UTF-8"
    gitlint_cmd = (
        "gitlint --commits origin/main..HEAD"
        " -C ../.gitlint"
        " --extra-path framework/gitlint_rules.py"
    )
    try:
        utils.run_cmd(gitlint_cmd)
    except ChildProcessError as error:
        assert False, "Commit message violates gitlint rules: {}".format(error)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,934
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_metrics.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests the metrics system."""
import datetime
import math
import platform
def test_flush_metrics(test_microvm_with_api):
    """
    Check the `FlushMetrics` vmm action.
    """
    vm = test_microvm_with_api
    vm.spawn()
    vm.basic_config()
    vm.start()

    flushed = vm.flush_metrics()

    # Every metrics group Firecracker emits must be present, and nothing else.
    expected = {
        "utc_timestamp_ms",
        "api_server",
        "balloon",
        "block",
        "deprecated_api",
        "get_api_requests",
        "i8042",
        "latencies_us",
        "logger",
        "mmds",
        "net",
        "patch_api_requests",
        "put_api_requests",
        "seccomp",
        "vcpu",
        "vmm",
        "uart",
        "signals",
        "vsock",
        "entropy",
    }
    if platform.machine() == "aarch64":
        expected.add("rtc")
    assert set(flushed.keys()) == expected

    now_utc = datetime.datetime.now(datetime.timezone.utc)
    now_ms = math.floor(now_utc.timestamp() * 1000)
    # Assert that the absolute difference is less than 1 second, to check that
    # the reported utc_timestamp_ms is actually a UTC timestamp from the Unix
    # Epoch. Regression test for:
    # https://github.com/firecracker-microvm/firecracker/issues/2639
    assert abs(now_ms - flushed["utc_timestamp_ms"]) < 1000
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,935
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/performance/test_memory_overhead.py
|
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Track Firecracker memory overhead
Because Firecracker is a static binary, and is copied before execution, no
memory is shared across many different processes. It is thus important to track
how much memory overhead Firecracker adds.
These tests output metrics to capture the memory overhead that Firecracker adds,
both from the binary file (file-backed) and what Firecracker allocates during
the process lifetime.
The memory overhead of the jailer is not important as it is short-lived.
"""
from collections import defaultdict
from pathlib import Path
import psutil
import pytest
from framework.properties import global_props
# If guest memory is >3328MB, it is split in a 2nd region
X86_MEMORY_GAP_START = 3328 * 2**20
# NOTE(review): (2, 2024) looks like it may be a typo for 2048 MiB — confirm.
# The 4096 MiB case exercises the split guest-memory layout on x86
# (see X86_MEMORY_GAP_START above).
@pytest.mark.parametrize(
    "vcpu_count,mem_size_mib",
    [(1, 128), (1, 1024), (2, 2024), (4, 4096)],
)
def test_memory_overhead(
    microvm_factory, guest_kernel, rootfs, vcpu_count, mem_size_mib, metrics
):
    """Track Firecracker memory overhead.

    We take a single measurement as it only varies by a few KiB each run.
    """
    microvm = microvm_factory.build(guest_kernel, rootfs)
    microvm.spawn()
    microvm.basic_config(vcpu_count=vcpu_count, mem_size_mib=mem_size_mib)
    microvm.add_net_iface()
    microvm.start()
    # check that the vm is running
    microvm.ssh.run("true")
    guest_mem_bytes = mem_size_mib * 2**20
    # Mapping sizes that correspond to guest memory rather than overhead:
    # either one region holding everything, or the regions around the x86 gap.
    guest_mem_splits = {
        guest_mem_bytes,
        X86_MEMORY_GAP_START,
    }
    if guest_mem_bytes > X86_MEMORY_GAP_START:
        guest_mem_splits.add(guest_mem_bytes - X86_MEMORY_GAP_START)
    mem_stats = defaultdict(int)
    mem_stats["guest_memory"] = guest_mem_bytes
    ps = psutil.Process(microvm.jailer_clone_pid)
    for pmmap in ps.memory_maps(grouped=False):
        # We publish 'size' and 'rss' (resident). size would be the worst case,
        # whereas rss is the current paged-in memory.
        mem_stats["total_size"] += pmmap.size
        mem_stats["total_rss"] += pmmap.rss
        pmmap_path = Path(pmmap.path)
        # File-backed mappings of the firecracker binary itself.
        if pmmap_path.exists() and pmmap_path.name.startswith("firecracker"):
            mem_stats["binary_size"] += pmmap.size
            mem_stats["binary_rss"] += pmmap.rss
        # Any mapping whose size is not one of the guest-memory splits counts
        # as overhead added by the Firecracker process.
        if pmmap.size not in guest_mem_splits:
            mem_stats["overhead_size"] += pmmap.size
            mem_stats["overhead_rss"] += pmmap.rss
    dimensions = {
        # "instance": global_props.instance,
        # "cpu_model": global_props.cpu_model,
        "architecture": global_props.cpu_architecture,
        "host_kernel": "linux-" + global_props.host_linux_version,
        "guest_kernel": guest_kernel.name,
        "rootfs": rootfs.name,
    }
    metrics.set_dimensions(dimensions)
    for key, value in mem_stats.items():
        metrics.put_metric(key, value, unit="Bytes")
    # uss/text as reported by psutil's memory_full_info for the process.
    mem_info = ps.memory_full_info()
    for metric in ["uss", "text"]:
        val = getattr(mem_info, metric)
        metrics.put_metric(metric, val, unit="Bytes")
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,936
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tools/gh_release.py
|
#!/usr/bin/env python3
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Draft a release in GitHub by calling into its API.
Assumes all the releases are in the current path.
"""
import argparse
import re
import subprocess
import tarfile
from pathlib import Path
from github import Github
def build_tarball(release_dir, release_tgz, arch):
    """Build a release tarball with local assets"""
    # Do not include signatures in GitHub release since we aren't
    # making those keys public.
    skipped = {"RELEASE_NOTES", "SHA256SUMS.sig"}
    with tarfile.open(release_tgz, "w:gz") as tar:
        for asset in (p for p in release_dir.rglob("*") if p.is_file()):
            if asset.name in skipped:
                print(f"Skipping file {asset}")
                continue
            if asset.name.endswith(arch):
                # Release binaries are named after the architecture; make
                # sure they ship executable.
                print(f"Setting +x bit for {asset}")
                asset.chmod(0o755)
            print(f"Adding {asset} to {release_tgz}")
            tar.add(asset)
def github_release(tag_version, repo, github_token):
    """Create a draft release in GitHub

    For each supported architecture, package the local
    ``release-<tag>-<arch>`` directory into a tarball, record its sha256,
    then create a draft GitHub release and upload tarballs and checksum
    files as assets.
    """
    prerelease = False
    assets = []
    for arch in ["x86_64", "aarch64"]:
        # Assumes the per-arch release directories exist in the cwd.
        release_dir = Path(f"release-{tag_version}-{arch}")
        # Build tarball
        release_tgz = Path(f"firecracker-{tag_version}-{arch}.tgz")
        print(f"Creating release archive {release_tgz} ...")
        build_tarball(release_dir, release_tgz, arch)
        print("Done. Archive successfully created. sha256sum result:")
        # e.g. firecracker-v1.0.0-x86_64.tgz.sha256.txt
        sha256sums = release_tgz.with_suffix(release_tgz.suffix + ".sha256.txt")
        subprocess.run(
            f"sha256sum {release_tgz} > {sha256sums}",
            check=True,
            shell=True,
        )
        print(sha256sums.read_text("utf-8"))
        assets.append(release_tgz)
        assets.append(sha256sums)
    # Release notes: taken from the x86_64 directory only.
    message_file = Path(f"release-{tag_version}-x86_64") / "RELEASE_NOTES"
    message = message_file.read_text()
    # Create release
    print("Creating GitHub release draft")
    gh_client = Github(github_token)
    gh_repo = gh_client.get_repo(repo)
    gh_release = gh_repo.create_git_release(
        tag_version,
        f"Firecracker {tag_version}",
        message,
        draft=True,
        prerelease=prerelease,
    )
    # Upload assets
    for asset in assets:
        # Pick a content-type from the file suffix; default to binary.
        content_type = "application/octet-stream"
        if asset.suffix == ".txt":
            content_type = "text/plain"
        elif asset.suffix == ".tgz":
            content_type = "application/gzip"
        print(f"Uploading asset {asset} with content-type={content_type}")
        gh_release.upload_asset(str(asset), label=asset.name, content_type=content_type)
    release_url = gh_release.html_url
    print(f"Draft release created successful. Check it out at {release_url}")
def version(version_str: str):
    """Validate a release version string.

    Used as an argparse ``type=`` callback: accepts only strings of the
    exact form ``vX.Y.Z`` (e.g. ``v1.4.0``) and returns them unchanged.

    :param version_str: candidate version string
    :return: the same string, if valid
    :raises ValueError: if the string does not match ``vX.Y.Z``; argparse
        converts this into a usage error. The message now names the
        offending input to make the CLI error actionable.
    """
    if not re.fullmatch(r"v\d+\.\d+\.\d+", version_str):
        raise ValueError(f"version '{version_str}' does not match vX.Y.Z")
    return version_str
if __name__ == "__main__":
    # CLI entry point: draft a GitHub release for a validated tag.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--version",
        required=True,
        metavar="vX.Y.Z",
        help="Firecracker version.",
        type=version,
    )
    arg_parser.add_argument(
        "--repository", required=False, default="firecracker-microvm/firecracker"
    )
    arg_parser.add_argument("--github-token", required=True)
    cli_args = arg_parser.parse_args()
    github_release(
        tag_version=cli_args.version,
        repo=cli_args.repository,
        github_token=cli_args.github_token,
    )
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,937
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_cmd_line_parameters.py
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests that ensure the correctness of the command line parameters."""
import platform
from pathlib import Path
import pytest
from framework.utils import run_cmd
from host_tools.cargo_build import get_firecracker_binaries
def test_describe_snapshot_all_versions(
    microvm_factory, guest_kernel, rootfs, firecracker_release
):
    """
    Test `--describe-snapshot` correctness for all snapshot versions.

    For each release create a snapshot and verify the data version of the
    snapshot state file.
    """
    expected_version = firecracker_release.snapshot_version
    # Boot a microVM with the old release and take a diff snapshot.
    vm = microvm_factory.build(
        guest_kernel,
        rootfs,
        fc_binary_path=firecracker_release.path,
        jailer_binary_path=firecracker_release.jailer,
    )
    vm.spawn()
    vm.basic_config(track_dirty_pages=True)
    vm.start()
    snapshot = vm.snapshot_diff()
    print("========== Firecracker create snapshot log ==========")
    print(vm.log_data)
    vm.kill()

    # Ask the current Firecracker build to describe the old snapshot.
    fc_binary, _ = get_firecracker_binaries()
    describe_cmd = [fc_binary, "--describe-snapshot", snapshot.vmstate]
    rc, stdout, stderr = run_cmd(describe_cmd)
    assert rc == 0, stderr
    assert stderr == ""
    assert expected_version in stdout
def test_cli_metrics_path(uvm_plain):
    """
    Test --metrics-path parameter
    """
    vm = uvm_plain
    metrics_file = Path(vm.path) / "my_metrics.ndjson"
    vm.spawn(metrics_path=metrics_file)
    vm.basic_config()
    vm.start()

    flushed = vm.flush_metrics()

    # Metrics configured via CLI must contain the same groups as when
    # configured via the API.
    expected = {
        "utc_timestamp_ms",
        "api_server",
        "balloon",
        "block",
        "deprecated_api",
        "get_api_requests",
        "i8042",
        "latencies_us",
        "logger",
        "mmds",
        "net",
        "patch_api_requests",
        "put_api_requests",
        "seccomp",
        "vcpu",
        "vmm",
        "uart",
        "signals",
        "vsock",
        "entropy",
    }
    if platform.machine() == "aarch64":
        expected.add("rtc")
    assert set(flushed.keys()) == expected
def test_cli_metrics_path_if_metrics_initialized_twice_fail(test_microvm_with_api):
    """
    Given: a running firecracker with metrics configured with the CLI option
    When: Configure metrics via API
    Then: API returns an error
    """
    vm = test_microvm_with_api

    # First configure the µvm metrics with --metrics-path
    cli_metrics = Path(vm.path) / "metrics.ndjson"
    cli_metrics.touch()
    vm.spawn(metrics_path=cli_metrics)

    # Then try to configure it with PUT /metrics
    api_metrics = Path(vm.path) / "metrics2.ndjson"
    api_metrics.touch()

    # It should fail with because it's already configured
    with pytest.raises(RuntimeError, match="Reinitialization of metrics not allowed."):
        vm.api.metrics.put(metrics_path=vm.create_jailed_resource(api_metrics))
def test_cli_metrics_if_resume_no_metrics(uvm_plain, microvm_factory):
    """
    Check that metrics configuration is not part of the snapshot
    """
    # Given: a snapshot of a FC with metrics configured with the CLI option
    uvm1 = uvm_plain
    metrics_path = Path(uvm1.path) / "metrics.ndjson"
    metrics_path.touch()
    uvm1.spawn(metrics_path=metrics_path)
    uvm1.basic_config()
    uvm1.start()
    snapshot = uvm1.snapshot_full()

    # When: restoring from the snapshot
    uvm2 = microvm_factory.build()
    uvm2.spawn()
    uvm2.restore_from_snapshot(snapshot)

    # Then: the old metrics configuration does not exist
    assert not (Path(uvm2.jailer.chroot_path()) / metrics_path.name).exists()
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,938
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/framework/stats/types.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Module for common types definitions."""
from collections import defaultdict
from dataclasses import dataclass
from typing import List
from .criteria import ComparisonCriteria
from .function import Function
@dataclass
class MeasurementDef:
    """Measurement definition data class."""

    name: str
    unit: str
    statistics: List["StatisticDef"]

    @classmethod
    def create_measurement(
        cls,
        measurement_name: str,
        unit: str,
        st_functions: List[Function],
        pass_criteria: dict = None,
    ) -> "MeasurementDef":
        """
        Create a measurement based on the given params.

        The expected `pass_criteria` dict maps a statistic name (explicitly
        provided in statistics definitions or inherited from statistic
        functions, e.g Avg, Min, Max) to the
        `statistics.criteria.ComparisonCriteria` used for pass/failure.
        """
        criteria_lookup = (
            defaultdict()
            if pass_criteria is None
            else defaultdict(None, pass_criteria)
        )
        stats = [
            StatisticDef(func=func, pass_criteria=criteria_lookup.get(func.name))
            for func in st_functions
        ]
        return cls(measurement_name, unit, stats)
@dataclass
class StatisticDef:
    """Statistic definition data class."""

    # Statistic function (e.g. Avg, Min, Max) applied to raw measurements.
    func: Function
    # Optional criteria checked against the computed statistic; None means
    # the statistic is informational only.
    pass_criteria: ComparisonCriteria = None

    @property
    def name(self) -> str:
        """Return the name used to identify the statistic definition."""
        return self.func.name
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,939
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/host_tools/metrics.py
|
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Fixture to send metrics to AWS CloudWatch
We use the aws-embedded-metrics library although it has some sharp corners,
namely:
1. It uses asyncio, which complicates the flushing a bit.
2. It has an stateful API. Setting dimensions will override previous ones.
Example:
set_dimensions("instance")
put_metric("duration", 1)
set_dimensions("cpu")
put_metric("duration", 1)
This will end with 2 identical metrics with dimension "cpu" (the last one). The
correct way of doing it is:
set_dimensions("instance")
put_metric("duration", 1)
flush()
set_dimensions("cpu")
put_metric("duration", 1)
This is not very intuitive, but we assume all metrics within a test will have
the same dimensions.
# Debugging
You can override the destination of the metrics to stdout with:
AWS_EMF_NAMESPACE=$USER-test
AWS_EMF_ENVIRONMENT=local ./tools/devtest test
# References:
- https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Embedded_Metric_Format_Specification.html
- https://github.com/awslabs/aws-embedded-metrics-python
"""
import asyncio
import os
from aws_embedded_metrics.logger.metrics_logger_factory import create_metrics_logger
class MetricsWrapperDummy:
    """No-op metrics sink used when metrics output is disabled."""

    def set_dimensions(self, *args, **kwargs):
        """Ignore the requested dimensions."""

    def put_metric(self, *args, **kwargs):
        """Discard the datapoint."""

    def set_property(self, *args, **kwargs):
        """Ignore the property."""

    def flush(self):
        """Nothing to flush."""
class MetricsWrapper:
    """A convenient metrics logger.

    Thin proxy around a metrics logger instance: unknown attributes are
    delegated to the wrapped logger, and ``flush`` hides the library's
    asyncio-based API behind a synchronous call.
    """

    def __init__(self, logger):
        self.logger = logger

    def __getattr__(self, attr):
        """Delegate unknown attributes to the wrapped logger.

        ``__getattr__`` is only invoked after normal lookup on the
        instance and the class has failed, so the previous guard
        ``attr not in self.__dict__`` was always true and its fallback
        branch ``getattr(self, attr)`` could only recurse infinitely.
        Delegate unconditionally instead.
        """
        return getattr(self.logger, attr)

    def flush(self):
        """Flush any remaining metrics (synchronously)."""
        asyncio.run(self.logger.flush())
def get_metrics_logger():
    """Get a new metrics logger object"""
    if "AWS_EMF_NAMESPACE" not in os.environ:
        # No metrics namespace configured: discard all metrics.
        return MetricsWrapperDummy()
    emf_logger = create_metrics_logger()
    emf_logger.reset_dimensions(False)
    return MetricsWrapper(emf_logger)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,940
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/style/test_markdown.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests for markdown style checks."""
from framework import utils
def test_markdown_style():
    """
    Test that markdown files adhere to the style rules.
    """
    # Get all *.md files from the project
    md_files = utils.get_files_from(
        find_path="..", pattern="*.md", exclude_names=["build"]
    )
    # Assert if somehow no markdown files were found.
    assert md_files

    # Run mdl once over the whole file list.
    mdl_cmd = "mdl -c ../.mdlrc " + " ".join(md_files) + " "
    _, output, _ = utils.run_cmd(mdl_cmd)
    assert output == ""
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,941
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/framework/stats/producer.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Producer of statistics."""
from abc import ABC, abstractmethod
from typing import Any, Callable
from framework import utils
# pylint: disable=R0903
class Producer(ABC):
    """Base class for raw results producer.

    Subclasses fetch raw measurement data from a source: an ssh command,
    a host command, or a python callable.
    """

    @abstractmethod
    def produce(self) -> Any:
        """Produce raw results."""
class SSHCommand(Producer):
    """Producer from executing ssh commands."""

    def __init__(self, cmd, ssh_connection):
        """Store the command and the ssh connection used to run it."""
        self._cmd = cmd
        self._ssh_connection = ssh_connection

    def produce(self) -> Any:
        """Run the command over ssh and return its stdout."""
        exit_code, stdout, stderr = self._ssh_connection.run(self._cmd)
        # The command must succeed and be silent on stderr.
        assert exit_code == 0
        assert stderr == ""
        return stdout
class HostCommand(Producer):
    """Producer from executing commands on host."""

    def __init__(self, cmd):
        """Store the host command to execute."""
        self._cmd = cmd

    def produce(self) -> Any:
        """Run the command on the host and return its stdout."""
        return utils.run_cmd(self._cmd).stdout

    @property
    def cmd(self):
        """Return the command executed on host."""
        return self._cmd

    @cmd.setter
    def cmd(self, cmd):
        """Set the command executed on host."""
        self._cmd = cmd
class LambdaProducer(Producer):
    """Producer from calling python functions."""

    def __init__(self, func: Callable, func_kwargs=None):
        """Store the callable and its optional keyword arguments."""
        super().__init__()
        assert callable(func)
        self._func = func
        self._func_kwargs = func_kwargs

    # pylint: disable=R1710
    def produce(self) -> Any:
        """Call `self._func` and return its result."""
        if not self._func_kwargs:
            return self._func()
        return self._func(**self._func_kwargs)

    @property
    def func(self):
        """Return producer function."""
        return self._func

    @func.setter
    def func(self, func: Callable):
        """Set producer function."""
        self._func = func

    @property
    def func_kwargs(self):
        """Return producer function arguments."""
        return self._func_kwargs

    @func_kwargs.setter
    def func_kwargs(self, func_kwargs):
        """Set producer function arguments."""
        self._func_kwargs = func_kwargs
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,942
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/framework/properties.py
|
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# pylint:disable=broad-except
# pylint:disable=too-few-public-methods
"""
Metadata we want to attach to tests for further analysis and troubleshooting
"""
import platform
import re
import subprocess
from pathlib import Path
from framework.utils import get_kernel_version
from framework.utils_cpuid import get_cpu_codename, get_cpu_model_name, get_cpu_vendor
from framework.utils_imdsv2 import imdsv2_get
def run_cmd(cmd):
    """Return the stdout of a command"""
    # shell=True so callers can use pipes; output is decoded and trimmed.
    raw_output = subprocess.check_output(cmd, shell=True)
    return raw_output.decode().strip()
def get_os_version():
    """Get the OS version

    >>> get_os_version()
    Ubuntu 18.04.6 LTS
    """
    # PRETTY_NAME is the human-readable distro description.
    release_text = Path("/etc/os-release").read_text(encoding="ascii")
    return re.search('PRETTY_NAME="(.*)"', release_text).group(1)
class GlobalProps:
    """Class to hold metadata about the testrun environment"""

    def __init__(self):
        # Hardware / CPU identification.
        self.cpu_architecture: str = platform.machine()
        self.cpu_model = get_cpu_model_name()
        self.cpu_codename = get_cpu_codename()
        self.cpu_vendor = get_cpu_vendor().name.lower()
        self.cpu_microcode = run_cmd(
            "grep microcode /proc/cpuinfo |head -1 |awk '{print $3}'"
        )
        # Host kernel / OS details.
        self.host_linux_full_version = platform.release()
        # major.minor
        self.host_linux_version = get_kernel_version(1)
        # major.minor.patch
        self.host_linux_patch = get_kernel_version(2)
        self.os = get_os_version()
        self.libc_ver = "-".join(platform.libc_ver())
        # Git / toolchain state of the checkout being tested.
        self.git_commit_id = run_cmd("git rev-parse HEAD")
        self.git_branch = run_cmd("git show -s --pretty=%D HEAD")
        self.git_origin_url = run_cmd("git config --get remote.origin.url")
        self.rust_version = run_cmd("rustc --version |awk '{print $2}'")
        # "ec2" or "local", decided by IMDSv2 reachability (see below).
        self.environment = self._detect_environment()
        if self.is_ec2:
            self.instance = imdsv2_get("/meta-data/instance-type")
            self.ami = imdsv2_get("/meta-data/ami-id")
        else:
            self.instance = "NA"
            self.ami = "NA"

    @property
    def is_ec2(self):
        """Are we running on an EC2 instance?"""
        return self.environment == "ec2"

    def _detect_environment(self):
        """Detect what kind of environment we are running under

        The most reliable way is to just query IMDSv2
        https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/identify_ec2_instances.html
        """
        try:
            imdsv2_get("/meta-data/instance-type")
            return "ec2"
        except Exception:
            # Any failure (timeout, no route, ...) means we are not on EC2.
            return "local"
# Module-level singleton: computed once at import time and shared by all
# tests that import this module.
global_props = GlobalProps()
# TBD could do a props fixture for tests to use...
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,943
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_vsock.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests for the virtio-vsock device.
In order to test the vsock device connection state machine, these tests will:
- Generate a 20MiB random data blob;
- Use `host_tools/vsock_helper.c` to start a listening echo server inside the
guest VM;
- Run 50, concurrent, host-initiated connections, each transfering the random
blob to and from the guest echo server;
- For every connection, check that the data received back from the echo server
hashes to the same value as the data sent;
- Start a host echo server, and repeat the process for the same number of
guest-initiated connections.
"""
import os.path
from socket import timeout as SocketTimeout
from framework.utils_vsock import (
ECHO_SERVER_PORT,
VSOCK_UDS_PATH,
HostEchoWorker,
_copy_vsock_data_to_guest,
check_guest_connections,
check_host_connections,
check_vsock_device,
make_blob,
make_host_port_path,
)
NEGATIVE_TEST_CONNECTION_COUNT = 100
TEST_WORKER_COUNT = 10
def test_vsock(test_microvm_with_api, bin_vsock_path, test_fc_session_root_path):
    """
    Test guest and host vsock initiated connections.

    Check the module docstring for details on the setup.
    """
    vm = test_microvm_with_api
    vm.spawn()
    vm.basic_config()
    vm.add_net_iface()
    # Attach a vsock device with guest CID 3, backed by a UDS in the jail root.
    vm.api.vsock.put(vsock_id="vsock0", guest_cid=3, uds_path=f"/{VSOCK_UDS_PATH}")
    vm.start()
    # Bidirectional echo check implemented in framework.utils_vsock.
    check_vsock_device(vm, bin_vsock_path, test_fc_session_root_path, vm.ssh)
def negative_test_host_connections(vm, uds_path, blob_path, blob_hash):
    """Negative test for host-initiated connections.

    This will start a daemonized echo server on the guest VM, and then spawn
    `NEGATIVE_TEST_CONNECTION_COUNT` `HostEchoWorker` threads.
    Closes the UDS sockets while data is in flight.
    """
    # Start the daemonized echo server inside the guest.
    ecode, _, _ = vm.ssh.run("/tmp/vsock_helper echosrv -d {}".format(ECHO_SERVER_PORT))
    assert ecode == 0

    echo_workers = []
    for _ in range(NEGATIVE_TEST_CONNECTION_COUNT):
        wrk = HostEchoWorker(uds_path, blob_path)
        echo_workers.append(wrk)
        wrk.start()
    # Yank the host-side sockets while transfers are still in flight.
    for wrk in echo_workers:
        wrk.close_uds()
        wrk.join()

    # Validate that Firecracker is still up and running.
    ecode, _, _ = vm.ssh.run("sync")
    # Should fail if Firecracker exited from SIGPIPE handler.
    assert ecode == 0

    # Vsock emulation must still accept fresh connections and work as expected.
    check_host_connections(vm, uds_path, blob_path, blob_hash)
def test_vsock_epipe(test_microvm_with_api, bin_vsock_path, test_fc_session_root_path):
    """
    Vsock negative test to validate SIGPIPE/EPIPE handling.
    """
    vm = test_microvm_with_api
    vm.spawn()
    vm.basic_config()
    vm.add_net_iface()
    vm.api.vsock.put(vsock_id="vsock0", guest_cid=3, uds_path=f"/{VSOCK_UDS_PATH}")
    vm.start()
    # Generate the random data blob file, 20MB
    blob_path, blob_hash = make_blob(test_fc_session_root_path, 20 * 2**20)
    vm_blob_path = "/tmp/vsock/test.blob"
    # Set up a tmpfs drive on the guest, so we can copy the blob there.
    # Guest-initiated connections (echo workers) will use this blob.
    _copy_vsock_data_to_guest(vm.ssh, blob_path, vm_blob_path, bin_vsock_path)
    # Host-side path of the vsock UDS inside the jail.
    path = os.path.join(vm.jailer.chroot_path(), VSOCK_UDS_PATH)
    # Negative test for host-initiated connections that
    # are closed with in flight data.
    negative_test_host_connections(vm, path, blob_path, blob_hash)
    metrics = vm.flush_metrics()
    # Validate that at least 1 `SIGPIPE` signal was received.
    # Since we are reusing the existing echo server which triggers
    # reads/writes on the UDS backend connections, these might be closed
    # before a read() or a write() is about to be performed by the emulation.
    # The test uses 100 connections it is enough to close at least one
    # before write().
    #
    # If this ever fails due to 100 closes before read() we must
    # add extra tooling that will trigger only writes().
    assert metrics["signals"]["sigpipe"] > 0
def test_vsock_transport_reset(
    uvm_nano, microvm_factory, bin_vsock_path, test_fc_session_root_path
):
    """
    Vsock transport reset test.

    Steps:
    1. Start echo server on the guest
    2. Start host workers that ping-pong data between guest and host,
       without closing any of them
    3. Pause VM -> Create snapshot -> Resume VM
    4. Check that worker sockets no longer work by setting a timeout
       so the sockets won't block and do a recv operation.
    5. If the recv operation timeouts, the connection was closed.
       Else, the connection was not closed and the test fails.
    6. Close VM -> Load VM from Snapshot -> check that vsock
       device is still working.

    Fixes over the previous revision: the failure message typo
    ("recieved") is corrected, and the no-op `assert True` in the
    expected-timeout path is replaced with `pass`.
    """
    test_vm = uvm_nano
    test_vm.add_net_iface()
    test_vm.api.vsock.put(vsock_id="vsock0", guest_cid=3, uds_path=f"/{VSOCK_UDS_PATH}")
    test_vm.start()

    # Generate the random data blob file.
    blob_path, blob_hash = make_blob(test_fc_session_root_path)
    vm_blob_path = "/tmp/vsock/test.blob"

    # Set up a tmpfs drive on the guest, so we can copy the blob there.
    # Guest-initiated connections (echo workers) will use this blob.
    _copy_vsock_data_to_guest(test_vm.ssh, blob_path, vm_blob_path, bin_vsock_path)

    # Start guest echo server.
    path = os.path.join(test_vm.jailer.chroot_path(), VSOCK_UDS_PATH)
    cmd = f"/tmp/vsock_helper echosrv -d {ECHO_SERVER_PORT}"
    ecode, _, _ = test_vm.ssh.run(cmd)
    assert ecode == 0

    # Start host workers that connect to the guest server.
    workers = []
    for _ in range(TEST_WORKER_COUNT):
        worker = HostEchoWorker(path, blob_path)
        workers.append(worker)
        worker.start()
    for wrk in workers:
        wrk.join()

    # Create snapshot.
    snapshot = test_vm.snapshot_full()
    test_vm.resume()

    # Check that sockets are no longer working on workers.
    for worker in workers:
        # Whatever we send to the server, it should return the same
        # value.
        buf = bytearray("TEST\n".encode("utf-8"))
        worker.sock.send(buf)
        try:
            # Arbitrary timeout, we set this so the socket won't block as
            # it shouldn't receive anything.
            worker.sock.settimeout(0.25)
            response = worker.sock.recv(32)
            if response != b"":
                # If we reach here, it means the connection did not close.
                assert False, "Connection not closed: response received '{}'".format(
                    response.decode("utf-8")
                )
        except SocketTimeout:
            # Expected: the transport was reset, nothing arrives.
            pass

    # Terminate VM.
    test_vm.kill()

    # Load snapshot.
    vm2 = microvm_factory.build()
    vm2.spawn()
    vm2.restore_from_snapshot(snapshot, resume=True)

    # Check that vsock device still works.
    # Test guest-initiated connections.
    path = os.path.join(vm2.path, make_host_port_path(VSOCK_UDS_PATH, ECHO_SERVER_PORT))
    check_guest_connections(vm2, path, vm_blob_path, blob_hash)

    # Test host-initiated connections.
    path = os.path.join(vm2.jailer.chroot_path(), VSOCK_UDS_PATH)
    check_host_connections(vm2, path, blob_path, blob_hash)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,944
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_snapshot_advanced.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Advanced tests scenarios for snapshot save/restore."""
import platform
import tempfile
import pytest
from test_balloon import _test_rss_memory_lower
import host_tools.drive as drive_tools
from framework.microvm import SnapshotType
from framework.properties import global_props
# Define 4 scratch drives.
scratch_drives = ["vdb", "vdc", "vdd", "vde"]
def test_restore_old_to_current(
    microvm_factory, guest_kernel, rootfs_ubuntu_22, firecracker_release
):
    """
    Restore snapshots from previous supported versions of Firecracker.

    For each firecracker release:
    1. Snapshot with the past release
    2. Restore with the current build
    """
    # due to bug fixed in commit 8dab78b
    firecracker_version = firecracker_release.version_tuple
    if global_props.instance == "m6a.metal" and firecracker_version < (1, 3, 3):
        pytest.skip("incompatible with AMD and Firecracker <1.3.3")

    # Microvm: 2vCPU 256MB RAM, balloon, 4 disks and 4 net devices.
    diff_snapshots = True
    # Boot with the *old* release binaries so the snapshot is produced by
    # the past version; memory monitoring is needed for diff snapshots.
    vm = microvm_factory.build(
        guest_kernel,
        rootfs_ubuntu_22,
        fc_binary_path=firecracker_release.path,
        jailer_binary_path=firecracker_release.jailer,
        monitor_memory=diff_snapshots,
    )
    vm.spawn()
    vm.basic_config(track_dirty_pages=True)
    snapshot = create_snapshot_helper(
        vm,
        drives=scratch_drives,
        diff_snapshots=diff_snapshots,
        balloon=diff_snapshots,
    )
    # Restore with the current build (default factory binaries) and verify
    # every attached device still works after the cross-version restore.
    vm = microvm_factory.build()
    vm.spawn()
    vm.restore_from_snapshot(snapshot, resume=True)
    validate_all_devices(vm, diff_snapshots)
    print(vm.log_data)
def test_restore_current_to_old(microvm_factory, uvm_plain, firecracker_release):
    """
    Restore current snapshot with previous versions of Firecracker.

    For each firecracker release:
    1. Snapshot with the current build
    2. Restore with the past release
    """
    # Microvm: 2vCPU 256MB RAM, balloon, 4 disks and 4 net devices.
    vm = uvm_plain
    vm.spawn()
    vm.basic_config(track_dirty_pages=True)

    # Create a snapshot with current FC version targeting the old version.
    snapshot = create_snapshot_helper(
        vm,
        target_version=firecracker_release.snapshot_version,
        drives=scratch_drives,
        balloon=True,
        diff_snapshots=True,
    )

    # Resume microvm using FC/Jailer binary artifacts.
    vm = microvm_factory.build(
        fc_binary_path=firecracker_release.path,
        jailer_binary_path=firecracker_release.jailer,
        monitor_memory=True,
    )
    vm.spawn()
    vm.restore_from_snapshot(snapshot, resume=True)
    # Verify all devices survived the restore on the older binary.
    validate_all_devices(vm, True)
    print("========== Firecracker restore snapshot log ==========")
    print(vm.log_data)
@pytest.mark.skipif(platform.machine() != "x86_64", reason="TSC is x86_64 specific.")
def test_save_tsc_old_version(uvm_nano):
    """
    Test TSC warning message when saving old snapshot.

    Snapshot version 0.24.0 cannot carry the TSC frequency, so Firecracker
    is expected to log a warning about dropping it.
    """
    uvm_nano.start()
    uvm_nano.snapshot_full(target_version="0.24.0")
    uvm_nano.check_log_message("Saving to older snapshot version, TSC freq")
def validate_all_devices(microvm, balloon):
    """Perform a basic validation for all devices of a microvm.

    Checks network connectivity over every interface, verifies the known
    content written on every scratch block device, and (optionally)
    exercises the balloon device.

    :param microvm: restored microvm under test
    :param balloon: when True, also validate balloon memory reclaim
    """
    # Test that net devices have connectivity after restore.
    for iface in microvm.iface.values():
        print("Testing net device", iface["iface"].dev_name)
        microvm.guest_ip = iface["iface"].guest_ip
        exit_code, _, _ = microvm.ssh.run("sync")
        # Fix: previously the exit code was silently discarded, so a dead
        # interface would not fail the connectivity check.
        assert exit_code == 0

    # Drop page cache.
    # Ensure further reads are going to be served from emulation layer.
    cmd = "sync; echo 1 > /proc/sys/vm/drop_caches"
    exit_code, _, _ = microvm.ssh.run(cmd)
    assert exit_code == 0

    # Validate checksum of /dev/vdX/test.
    # Should be ab893875d697a3145af5eed5309bee26 for 10 pages
    # of zeroes.
    for drive in list(microvm.disks)[1:]:
        # Mount block device.
        print("Testing drive ", drive)
        cmd = f"mkdir -p /tmp/{drive} ; mount /dev/{drive} /tmp/{drive}"
        exit_code, _, _ = microvm.ssh.run(cmd)
        assert exit_code == 0

        # Validate checksum.
        cmd = f"md5sum /tmp/{drive}/test | cut -d ' ' -f 1"
        exit_code, stdout, _ = microvm.ssh.run(cmd)
        assert exit_code == 0
        assert stdout.strip() == "ab893875d697a3145af5eed5309bee26"
        print("* checksum OK.")

    if balloon is True:
        print("Testing balloon memory reclaim.")
        # Call helper fn from balloon integration tests.
        _test_rss_memory_lower(microvm)
def create_snapshot_helper(
    vm,
    target_version=None,
    drives=None,
    balloon=False,
    diff_snapshots=False,
):
    """Create a snapshot with many devices.

    Boots `vm` with 4 net interfaces, the requested scratch drives and an
    optional stats-enabled balloon, writes a known pattern to each scratch
    drive, takes a snapshot and kills the VM.

    :param vm: a spawned-but-not-started microvm
    :param target_version: optional snapshot version to target
    :param drives: names of scratch block devices to attach (e.g. "vdb")
    :param balloon: attach a memory balloon device when True
    :param diff_snapshots: take a DIFF snapshot instead of a FULL one
    :return: the snapshot object produced by ``vm.make_snapshot``
    """
    if diff_snapshots is False:
        snapshot_type = SnapshotType.FULL
    else:
        # Version 0.24 and greater has Diff and balloon support.
        snapshot_type = SnapshotType.DIFF

    if balloon:
        # Add a memory balloon with stats enabled.
        vm.api.balloon.put(
            amount_mib=0, deflate_on_oom=True, stats_polling_interval_s=1
        )

    test_drives = [] if drives is None else drives

    # Add disks.
    for scratch in test_drives:
        # Add a scratch 64MB RW non-root block device.
        # NOTE(review): tempfile.mktemp() is deprecated/racy; presumably
        # FilesystemFile creates the file at that path itself — confirm
        # before switching to mkstemp().
        scratchdisk = drive_tools.FilesystemFile(tempfile.mktemp(), size=64)
        vm.add_drive(scratch, scratchdisk.path)

        # Workaround FilesystemFile destructor removal of file.
        scratchdisk.path = None

    for _ in range(4):
        vm.add_net_iface()

    vm.start()

    # Iterate and validate connectivity on all ifaces after boot.
    for i in range(4):
        exit_code, _, _ = vm.ssh_iface(i).run("sync")
        assert exit_code == 0

    # Mount scratch drives in guest.
    for blk in test_drives:
        # Create mount point and mount each device.
        cmd = f"mkdir -p /tmp/mnt/{blk} && mount /dev/{blk} /tmp/mnt/{blk}"
        exit_code, _, _ = vm.ssh.run(cmd)
        assert exit_code == 0

        # Create file using dd using O_DIRECT.
        # After resume we will compute md5sum on these files.
        dd = f"dd if=/dev/zero of=/tmp/mnt/{blk}/test bs=4096 count=10 oflag=direct"
        exit_code, _, _ = vm.ssh.run(dd)
        assert exit_code == 0

        # Unmount the device.
        cmd = f"umount /dev/{blk}"
        exit_code, _, _ = vm.ssh.run(cmd)
        assert exit_code == 0

    snapshot = vm.make_snapshot(snapshot_type, target_version=target_version)
    print("========== Firecracker create snapshot log ==========")
    print(vm.log_data)
    vm.kill()
    return snapshot
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,945
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/performance/conftest.py
|
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Pytest fixtures and redefined-outer-name don't mix well. Disable it.
# pylint:disable=redefined-outer-name
"""Fixtures for performance tests"""
import json
import pytest
from framework import defs, utils
from framework.properties import global_props
from framework.stats import core
# pylint: disable=too-few-public-methods
class JsonFileDumper:
    """Append test results, one JSON document per line, to an .ndjson file."""

    def __init__(self, test_name):
        """Compute the output file path and ensure its directory exists."""
        root = defs.TEST_RESULTS_DIR
        root.mkdir(exist_ok=True)
        host_kernel = utils.get_kernel_version(level=1)
        file_name = (
            f"{test_name}_results_{global_props.instance}_{host_kernel}.ndjson"
        )
        self._root_path = root
        self._results_file = root / file_name

    def dump(self, result):
        """Serialize `result` as JSON and append it as a single line."""
        with self._results_file.open("a", encoding="utf-8") as out:
            out.write(json.dumps(result))
            out.write("\n")  # Add newline cause Py JSON does not
            out.flush()
@pytest.fixture
def results_file_dumper(request):
    """Dump results of performance test as a file.

    Returns a JsonFileDumper named after the test *module* that requested
    the fixture.
    """
    # we want the test filename, like test_network_latency
    return JsonFileDumper(request.node.parent.path.stem)
def send_metrics(metrics, stats: core.Core):
    """Extract metrics from a statistics run

    Also converts the units to CloudWatch-compatible ones.
    """
    # Internal unit name -> CloudWatch unit name.
    unit_map = {
        "ms": "Milliseconds",
        "seconds": "Seconds",
        "Mbps": "Megabits/Second",
        "KiB/s": "Kilobytes/Second",
        "io/s": "Count/Second",
        "#": "Count",
        "percentage": "Percent",
    }

    results = stats.statistics["results"]
    for tag, measurements in results.items():
        dimensions = dict(stats.custom)
        # the last component of the tag is the test name
        # for example vmlinux-4.14.bin/ubuntu-18.04.ext4/2vcpu_1024mb.json/tcp-p1024K-ws16K-bd
        dimensions["test"] = tag.rsplit("/", 1)[-1]
        dimensions["performance_test"] = stats.name
        metrics.set_dimensions(dimensions)
        metrics.set_property("tag", tag)

        for ms_name, aggregates in measurements.items():
            for agg_name, agg in aggregates.items():
                # "_unit" is metadata, not an aggregate value.
                if agg_name == "_unit":
                    continue
                metrics.put_metric(
                    f"{ms_name}_{agg_name}",
                    agg["value"],
                    unit=unit_map[aggregates["_unit"]],
                )
        # Flush per tag: dimensions change on the next iteration.
        metrics.flush()
@pytest.fixture
def st_core(metrics, results_file_dumper, guest_kernel, rootfs, request):
    """Helper fixture to dump results and publish metrics"""
    stats = core.Core()
    # Drop the 2-char prefix of the kernel file stem to get the version.
    # NOTE(review): presumably stems look like "chX.Y" — confirm naming.
    guest_kernel_ver = guest_kernel.stem[2:]
    # Only fail on baseline regressions when --perf-fail was passed.
    stats.check_baseline = request.config.getoption("--perf-fail")
    stats.env_id_prefix = f"{guest_kernel_ver}/{rootfs.name}"
    stats.iterations = 1
    # Extra dimensions attached to every published metric.
    stats.custom = {
        "instance": global_props.instance,
        "cpu_model": global_props.cpu_model,
        "host_kernel": "linux-" + global_props.host_linux_version,
        "guest_kernel": guest_kernel_ver,
        "rootfs": rootfs.name,
    }
    stats.metrics = metrics
    stats.metrics_test = request.function.__name__
    yield stats
    # If the test is skipped, there will be no results, so only dump if there
    # is some.
    if stats.statistics["results"]:
        results_file_dumper.dump(stats.statistics)
        send_metrics(metrics, stats)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,946
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/host_tools/memory.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Utilities for measuring memory utilization for a process."""
import time
from queue import Queue
from threading import Lock, Thread
from framework import utils
class MemoryUsageExceededException(Exception):
    """Raised when a process' RSS overhead crosses the configured threshold."""

    def __init__(self, usage, threshold, out):
        """Build the error message from the observed usage and the limit.

        :param usage: observed memory usage in KiB
        :param threshold: configured maximum in KiB
        :param out: raw diagnostic output to embed in the message
        """
        message = (
            f"Memory usage ({usage} KiB) exceeded maximum threshold "
            f"({threshold} KiB).\n {out} \n"
        )
        super().__init__(message)
class MemoryMonitor(Thread):
    """Class to represent an RSS memory monitor for a Firecracker process.

    The guest's memory region is skipped, as the main interest is the
    VMM memory usage.
    """

    # Maximum allowed VMM overhead, in KiB.
    MEMORY_THRESHOLD = 5 * 1024
    # Pause between two consecutive pmap samples, in seconds.
    MEMORY_SAMPLE_TIMEOUT_S = 0.05
    # KiB size at which x86_64 guest memory is split into two regions
    # (see update_guest_mem_regions for the three recognised sizes).
    X86_MEMORY_GAP_START = 3407872

    def __init__(self):
        """Initialize monitor attributes."""
        Thread.__init__(self)
        self._pid = None  # PID of the monitored Firecracker process
        self._guest_mem_mib = None  # configured guest memory, in MiB
        # Start/end of up to two cached guest memory regions.
        self._guest_mem_start_1 = None
        self._guest_mem_end_1 = None
        self._guest_mem_start_2 = None
        self._guest_mem_end_2 = None
        # Thread-safe queue where over-threshold samples are pushed.
        self._exceeded_queue = Queue()
        self._pmap_out = None  # raw pmap output of the offending sample
        self._threshold = self.MEMORY_THRESHOLD
        self._should_stop = False
        self._current_rss = 0
        self._lock = Lock()  # guards _current_rss
        self.daemon = True  # don't block interpreter shutdown

    @property
    def pid(self):
        """Get the pid."""
        return self._pid

    @property
    def guest_mem_mib(self):
        """Get the guest memory in MiB."""
        return self._guest_mem_mib

    @property
    def threshold(self):
        """Get the memory threshold."""
        return self._threshold

    @property
    def exceeded_queue(self):
        """Get the exceeded queue."""
        return self._exceeded_queue

    @guest_mem_mib.setter
    def guest_mem_mib(self, guest_mem_mib):
        """Set the guest memory MiB."""
        self._guest_mem_mib = guest_mem_mib

    @pid.setter
    def pid(self, pid):
        """Set the pid."""
        self._pid = pid

    @threshold.setter
    def threshold(self, threshold):
        """Set the threshold."""
        self._threshold = threshold

    def signal_stop(self):
        """Signal that the thread should stop."""
        self._should_stop = True

    def run(self):
        """Thread for monitoring the RSS memory usage of a Firecracker process.

        `pmap` is used to compute the memory overhead. If it exceeds
        the maximum value, it is pushed in a thread safe queue and memory
        monitoring ceases. It is up to the caller to check the queue.
        """
        pmap_cmd = "pmap -xq {}".format(self.pid)

        while not self._should_stop:
            mem_total = 0
            try:
                _, stdout, _ = utils.run_cmd(pmap_cmd)
                pmap_out = stdout.split("\n")
            except ChildProcessError:
                # Process is gone; nothing left to monitor.
                return

            for line in pmap_out:
                tokens = line.split()
                if not tokens:
                    # Stop parsing at the first empty line.
                    break
                try:
                    # Expected columns: hex address, size (KiB), RSS (KiB).
                    # NOTE(review): lstrip("0") on an all-zero address yields
                    # "" and raises ValueError, silently skipping that line.
                    address = int(tokens[0].lstrip("0"), 16)
                    total_size = int(tokens[1])
                    rss = int(tokens[2])
                except ValueError:
                    # This line doesn't contain memory related information.
                    continue
                # Guest memory does not count towards VMM overhead.
                if self.update_guest_mem_regions(address, total_size):
                    continue
                if self.is_in_guest_mem_regions(address):
                    continue
                mem_total += rss

            with self._lock:
                self._current_rss = mem_total

            if mem_total > self.threshold:
                # Record the violation and stop sampling.
                self.exceeded_queue.put(mem_total)
                self._pmap_out = stdout
                return

            time.sleep(self.MEMORY_SAMPLE_TIMEOUT_S)

    def update_guest_mem_regions(self, address, size_kib):
        """
        If the address is recognised as a guest memory region,
        cache it and return True, otherwise return False.
        """
        # If x86_64 guest memory exceeds 3328M, it will be split
        # in 2 regions: 3328M and the rest. We have 3 cases here
        # to recognise a guest memory region:
        # - its size matches the guest memory exactly
        # - its size is 3328M
        # - its size is guest memory minus 3328M.
        if size_kib in (
            self.guest_mem_mib * 1024,
            self.X86_MEMORY_GAP_START,
            self.guest_mem_mib * 1024 - self.X86_MEMORY_GAP_START,
        ):
            # Cache in the first free slot; at most two regions exist.
            if not self._guest_mem_start_1:
                self._guest_mem_start_1 = address
                self._guest_mem_end_1 = address + size_kib * 1024
                return True
            if not self._guest_mem_start_2:
                self._guest_mem_start_2 = address
                self._guest_mem_end_2 = address + size_kib * 1024
                return True
        return False

    def is_in_guest_mem_regions(self, address):
        """Check if the address is inside a guest memory region."""
        for guest_mem_start, guest_mem_end in [
            (self._guest_mem_start_1, self._guest_mem_end_1),
            (self._guest_mem_start_2, self._guest_mem_end_2),
        ]:
            if (
                guest_mem_start is not None
                and guest_mem_start <= address < guest_mem_end
            ):
                return True
        return False

    def check_samples(self):
        """Check that there are no samples over the threshold.

        :raises MemoryUsageExceededException: if the monitor recorded a
            sample above the configured threshold
        """
        if not self.exceeded_queue.empty():
            raise MemoryUsageExceededException(
                self.exceeded_queue.get(), self.threshold, self._pmap_out
            )

    @property
    def current_rss(self):
        """Obtain current RSS for Firecracker's overhead."""
        # This is to ensure that the monitor has updated itself.
        time.sleep(self.MEMORY_SAMPLE_TIMEOUT_S + 0.5)
        with self._lock:
            return self._current_rss
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,947
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tools/parse_baselines/providers/types.py
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Define data types and abstractions for parsers."""
from abc import ABC, abstractmethod
from collections import defaultdict
from collections.abc import Iterator
from typing import List
# pylint: disable=R0903
def nested_dict():
    """Create an infinitely nested dictionary.

    Accessing a missing key creates (and stores) another nested dict, so
    arbitrary-depth assignment like ``d[a][b][c] = v`` needs no setup.
    """
    return defaultdict(nested_dict)
class DataParser(ABC):
    """Abstract class to be used for baselines extraction."""

    def __init__(self, data_provider: Iterator, baselines_defs):
        """Initialize the data parser.

        :param data_provider: iterable of result rows, each with "results"
            and "custom"/"cpu_model" entries
        :param baselines_defs: "measurement/statistic" keys to extract
        """
        self._data_provider = iter(data_provider)
        self._baselines_defs = baselines_defs
        # This object will hold the parsed data.
        self._data = nested_dict()

    @abstractmethod
    def calculate_baseline(self, data: List[float]) -> dict:
        """Return the target and delta values, given a list of data points."""

    def _format_baselines(self) -> List[dict]:
        """Return the computed baselines into the right serializable format.

        Fix: the previous implementation built an intermediate dict and then
        iterated self._data a second time to turn it into a list; a single
        comprehension produces the same list in the same order.
        """
        return [
            {"model": cpu_model, "baselines": self._data[cpu_model]}
            for cpu_model in self._data
        ]

    def _populate_baselines(self, key, parent):
        """Traverse the data dict and compute the baselines."""
        # Initial case.
        if key is None:
            for k in parent:
                self._populate_baselines(k, parent)
            return
        # Base case, reached a data list.
        if isinstance(parent[key], list):
            parent[key] = self.calculate_baseline(parent[key])
            return
        # Recurse for all children.
        for k in parent[key]:
            self._populate_baselines(k, parent[key])

    def parse(self) -> List[dict]:
        """Parse the rows and return baselines.

        Fix: the return annotation said ``dict`` while the method returns
        the list produced by ``_format_baselines``.
        """
        for row in self._data_provider:
            measurements = row["results"]
            cpu_model = row["custom"]["cpu_model"]
            # Consume the data and aggregate into lists.
            for tag in measurements.keys():
                for key in self._baselines_defs:
                    [ms_name, st_name] = key.split("/")
                    ms_data = measurements[tag].get(ms_name)
                    if ms_data is None:
                        continue
                    st_data = ms_data.get(st_name)["value"]
                    # Tag layout: kernel/rootfs/microvm-config/test-config.
                    [
                        kernel_version,
                        rootfs_type,
                        microvm_config,
                        test_config,
                    ] = tag.split("/")
                    data = self._data[cpu_model][ms_name]
                    data = data[kernel_version][rootfs_type]
                    data = data[microvm_config][st_name]
                    # Append to an existing series or start a new one.
                    if isinstance(data[test_config], list):
                        data[test_config].append(st_data)
                    else:
                        data[test_config] = [st_data]
        self._populate_baselines(None, self._data)
        return self._format_baselines()
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,948
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_pause_resume.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Basic tests scenarios for snapshot save/restore."""
import pytest
def verify_net_emulation_paused(metrics):
    """Verify net emulation is paused based on provided metrics."""
    net_metrics = metrics["net"]
    # While the VM is paused no net events may be processed, so every
    # rx/tx/tap counter must still be zero.
    for counter in (
        "rx_queue_event_count",
        "rx_partial_writes",
        "rx_tap_event_count",
        "rx_bytes_count",
        "rx_packets_count",
        "rx_fails",
        "rx_count",
        "tap_read_fails",
        "tap_write_fails",
        "tx_bytes_count",
        "tx_fails",
        "tx_count",
        "tx_packets_count",
        "tx_queue_event_count",
    ):
        assert net_metrics[counter] == 0, counter
    print(net_metrics)
def test_pause_resume(uvm_nano):
    """
    Test scenario: boot/pause/resume.

    Verifies that pause/resume are rejected pre-boot, that a paused VM
    stops serving guest and device activity, that both operations are
    idempotent, and that the guest is responsive again after resume.
    """
    microvm = uvm_nano
    microvm.add_net_iface()

    # Pausing the microVM before being started is not allowed.
    with pytest.raises(RuntimeError):
        microvm.api.vm.patch(state="Paused")

    # Resuming the microVM before being started is also not allowed.
    with pytest.raises(RuntimeError):
        microvm.api.vm.patch(state="Resumed")

    microvm.start()

    # Verify guest is active.
    exit_code, _, _ = microvm.ssh.run("ls")
    assert exit_code == 0

    # Pausing the microVM after it's been started is successful.
    microvm.api.vm.patch(state="Paused")

    # Flush and reset metrics as they contain pre-pause data.
    microvm.flush_metrics()

    # Verify guest is no longer active.
    exit_code, _, _ = microvm.ssh.run("ls")
    assert exit_code != 0

    # Verify emulation was indeed paused and no events from either
    # guest or host side were handled.
    verify_net_emulation_paused(microvm.flush_metrics())

    # Verify guest is no longer active.
    exit_code, _, _ = microvm.ssh.run("ls")
    assert exit_code != 0

    # Pausing the microVM when it is already `Paused` is allowed
    # (microVM remains in `Paused` state).
    microvm.api.vm.patch(state="Paused")

    # Resuming the microVM is successful.
    microvm.api.vm.patch(state="Resumed")

    # Verify guest is active again.
    exit_code, _, _ = microvm.ssh.run("ls")
    assert exit_code == 0

    # Resuming the microVM when it is already `Resumed` is allowed
    # (microVM remains in the running state).
    microvm.api.vm.patch(state="Resumed")

    # Verify guest is still active.
    exit_code, _, _ = microvm.ssh.run("ls")
    assert exit_code == 0

    microvm.kill()
def test_describe_instance(uvm_nano):
    """
    Test scenario: DescribeInstance different states.

    Walks the microVM through Not started -> Running -> Paused -> Running
    and checks the state reported by the DescribeInstance API at each step.
    """
    microvm = uvm_nano

    # Check MicroVM state is "Not started"
    response = microvm.api.describe.get()
    assert "Not started" in response.text

    # Start MicroVM
    microvm.start()

    # Check MicroVM state is "Running"
    response = microvm.api.describe.get()
    assert "Running" in response.text

    # Pause MicroVM
    microvm.api.vm.patch(state="Paused")

    # Check MicroVM state is "Paused"
    response = microvm.api.describe.get()
    assert "Paused" in response.text

    # Resume MicroVM. (Fix: the patch result was assigned to `response`
    # and immediately overwritten — the dead assignment is dropped, in
    # line with the Pause call above.)
    microvm.api.vm.patch(state="Resumed")

    # Check MicroVM state is "Running" after VM is resumed
    response = microvm.api.describe.get()
    assert "Running" in response.text

    microvm.kill()
def test_pause_resume_preboot(uvm_nano):
    """
    Test pause/resume operations are not allowed pre-boot.
    """
    basevm = uvm_nano
    expected_err = "not supported before starting the microVM"

    # Before boot, both Pause and Resume PATCH requests must be rejected
    # with the same error message.
    for state in ("Paused", "Resumed"):
        with pytest.raises(RuntimeError, match=expected_err):
            basevm.api.vm.patch(state=state)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,949
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/performance/test_rate_limiter.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests that fail if network throughput does not obey rate limits."""
import time
from framework import utils
from host_tools import cpu_load
# The iperf version to run this tests with
IPERF_BINARY = "iperf3"

# Interval used by iperf to get maximum bandwidth (seconds)
IPERF_TRANSMIT_TIME = 4

# Use a fixed-size TCP window so we get constant flow
IPERF_TCP_WINDOW = "256K"

# The rate limiting value (10485760 bytes == 10 MiB)
RATE_LIMIT_BYTES = 10485760

# The initial token bucket size (104857600 bytes == 100 MiB)
BURST_SIZE = 104857600

# The refill time for the token bucket (milliseconds)
REFILL_TIME_MS = 100

# Bandwidth limiter without a one-time burst allowance.
RATE_LIMITER_NO_BURST = {
    "bandwidth": {"size": RATE_LIMIT_BYTES, "refill_time": REFILL_TIME_MS}
}
# Same limiter, plus an initial one-time burst budget.
RATE_LIMITER_WITH_BURST = {
    "bandwidth": {
        "size": RATE_LIMIT_BYTES,
        "one_time_burst": BURST_SIZE,
        "refill_time": REFILL_TIME_MS,
    }
}

# Deltas that are accepted between expected values and achieved
# values throughout the tests
MAX_BYTES_DIFF_PERCENTAGE = 10
MAX_TIME_DIFF = 25
def test_tx_rate_limiting(test_microvm_with_api):
    """
    Run iperf tx with and without rate limiting; check limiting effect.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    test_microvm.basic_config()

    # For this test we will be adding three interfaces:
    # 1. No rate limiting
    test_microvm.add_net_iface()
    # 2. Rate limiting without burst
    test_microvm.add_net_iface(tx_rate_limiter=RATE_LIMITER_NO_BURST)
    # 3. Rate limiting with burst
    test_microvm.add_net_iface(tx_rate_limiter=RATE_LIMITER_WITH_BURST)

    test_microvm.start()

    _check_tx_rate_limiting(test_microvm)
    # Also verify the limiters can be changed at runtime via PATCH.
    _check_tx_rate_limit_patch(test_microvm)
def test_rx_rate_limiting(test_microvm_with_api):
    """
    Run iperf rx with and without rate limiting; check limiting effect.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    test_microvm.basic_config()

    # For this test we will be adding three interfaces:
    # 1. No rate limiting
    test_microvm.add_net_iface()
    # 2. Rate limiting without burst
    test_microvm.add_net_iface(rx_rate_limiter=RATE_LIMITER_NO_BURST)
    # 3. Rate limiting with burst
    test_microvm.add_net_iface(rx_rate_limiter=RATE_LIMITER_WITH_BURST)

    # Start the microvm.
    test_microvm.start()

    _check_rx_rate_limiting(test_microvm)
    # Also verify the limiters can be changed at runtime via PATCH.
    _check_rx_rate_limit_patch(test_microvm)
def test_rx_rate_limiting_cpu_load(test_microvm_with_api):
    """
    Run iperf rx with rate limiting; verify cpu load is below threshold.
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()
    test_microvm.basic_config()

    # Create interface with aggressive rate limiting enabled.
    rx_rate_limiter_no_burst = {
        "bandwidth": {"size": 65536, "refill_time": 1000}  # 64KBytes  # 1s
    }
    iface = test_microvm.add_net_iface(rx_rate_limiter=rx_rate_limiter_no_burst)

    test_microvm.start()

    # Start iperf server on guest.
    _start_iperf_server_on_guest(test_microvm, iface.guest_ip)

    # Run iperf client sending UDP traffic; 5x the usual duration so the
    # load monitor has enough samples.
    iperf_cmd = "{} {} -u -c {} -b 1000000000 -t{} -f KBytes".format(
        test_microvm.jailer.netns_cmd_prefix(),
        IPERF_BINARY,
        iface.guest_ip,
        IPERF_TRANSMIT_TIME * 5,
    )

    # Enable monitor that checks if the cpu load is over the threshold.
    # After multiple runs, the average value for the cpu load
    # seems to be around 10%. Setting the threshold a little
    # higher to skip false positives.
    # We want to monitor the emulation thread, which is currently
    # the first one created.
    # A possible improvement is to find it by name.
    cpu_load_monitor = cpu_load.CpuLoadMonitor(
        process_pid=test_microvm.jailer_clone_pid,
        thread_pid=test_microvm.jailer_clone_pid,
        threshold=20,
    )
    with cpu_load_monitor:
        _run_iperf_on_host(iperf_cmd)
def _check_tx_rate_limiting(test_microvm):
    """Check that the transmit rate is within expectations.

    eth0 has no limiter, eth1 has a plain limiter, eth2 has a limiter plus
    a one-time burst budget.
    """
    eth0 = test_microvm.iface["eth0"]["iface"]
    eth1 = test_microvm.iface["eth1"]["iface"]
    eth2 = test_microvm.iface["eth2"]["iface"]

    # Start iperf server on the host as this is the tx rate limiting test.
    _start_iperf_server_on_host(test_microvm.jailer.netns_cmd_prefix())

    # First step: get the transfer rate when no rate limiting is enabled.
    # We are receiving the result in KBytes from iperf.
    print("Run guest TX iperf with no rate-limit")
    rate_no_limit_kbps = _get_tx_bandwidth_with_duration(
        test_microvm, eth0.guest_ip, eth0.host_ip, IPERF_TRANSMIT_TIME
    )
    print("TX rate_no_limit_kbps: {}".format(rate_no_limit_kbps))

    # Calculate the number of bytes that are expected to be sent
    # in each second once the rate limiting is enabled.
    expected_kbps = int(RATE_LIMIT_BYTES / (REFILL_TIME_MS / 1000.0) / 1024)
    print("Rate-Limit TX expected_kbps: {}".format(expected_kbps))

    # Sanity check that bandwidth with no rate limiting is at least double
    # than the one expected when rate limiting is in place.
    assert _get_percentage_difference(rate_no_limit_kbps, expected_kbps) > 100

    # Second step: check bandwidth when rate limiting is on.
    _check_tx_bandwidth(test_microvm, eth1.guest_ip, eth1.host_ip, expected_kbps)

    # Third step: get the number of bytes when rate limiting is on and there is
    # an initial burst size from where to consume.
    print("Run guest TX iperf with exact burst size")
    # Use iperf to obtain the bandwidth when there is burst to consume from,
    # send exactly BURST_SIZE packets.
    iperf_cmd = "{} -c {} -n {} -f KBytes -w {} -N".format(
        IPERF_BINARY, eth2.host_ip, BURST_SIZE, IPERF_TCP_WINDOW
    )
    iperf_out = _run_iperf_on_guest(test_microvm, iperf_cmd, eth2.guest_ip)
    print(iperf_out)
    _, burst_kbps = _process_iperf_output(iperf_out)
    print("TX burst_kbps: {}".format(burst_kbps))
    # Test that the burst bandwidth is at least as two times the rate limit.
    assert _get_percentage_difference(burst_kbps, expected_kbps) > 100

    # Since the burst should be consumed, check rate limit is in place.
    _check_tx_bandwidth(test_microvm, eth2.guest_ip, eth2.host_ip, expected_kbps)
def _check_rx_rate_limiting(test_microvm):
    """Check that the receiving rate is within expectations.

    Mirrors _check_tx_rate_limiting with the traffic direction reversed:
    iperf runs on the guest as a server, the host pushes the traffic.
    """
    eth0 = test_microvm.iface["eth0"]["iface"]
    eth1 = test_microvm.iface["eth1"]["iface"]
    eth2 = test_microvm.iface["eth2"]["iface"]

    # Start iperf server on guest.
    _start_iperf_server_on_guest(test_microvm, eth0.guest_ip)

    # First step: get the transfer rate when no rate limiting is enabled.
    # We are receiving the result in KBytes from iperf.
    print("Run guest RX iperf with no rate-limit")
    rate_no_limit_kbps = _get_rx_bandwidth_with_duration(
        test_microvm, eth0.guest_ip, IPERF_TRANSMIT_TIME
    )
    print("RX rate_no_limit_kbps: {}".format(rate_no_limit_kbps))

    # Calculate the number of bytes that are expected to be sent
    # in each second once the rate limiting is enabled.
    expected_kbps = int(RATE_LIMIT_BYTES / (REFILL_TIME_MS / 1000.0) / 1024)
    print("Rate-Limit RX expected_kbps: {}".format(expected_kbps))

    # Sanity check that bandwidth with no rate limiting is at least double
    # than the one expected when rate limiting is in place.
    assert _get_percentage_difference(rate_no_limit_kbps, expected_kbps) > 100

    # Second step: check bandwidth when rate limiting is on.
    _check_rx_bandwidth(test_microvm, eth1.guest_ip, expected_kbps)

    # Third step: get the number of bytes when rate limiting is on and there is
    # an initial burst size from where to consume.
    print("Run guest RX iperf with exact burst size")
    # Use iperf to obtain the bandwidth when there is burst to consume from,
    # send exactly BURST_SIZE packets.
    iperf_cmd = "{} {} -c {} -n {} -f KBytes -w {} -N".format(
        test_microvm.jailer.netns_cmd_prefix(),
        IPERF_BINARY,
        eth2.guest_ip,
        BURST_SIZE,
        IPERF_TCP_WINDOW,
    )
    iperf_out = _run_iperf_on_host(iperf_cmd)
    print(iperf_out)
    _, burst_kbps = _process_iperf_output(iperf_out)
    print("RX burst_kbps: {}".format(burst_kbps))
    # Test that the burst bandwidth is at least as two times the rate limit.
    assert _get_percentage_difference(burst_kbps, expected_kbps) > 100

    # Since the burst should be consumed, check rate limit is in place.
    _check_rx_bandwidth(test_microvm, eth2.guest_ip, expected_kbps)
def _check_tx_rate_limit_patch(test_microvm):
    """Patch the TX rate limiters and check the new limits.

    Exercises the three PATCH scenarios: add a limiter to an unlimited
    interface, update an existing limiter, and remove a limiter.
    """
    eth0 = test_microvm.iface["eth0"]["iface"]
    eth1 = test_microvm.iface["eth1"]["iface"]
    # Double the original bucket size, so the patched limit is
    # distinguishable from the boot-time one.
    bucket_size = int(RATE_LIMIT_BYTES * 2)
    expected_kbps = int(bucket_size / (REFILL_TIME_MS / 1000.0) / 1024)

    # Check that a TX rate limiter can be applied to a previously unlimited
    # interface.
    _patch_iface_bw(test_microvm, "eth0", "TX", bucket_size, REFILL_TIME_MS)
    _check_tx_bandwidth(test_microvm, eth0.guest_ip, eth0.host_ip, expected_kbps)

    # Check that a TX rate limiter can be updated.
    _patch_iface_bw(test_microvm, "eth1", "TX", bucket_size, REFILL_TIME_MS)
    _check_tx_bandwidth(test_microvm, eth1.guest_ip, eth1.host_ip, expected_kbps)

    # Check that a TX rate limiter can be removed.
    _patch_iface_bw(test_microvm, "eth0", "TX", 0, 0)
    rate_no_limit_kbps = _get_tx_bandwidth_with_duration(
        test_microvm, eth0.guest_ip, eth0.host_ip, IPERF_TRANSMIT_TIME
    )
    # Check that bandwidth when rate-limit disabled is at least 1.5x larger
    # than the one when rate limiting was enabled.
    assert _get_percentage_difference(rate_no_limit_kbps, expected_kbps) > 50
def _check_rx_rate_limit_patch(test_microvm):
    """Patch the RX rate limiters and check the new limits.

    Mirror of _check_tx_rate_limit_patch for the guest's RX path: apply a
    limiter to eth0, update eth1's limiter, then remove eth0's limiter.
    """
    eth0 = test_microvm.iface["eth0"]["iface"]
    eth1 = test_microvm.iface["eth1"]["iface"]
    bucket_size = int(RATE_LIMIT_BYTES * 2)
    # Expected steady-state rate in KiB/s (bucket drained once per refill).
    expected_kbps = int(bucket_size / (REFILL_TIME_MS / 1000.0) / 1024)
    # Check that an RX rate limiter can be applied to a previously unlimited
    # interface.
    _patch_iface_bw(test_microvm, "eth0", "RX", bucket_size, REFILL_TIME_MS)
    _check_rx_bandwidth(test_microvm, eth0.guest_ip, expected_kbps)
    # Check that an RX rate limiter can be updated.
    _patch_iface_bw(test_microvm, "eth1", "RX", bucket_size, REFILL_TIME_MS)
    _check_rx_bandwidth(test_microvm, eth1.guest_ip, expected_kbps)
    # Check that an RX rate limiter can be removed.
    _patch_iface_bw(test_microvm, "eth0", "RX", 0, 0)
    rate_no_limit_kbps = _get_rx_bandwidth_with_duration(
        test_microvm, eth0.guest_ip, IPERF_TRANSMIT_TIME
    )
    # Check that bandwidth when rate-limit disabled is at least 1.5x larger
    # than the one when rate limiting was enabled.
    assert _get_percentage_difference(rate_no_limit_kbps, expected_kbps) > 50
def _check_tx_bandwidth(test_microvm, guest_ip, host_ip, expected_kbps):
    """Check that the rate-limited TX bandwidth is close to what we expect.

    At this point, a daemonized iperf3 server is expected to be running on
    the host.

    :param guest_ip: IP the guest-side iperf client binds its SSH session to
    :param host_ip: host-side iperf server the guest connects to
    :param expected_kbps: expected rate-limited bandwidth, in KiB/s
    """
    print("Check guest TX rate-limit; expected kbps {}".format(expected_kbps))
    observed_kbps = _get_tx_bandwidth_with_duration(
        test_microvm, guest_ip, host_ip, IPERF_TRANSMIT_TIME
    )
    diff_pc = _get_percentage_difference(observed_kbps, expected_kbps)
    print("TX calculated diff percentage: {}\n".format(diff_pc))
    if diff_pc >= MAX_BYTES_DIFF_PERCENTAGE:
        # Short transfers are noisy; retry once with a 10x longer run before
        # failing the assertion.
        print("Short duration test failed. Try another run with 10x duration.")
        observed_kbps = _get_tx_bandwidth_with_duration(
            test_microvm, guest_ip, host_ip, 10 * IPERF_TRANSMIT_TIME
        )
        diff_pc = _get_percentage_difference(observed_kbps, expected_kbps)
        print("TX calculated diff percentage: {}\n".format(diff_pc))
    assert diff_pc < MAX_BYTES_DIFF_PERCENTAGE
def _get_tx_bandwidth_with_duration(test_microvm, guest_ip, host_ip, duration):
    """Measure guest TX bandwidth with an iperf run of `duration` seconds.

    Runs the iperf client inside the guest (via SSH) against the host-side
    server and returns the observed bandwidth in KiB/s.
    """
    iperf_cmd = "{} -c {} -t {} -f KBytes -w {} -N".format(
        IPERF_BINARY, host_ip, duration, IPERF_TCP_WINDOW
    )
    iperf_out = _run_iperf_on_guest(test_microvm, iperf_cmd, guest_ip)
    print(iperf_out)
    _, observed_kbps = _process_iperf_output(iperf_out)
    print("TX observed_kbps: {}".format(observed_kbps))
    return observed_kbps
def _check_rx_bandwidth(test_microvm, guest_ip, expected_kbps):
    """Check that the rate-limited RX bandwidth is close to what we expect.

    At this point, a daemonized iperf3 server is expected to be running on
    the guest.

    :param guest_ip: guest IP the host-side iperf client connects to
    :param expected_kbps: expected rate-limited bandwidth, in KiB/s
    """
    print("Check guest RX rate-limit; expected kbps {}".format(expected_kbps))
    observed_kbps = _get_rx_bandwidth_with_duration(
        test_microvm, guest_ip, IPERF_TRANSMIT_TIME
    )
    diff_pc = _get_percentage_difference(observed_kbps, expected_kbps)
    print("RX calculated diff percentage: {}\n".format(diff_pc))
    if diff_pc >= MAX_BYTES_DIFF_PERCENTAGE:
        # Short transfers are noisy; retry once with a 10x longer run.
        print("Short duration test failed. Try another run with 10x duration.")
        observed_kbps = _get_rx_bandwidth_with_duration(
            test_microvm, guest_ip, 10 * IPERF_TRANSMIT_TIME
        )
        diff_pc = _get_percentage_difference(observed_kbps, expected_kbps)
        # BUGFIX: the retry path previously printed "TX calculated diff
        # percentage" (copy-paste from _check_tx_bandwidth) although this
        # function measures the RX direction.
        print("RX calculated diff percentage: {}\n".format(diff_pc))
    assert diff_pc < MAX_BYTES_DIFF_PERCENTAGE
def _get_rx_bandwidth_with_duration(test_microvm, guest_ip, duration):
    """Measure guest RX bandwidth with an iperf run of `duration` seconds.

    Runs the iperf client on the host (inside the microvm's netns) against
    the guest-side server and returns the observed bandwidth in KiB/s.
    """
    iperf_cmd = "{} {} -c {} -t {} -f KBytes -w {} -N".format(
        test_microvm.jailer.netns_cmd_prefix(),
        IPERF_BINARY,
        guest_ip,
        duration,
        IPERF_TCP_WINDOW,
    )
    iperf_out = _run_iperf_on_host(iperf_cmd)
    print(iperf_out)
    _, observed_kbps = _process_iperf_output(iperf_out)
    print("RX observed_kbps: {}".format(observed_kbps))
    return observed_kbps
def _patch_iface_bw(test_microvm, iface_id, rx_or_tx, new_bucket_size, new_refill_time):
    """Update the bandwidth rate limiter for a given interface.

    Patches the `rx_or_tx` ("RX" or "TX") bandwidth limiter on interface
    `iface_id` with a new token-bucket size and refill time.
    """
    assert rx_or_tx in ["RX", "TX"]
    limiter_field = "{}_rate_limiter".format(rx_or_tx.lower())
    bandwidth_cfg = {"size": new_bucket_size, "refill_time": new_refill_time}
    patch_kwargs = {"iface_id": iface_id, limiter_field: {"bandwidth": bandwidth_cfg}}
    test_microvm.api.network.patch(**patch_kwargs)
def _start_iperf_server_on_guest(test_microvm, hostname):
    """Start iperf in server mode through an SSH connection."""
    # NOTE(review): overwriting `guest_ip` appears to point the microvm's
    # SSH helper at `hostname` before running the command — confirm against
    # framework.microvm.
    test_microvm.guest_ip = hostname
    # -sD: run as a daemonized server; -f KBytes: report in KiB.
    iperf_cmd = "{} -sD -f KBytes\n".format(IPERF_BINARY)
    test_microvm.ssh.run(iperf_cmd)
    # Wait for the iperf daemon to start.
    time.sleep(1)
def _run_iperf_on_guest(test_microvm, iperf_cmd, hostname):
    """Run a client related iperf command through an SSH connection.

    Returns the command's stdout; asserts the command exited cleanly.
    """
    # Point the SSH connection at `hostname` before running the command.
    test_microvm.guest_ip = hostname
    code, stdout, stderr = test_microvm.ssh.run(iperf_cmd)
    assert code == 0, f"stdout: {stdout}\nstderr: {stderr}"
    return stdout
def _start_iperf_server_on_host(netns_cmd_prefix):
    """Start iperf in server mode after killing any leftover iperf daemon.

    :param netns_cmd_prefix: "ip netns exec <ns> " prefix (or empty string)
        so the server runs inside the microvm's network namespace.
    """
    iperf_cmd = "pkill {}\n".format(IPERF_BINARY)
    # Don't check the result of this command because it can fail if no iperf
    # is running.
    utils.run_cmd(iperf_cmd, ignore_return_code=True)
    iperf_cmd = "{} {} -sD -f KBytes\n".format(netns_cmd_prefix, IPERF_BINARY)
    utils.run_cmd(iperf_cmd)
    # Wait for the iperf daemon to start.
    time.sleep(1)
def _run_iperf_on_host(iperf_cmd):
    """Execute a client related iperf command locally."""
    exit_code, out, err = utils.run_cmd(iperf_cmd)
    assert exit_code == 0, f"stdout: {out}\nstderr: {err}"
    return out
def _get_percentage_difference(measured, base):
"""Return the percentage delta between the arguments."""
if measured == base:
return 0
try:
return (abs(measured - base) / base) * 100.0
except ZeroDivisionError:
# It means base and only base is 0.
return 100.0
def _process_iperf_line(line):
"""Parse iperf3 summary line and return test time and bandwidth."""
test_time = line.split(" ")[2].split("-")[1].strip().split(" ")[0]
test_bw = line.split(" ")[5].split(" ")[0].strip()
return float(test_time), float(test_bw)
def _process_iperf_output(iperf_out):
    """Parse iperf3 output and return average test time and bandwidth.

    Averages the values parsed from the "sender" and "receiver" summary
    lines.

    :raises ValueError: when either summary line is absent (e.g. iperf
        failed) — previously this surfaced as an opaque UnboundLocalError.
    """
    send_time = send_bw = rcv_time = rcv_bw = None
    for line in iperf_out.splitlines():
        if line.find("sender") != -1:
            send_time, send_bw = _process_iperf_line(line)
        if line.find("receiver") != -1:
            rcv_time, rcv_bw = _process_iperf_line(line)
    if send_time is None or rcv_time is None:
        raise ValueError(
            "iperf output is missing the sender/receiver summary lines:\n" + iperf_out
        )
    iperf_out_time = (send_time + rcv_time) / 2.0
    iperf_out_bw = (send_bw + rcv_bw) / 2.0
    return float(iperf_out_time), float(iperf_out_bw)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,950
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/framework/jailer.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Define a class for creating the jailed context."""
import os
import shutil
import stat
from pathlib import Path
from retry.api import retry_call
from framework import defs, utils
from framework.defs import FC_BINARY_NAME
# Default name for the socket used for API calls, relative to the chroot
# (see JailerContext.api_socket_path).
DEFAULT_USOCKET_NAME = "run/firecracker.socket"
# The default location for the chroot.
DEFAULT_CHROOT_PATH = f"{defs.DEFAULT_TEST_SESSION_ROOT_PATH}/jailer"
class JailerContext:
    """Represents jailer configuration and contains jailer helper functions.

    Each microvm will have a jailer configuration associated with it.
    """

    # Keep in sync with parameters from code base.
    jailer_id = None
    exec_file = None
    uid = None
    gid = None
    chroot_base = None
    netns = None
    daemonize = None
    new_pid_ns = None
    extra_args = None
    api_socket_name = None
    cgroups = None
    resource_limits = None
    cgroup_ver = None
    parent_cgroup = None

    def __init__(
        self,
        jailer_id,
        exec_file,
        uid=1234,
        gid=1234,
        chroot_base=DEFAULT_CHROOT_PATH,
        netns=None,
        daemonize=True,
        new_pid_ns=False,
        cgroups=None,
        resource_limits=None,
        cgroup_ver=None,
        parent_cgroup=None,
        **extra_args,
    ):
        """Set up jailer fields.

        This plays the role of a default constructor as it populates
        the jailer's fields with some default values. Each field can be
        further adjusted by each test even with None values.
        """
        self.jailer_id = jailer_id
        assert jailer_id is not None
        self.exec_file = exec_file
        self.uid = uid
        self.gid = gid
        self.chroot_base = chroot_base
        # The netns name defaults to the jailer id.
        self.netns = netns if netns is not None else jailer_id
        self.daemonize = daemonize
        self.new_pid_ns = new_pid_ns
        self.extra_args = extra_args
        self.api_socket_name = DEFAULT_USOCKET_NAME
        self.cgroups = cgroups
        self.resource_limits = resource_limits
        self.cgroup_ver = cgroup_ver
        self.parent_cgroup = parent_cgroup

    # Disabling 'too-many-branches' warning for this function as it needs to
    # check every argument, so the number of branches will increase
    # with every new argument.
    # pylint: disable=too-many-branches
    def construct_param_list(self):
        """Create the list of parameters we want the jailer to start with.

        We want to be able to vary any parameter even the required ones as we
        might want to add integration tests that validate the enforcement of
        mandatory arguments.
        """
        jailer_param_list = []
        # Pretty please, try to keep the same order as in the code base.
        if self.jailer_id is not None:
            jailer_param_list.extend(["--id", str(self.jailer_id)])
        if self.exec_file is not None:
            jailer_param_list.extend(["--exec-file", str(self.exec_file)])
        if self.uid is not None:
            jailer_param_list.extend(["--uid", str(self.uid)])
        if self.gid is not None:
            jailer_param_list.extend(["--gid", str(self.gid)])
        if self.chroot_base is not None:
            jailer_param_list.extend(["--chroot-base-dir", str(self.chroot_base)])
        if self.netns is not None:
            jailer_param_list.extend(["--netns", str(self.netns_file_path())])
        if self.daemonize:
            jailer_param_list.append("--daemonize")
        if self.new_pid_ns:
            jailer_param_list.append("--new-pid-ns")
        if self.parent_cgroup:
            jailer_param_list.extend(["--parent-cgroup", str(self.parent_cgroup)])
        if self.cgroup_ver:
            jailer_param_list.extend(["--cgroup-version", str(self.cgroup_ver)])
        if self.cgroups is not None:
            for cgroup in self.cgroups:
                jailer_param_list.extend(["--cgroup", str(cgroup)])
        if self.resource_limits is not None:
            for limit in self.resource_limits:
                jailer_param_list.extend(["--resource-limit", str(limit)])
        # applying necessary extra args if needed
        if len(self.extra_args) > 0:
            # Everything after "--" is forwarded to the jailed binary.
            jailer_param_list.append("--")
            for key, value in self.extra_args.items():
                jailer_param_list.append("--{}".format(key))
                if value is not None:
                    jailer_param_list.append(value)
                    # Remember a custom API socket name so api_socket_path()
                    # resolves correctly.
                    if key == "api-sock":
                        self.api_socket_name = value
        return jailer_param_list

    # pylint: enable=too-many-branches

    def chroot_base_with_id(self):
        """Return the MicroVM chroot base + MicroVM ID."""
        return os.path.join(
            self.chroot_base if self.chroot_base is not None else DEFAULT_CHROOT_PATH,
            Path(self.exec_file).name,
            self.jailer_id,
        )

    def api_socket_path(self):
        """Return the MicroVM API socket path."""
        return os.path.join(self.chroot_path(), self.api_socket_name)

    def chroot_path(self):
        """Return the MicroVM chroot path."""
        return os.path.join(self.chroot_base_with_id(), "root")

    def jailed_path(self, file_path, create=False, subdir="."):
        """Create a hard link or block special device owned by uid:gid.

        Create a hard link or block special device from the specified file,
        changes the owner to uid:gid, and returns a path to the file which is
        valid within the jail.
        """
        file_path = Path(file_path)
        chroot_path = Path(self.chroot_path())
        global_p = chroot_path / subdir / file_path.name
        global_p.parent.mkdir(parents=True, exist_ok=True)
        # Path of the file as seen from inside the chroot.
        jailed_p = Path("/") / subdir / file_path.name
        if create:
            stat_src = file_path.stat()
            if file_path.is_block_device():
                perms = stat.S_IRUSR | stat.S_IWUSR
                os.mknod(global_p, mode=stat.S_IFBLK | perms, device=stat_src.st_rdev)
            else:
                stat_dst = chroot_path.stat()
                if stat_src.st_dev == stat_dst.st_dev:
                    # if they are in the same device, hardlink
                    global_p.unlink(missing_ok=True)
                    global_p.hardlink_to(file_path)
                else:
                    # otherwise, copy
                    shutil.copyfile(file_path, global_p)
            os.chown(global_p, self.uid, self.gid)
        return str(jailed_p)

    def netns_file_path(self):
        """Get the host netns file path for a jailer context.

        Returns the path on the host to the file which represents the netns,
        and which must be passed to the jailer as the value of the --netns
        parameter, when in use.
        """
        if self.netns:
            return "/var/run/netns/{}".format(self.netns)
        return None

    def netns_cmd_prefix(self):
        """Return the jailer context netns file prefix."""
        if self.netns:
            return "ip netns exec {} ".format(self.netns)
        return ""

    def setup(self):
        """Set up this jailer context."""
        os.makedirs(
            self.chroot_base if self.chroot_base is not None else DEFAULT_CHROOT_PATH,
            exist_ok=True,
        )
        # Create the netns only if it does not already exist.
        if self.netns and self.netns not in utils.run_cmd("ip netns list")[1]:
            utils.run_cmd("ip netns add {}".format(self.netns))

    def cleanup(self):
        """Clean up this jailer context."""
        # pylint: disable=subprocess-run-check
        if self.netns and os.path.exists("/var/run/netns/{}".format(self.netns)):
            utils.run_cmd("ip netns del {}".format(self.netns))
        # Remove the cgroup folders associated with this microvm.
        # The base /sys/fs/cgroup/<controller>/firecracker folder will remain,
        # because we can't remove it unless we're sure there's no other running
        # microVM.
        if self.cgroups:
            controllers = set()
            # Extract the controller for every cgroup that needs to be set.
            for cgroup in self.cgroups:
                controllers.add(cgroup.split(".")[0])
            for controller in controllers:
                # Obtain the tasks from each cgroup and wait on them before
                # removing the microvm's associated cgroup folder.
                try:
                    retry_call(
                        f=self._kill_cgroup_tasks,
                        fargs=[controller],
                        exceptions=TimeoutError,
                        max_delay=5,
                    )
                except TimeoutError:
                    pass
                # Remove cgroups and sub cgroups (deepest directories first).
                back_cmd = r"-depth -type d -exec rmdir {} \;"
                cmd = "find /sys/fs/cgroup/{}/{}/{} {}".format(
                    controller, FC_BINARY_NAME, self.jailer_id, back_cmd
                )
                # We do not need to know if it succeeded or not; afterall,
                # we are trying to clean up resources created by the jailer
                # itself not the testing system.
                utils.run_cmd(cmd, ignore_return_code=True)

    def _kill_cgroup_tasks(self, controller):
        """Simulate wait on pid.

        Read the tasks file and stay there until /proc/{pid}
        disappears. The retry function that calls this code makes
        sure we do not timeout.
        """
        # pylint: disable=subprocess-run-check
        tasks_file = "/sys/fs/cgroup/{}/{}/{}/tasks".format(
            controller, FC_BINARY_NAME, self.jailer_id
        )
        # If tests do not call start on machines, the cgroups will not be
        # created.
        if not os.path.exists(tasks_file):
            return True
        cmd = "cat {}".format(tasks_file)
        result = utils.run_cmd(cmd)
        tasks_split = result.stdout.splitlines()
        for task in tasks_split:
            # A live /proc/<pid> entry means the task has not exited yet;
            # raising TimeoutError makes retry_call try again.
            if os.path.exists("/proc/{}".format(task)):
                raise TimeoutError
        return True
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,951
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/framework/stats/consumer.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Module for multiple statistics consumers."""
from abc import ABC, abstractmethod
from collections import defaultdict
from numbers import Number
from typing import Any, Callable
from framework.utils import ExceptionAggregator
from .criteria import CriteriaException
from .metadata import Provider as MetadataProvider
from .types import MeasurementDef
class ProcessingException(ExceptionAggregator):
    """Exception to be raised when criteria fails."""

    def __init__(self, stats=None, custom=None):
        """Initialize the exception with optional stats/custom payloads."""
        super().__init__()
        self.stats, self.custom = stats, custom
class Consumer(ABC):
    """Base class for statistics aggregation class."""

    # Reserved keys inside each per-measurement results dict.
    UNIT_KEY = "_unit"
    DATA_KEY = "_data"

    # pylint: disable=W0102
    def __init__(self, metadata_provider: MetadataProvider = None, custom=None):
        """Initialize a consumer."""
        self._iteration = 0
        self._results = defaultdict()  # Aggregated results.
        self._custom = {} if not custom else custom
        self._metadata_provider = metadata_provider
        self._measurements_defs = {}
        if metadata_provider:
            self._measurements_defs = metadata_provider.measurements
        # Final statistics.
        self._statistics = {}
        self._failure_aggregator = ProcessingException()

    @abstractmethod
    def ingest(self, iteration: int, raw_data: Any):
        """Abstract method for ingesting the raw result."""

    def consume_data(self, ms_name: str, value: Number):
        """Aggregate measurement."""
        results = self._results.get(ms_name)
        if not results:
            # Lazily create the per-measurement entry with an empty data list.
            self._results[ms_name] = {}
            self._results[ms_name][self.DATA_KEY] = []
        self._results[ms_name][self.DATA_KEY].append(value)

    def consume_stat(self, st_name: str, ms_name: str, value: Number):
        """Aggregate statistics."""
        results = self._results.get(ms_name)
        if not results:
            self._results[ms_name] = {}
        self._results[ms_name][st_name] = value

    def consume_custom(self, name: str, value: Any):
        """Aggregate custom information."""
        # Custom data is kept per iteration, keyed by name.
        if not self._custom.get(self._iteration):
            self._custom[self._iteration] = {}
        if not self._custom[self._iteration].get(name):
            self._custom[self._iteration][name] = []
        self._custom[self._iteration][name].append(value)

    def set_measurement_def(self, value: MeasurementDef):
        """Set measurement definition."""
        self._measurements_defs[value.name] = value

    def _validate(self):
        """Verify that the statistics/measurements correspondence...

        is backed by corresponding measurements definitions.
        """
        for ms_name in self._results:
            if ms_name not in self._measurements_defs:
                self._failure_aggregator.add_row(
                    f"'{ms_name}' measurement does not have a "
                    "corresponding measurement definition."
                )
        if self._failure_aggregator.has_any():
            raise self._failure_aggregator

    def _reset(self):
        """Reset the results of this consumer, used in a previous exercise."""
        self._results = defaultdict()

    def process(self, check=True, fail_fast=False) -> (dict, dict):
        """Generate statistics as a dictionary.

        For each measurement, either uses directly-consumed statistic
        values or computes them from the raw data points; optionally checks
        each statistic against its pass criteria. Raises the accumulated
        ProcessingException when any check or lookup failed.
        """
        self._validate()
        for ms_name in self._results:
            self._statistics.setdefault(ms_name, {})[
                self.UNIT_KEY
            ] = self._measurements_defs[ms_name].unit
            has_data = Consumer.DATA_KEY in self._results[ms_name]
            st_defs = self._measurements_defs[ms_name].statistics
            for st_def in st_defs:
                if st_def.name not in self._results[ms_name]:
                    if not has_data:
                        # No directly-consumed value and no raw data points:
                        # this statistic cannot be computed.
                        self._failure_aggregator.add_row(
                            f"Processing '{st_def.name}' statistic failed due "
                            f"to lack of data points for '{ms_name}' "
                            "measurement."
                        )
                        continue
                    # Compute the statistic from the raw data points.
                    self._statistics[ms_name][st_def.name] = {
                        "value": st_def.func(self._results[ms_name][self.DATA_KEY])
                    }
                else:
                    self._statistics[ms_name][st_def.name] = {
                        "value": self._results[ms_name][st_def.name]
                    }
                pass_criteria = st_def.pass_criteria
                if check and pass_criteria:
                    # if the statistic definition contains a criteria but the
                    # corresponding baseline is not defined, the test should fail.
                    if pass_criteria.baseline == {}:
                        self._failure_aggregator.add_row(
                            f"Baseline data is not defined for '{ms_name}/{st_def.name}"
                            f"/{pass_criteria.name}'."
                        )
                        continue
                    self._statistics[ms_name][st_def.name]["pass_criteria"] = {
                        pass_criteria.name: pass_criteria.baseline
                    }
                    res = self._statistics[ms_name][st_def.name]["value"]
                    try:
                        pass_criteria.check(res)
                        self._statistics[ms_name][st_def.name]["outcome"] = "PASSED"
                    except CriteriaException as err:
                        # pylint: disable=W0707
                        self._statistics[ms_name][st_def.name]["outcome"] = "FAILED"
                        fail_msg = f"'{ms_name}/{st_def.name}': {err}"
                        self._failure_aggregator.add_row(fail_msg)
                        if fail_fast:
                            raise self._failure_aggregator
        self._reset()
        if self._failure_aggregator.has_any():
            # Attach the partial results so callers can inspect them.
            self._failure_aggregator.stats = self._statistics
            self._failure_aggregator.custom = self._custom
            raise self._failure_aggregator
        return self._statistics, self._custom
class LambdaConsumer(Consumer):
    """Consumer which executes a function in the ingestion step.

    The function called in the ingestion step must have the following
    signature: `def func_name(cons: Consumer, raw_output: Any, **kw_args)`.
    """

    def __init__(
        self,
        func: Callable,
        func_kwargs=None,
        metadata_provider: MetadataProvider = None,
    ):
        """Initialize the LambdaConsumer."""
        super().__init__(metadata_provider)
        self._ingest_fn = func
        self._ingest_kwargs = func_kwargs or {}

    def ingest(self, iteration, raw_data):
        """Record the iteration and run the wrapped callable on the data."""
        self._iteration = iteration
        return self._ingest_fn(self, raw_data, **self._ingest_kwargs)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,952
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/style/test_rust.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests ensuring codebase style compliance for Rust."""
import subprocess
from framework import utils
def test_rust_order():
    """
    Tests that `Cargo.toml` dependencies are alphabetically ordered.

    @type: style
    """
    # Runs `cargo-sort` with the current working directory (`cwd`) as the
    # repository root; run_cmd raises on a non-zero exit code.
    utils.run_cmd(cmd="cargo-sort --workspace --check --grouped", cwd="..")
def test_rust_style():
    """
    Test that rust code passes style checks.
    """
    # Flatten the rustfmt config file into a comma-separated `--config` value.
    # BUGFIX: the file was previously opened without ever being closed; use a
    # context manager so the handle is released deterministically.
    with open("fmt.toml", encoding="utf-8") as cfg_file:
        config = cfg_file.read().replace("\n", ",")
    # Check that the output is empty.
    _, stdout, _ = utils.run_cmd(f"cargo fmt --all -- --check --config {config}")
    # rustfmt prepends `"Diff in"` to the reported output.
    assert "Diff in" not in stdout
def test_ensure_mod_tests():
    """
    Check that files containing unit tests have a 'tests' module defined.
    """
    excluded_fragments = [
        "_gen/",
        "/tests/",
        "/test_utils",
        "build/",
        "src/vmm/src/io_uring/bindings.rs",
    ]
    # Files with `#[test]` without `mod tests`.
    cmd = (
        'find ../src -type f -name "*.rs" '
        '|xargs grep --files-without-match "mod tests {" '
        '|xargs grep --files-with-matches "#\\[test\\]"'
    )
    proc = subprocess.run(cmd, shell=True, capture_output=True, check=True)
    candidates = proc.stdout.decode("utf-8").split("\n")
    # Keep only non-empty paths that match none of the exclusion fragments.
    offenders = []
    for path in candidates:
        if path and not any(fragment in path for fragment in excluded_fragments):
            offenders.append(path)
    # Assert no offending file remains.
    assert (
        offenders == []
    ), "`#[test]`s found in files without `mod tests`s. Code coverage requires that tests are in test modules."
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,953
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/security/test_vulnerabilities.py
|
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# N.B.: Although this repository is released under the Apache-2.0, part of its test requires a
# script from the third party "Spectre & Meltdown Checker" project. This script is under the
# GPL-3.0-only license.
"""Tests vulnerabilities mitigations."""
import pytest
import requests
from framework import utils
from framework.properties import global_props
from framework.utils_cpu_templates import nonci_on_arm
# URL serving the third-party "Spectre & Meltdown Checker" script
# (GPL-3.0-only; see the license note at the top of this file).
CHECKER_URL = "https://meltdown.ovh"
# File name used for the downloaded checker script (locally and on guests).
CHECKER_FILENAME = "spectre-meltdown-checker.sh"
@pytest.fixture(name="microvm")
def microvm_fxt(uvm_plain):
    """Microvm fixture: booted 2-vCPU / 256 MiB guest with one net iface."""
    uvm_plain.spawn()
    uvm_plain.basic_config(
        vcpu_count=2,
        mem_size_mib=256,
    )
    uvm_plain.add_net_iface()
    uvm_plain.start()
    return uvm_plain
@pytest.fixture(name="microvm_with_template")
def microvm_with_template_fxt(uvm_plain, cpu_template):
    """Microvm fixture with a (static) CPU template applied at boot."""
    uvm_plain.spawn()
    uvm_plain.basic_config(
        vcpu_count=2,
        mem_size_mib=256,
        cpu_template=cpu_template,
    )
    uvm_plain.add_net_iface()
    uvm_plain.start()
    return uvm_plain
@pytest.fixture(name="microvm_with_custom_cpu_template")
def microvm_with_custom_template_fxt(uvm_plain, custom_cpu_template):
    """Microvm fixture with a custom CPU template PUT via the API."""
    uvm_plain.spawn()
    uvm_plain.basic_config(
        vcpu_count=2,
        mem_size_mib=256,
    )
    # Custom templates are applied through the cpu-config API endpoint,
    # not through basic_config.
    uvm_plain.api.cpu_config.put(**custom_cpu_template["template"])
    uvm_plain.add_net_iface()
    uvm_plain.start()
    return uvm_plain
@pytest.fixture(scope="session", name="spectre_meltdown_checker")
def download_spectre_meltdown_checker(tmp_path_factory):
    """Download spectre / meltdown checker script.

    Session-scoped: the script is fetched once and shared by all tests.
    """
    resp = requests.get(CHECKER_URL, timeout=5)
    resp.raise_for_status()
    path = tmp_path_factory.mktemp("tmp", True) / CHECKER_FILENAME
    path.write_bytes(resp.content)
    return path
def run_spectre_meltdown_checker_on_guest(
    microvm,
    spectre_meltdown_checker,
):
    """Run the spectre / meltdown checker on guest.

    Copies the script into the guest over SCP and asserts it exits cleanly.
    """
    remote_path = f"/tmp/{CHECKER_FILENAME}"
    microvm.ssh.scp_put(spectre_meltdown_checker, remote_path)
    ecode, stdout, stderr = microvm.ssh.run(f"sh {remote_path} --explain")
    assert ecode == 0, f"stdout:\n{stdout}\nstderr:\n{stderr}\n"
@pytest.mark.no_block_pr
@pytest.mark.skipif(
    global_props.instance == "c7g.metal" and global_props.host_linux_version == "4.14",
    reason="c7g host 4.14 requires modifications to the 5.10 guest kernel to boot successfully.",
)
def test_spectre_meltdown_checker_on_host(spectre_meltdown_checker):
    """
    Test with the spectre / meltdown checker on host.
    """
    # run_cmd raises on non-zero exit, failing the test if the host is
    # reported vulnerable.
    utils.run_cmd(f"sh {spectre_meltdown_checker} --explain")
@pytest.mark.no_block_pr
@pytest.mark.skipif(
    global_props.instance == "c7g.metal" and global_props.host_linux_version == "4.14",
    reason="c7g host 4.14 requires modifications to the 5.10 guest kernel to boot successfully.",
)
def test_spectre_meltdown_checker_on_guest(spectre_meltdown_checker, microvm):
    """
    Test with the spectre / meltdown checker on guest.
    """
    run_spectre_meltdown_checker_on_guest(
        microvm,
        spectre_meltdown_checker,
    )
@pytest.mark.no_block_pr
@pytest.mark.skipif(
    global_props.instance == "c7g.metal" and global_props.host_linux_version == "4.14",
    reason="c7g host 4.14 requires modifications to the 5.10 guest kernel to boot successfully.",
)
def test_spectre_meltdown_checker_on_restored_guest(
    spectre_meltdown_checker,
    microvm,
    microvm_factory,
):
    """
    Test with the spectre / meltdown checker on a restored guest.

    Snapshots the source VM, restores it into a fresh VM, then runs the
    checker inside the restored guest.
    """
    snapshot = microvm.snapshot_full()
    # Create a destination VM
    dst_vm = microvm_factory.build()
    dst_vm.spawn()
    # Restore the destination VM from the snapshot
    dst_vm.restore_from_snapshot(snapshot, resume=True)
    run_spectre_meltdown_checker_on_guest(
        dst_vm,
        spectre_meltdown_checker,
    )
@pytest.mark.no_block_pr
@pytest.mark.skipif(
    global_props.instance == "c7g.metal" and global_props.host_linux_version == "4.14",
    reason="c7g host 4.14 requires modifications to the 5.10 guest kernel to boot successfully.",
)
@nonci_on_arm
def test_spectre_meltdown_checker_on_guest_with_template(
    spectre_meltdown_checker,
    microvm_with_template,
):
    """
    Test with the spectre / meltdown checker on guest with CPU template.
    """
    run_spectre_meltdown_checker_on_guest(
        microvm_with_template,
        spectre_meltdown_checker,
    )
@pytest.mark.no_block_pr
@pytest.mark.skipif(
    global_props.instance == "c7g.metal" and global_props.host_linux_version == "4.14",
    reason="c7g host 4.14 requires modifications to the 5.10 guest kernel to boot successfully.",
)
@nonci_on_arm
def test_spectre_meltdown_checker_on_guest_with_custom_template(
    spectre_meltdown_checker,
    microvm_with_custom_cpu_template,
):
    """
    Test with the spectre / meltdown checker on guest with a custom CPU template.
    """
    microvm = microvm_with_custom_cpu_template
    run_spectre_meltdown_checker_on_guest(
        microvm,
        spectre_meltdown_checker,
    )
@pytest.mark.no_block_pr
@pytest.mark.skipif(
    global_props.instance == "c7g.metal" and global_props.host_linux_version == "4.14",
    reason="c7g host 4.14 requires modifications to the 5.10 guest kernel to boot successfully.",
)
@nonci_on_arm
def test_spectre_meltdown_checker_on_restored_guest_with_template(
    spectre_meltdown_checker,
    microvm_with_template,
    microvm_factory,
):
    """
    Test with the spectre / meltdown checker on a restored guest with a CPU template.

    Snapshot/restore variant of the template test above.
    """
    snapshot = microvm_with_template.snapshot_full()
    # Create a destination VM
    dst_vm = microvm_factory.build()
    dst_vm.spawn()
    # Restore the destination VM from the snapshot
    dst_vm.restore_from_snapshot(snapshot, resume=True)
    run_spectre_meltdown_checker_on_guest(
        dst_vm,
        spectre_meltdown_checker,
    )
@pytest.mark.no_block_pr
@pytest.mark.skipif(
    global_props.instance == "c7g.metal" and global_props.host_linux_version == "4.14",
    reason="c7g host 4.14 requires modifications to the 5.10 guest kernel to boot successfully.",
)
@nonci_on_arm
def test_spectre_meltdown_checker_on_restored_guest_with_custom_template(
    spectre_meltdown_checker,
    microvm_with_custom_cpu_template,
    microvm_factory,
):
    """
    Test with the spectre / meltdown checker on a restored guest with a custom CPU template.
    """
    src_vm = microvm_with_custom_cpu_template
    snapshot = src_vm.snapshot_full()
    dst_vm = microvm_factory.build()
    dst_vm.spawn()
    # Restore the destination VM from the snapshot
    dst_vm.restore_from_snapshot(snapshot, resume=True)
    run_spectre_meltdown_checker_on_guest(
        dst_vm,
        spectre_meltdown_checker,
    )
def check_vulnerabilities_files_on_guest(microvm):
    """
    Check that the guest's vulnerabilities files do not contain `Vulnerable`.

    See also: https://elixir.bootlin.com/linux/latest/source/Documentation/ABI/testing/sysfs-devices-system-cpu
    and search for `vulnerabilities`.
    """
    # NOTE: a stray `@pytest.mark.no_block_pr` decorator was removed from this
    # helper — pytest marks have no effect on functions that are not collected
    # as tests, and it misleadingly suggested this helper was one.
    vuln_dir = "/sys/devices/system/cpu/vulnerabilities"
    # grep exits 1 when nothing matches; that is the success case here —
    # no file (except the tolerated mmio_stale_data entry) says "Vulnerable".
    ecode, stdout, stderr = microvm.ssh.run(
        f"grep -r Vulnerable {vuln_dir} | grep -v mmio_stale_data:"
    )
    assert ecode == 1, f"stdout:\n{stdout}\nstderr:\n{stderr}\n"
@pytest.mark.no_block_pr
def test_vulnerabilities_files_on_guest(microvm):
    """
    Test vulnerabilities files on guest.
    """
    check_vulnerabilities_files_on_guest(microvm)
@pytest.mark.no_block_pr
def test_vulnerabilities_files_on_restored_guest(
    microvm,
    microvm_factory,
):
    """
    Test vulnerabilities files on a restored guest.
    """
    snapshot = microvm.snapshot_full()
    # Create a destination VM
    dst_vm = microvm_factory.build()
    dst_vm.spawn()
    # Restore the destination VM from the snapshot
    dst_vm.restore_from_snapshot(snapshot, resume=True)
    check_vulnerabilities_files_on_guest(dst_vm)
@pytest.mark.no_block_pr
@nonci_on_arm
def test_vulnerabilities_files_on_guest_with_template(
    microvm_with_template,
):
    """
    Test vulnerabilities files on guest with CPU template.
    """
    check_vulnerabilities_files_on_guest(microvm_with_template)
@pytest.mark.no_block_pr
@nonci_on_arm
def test_vulnerabilities_files_on_guest_with_custom_template(
    microvm_with_custom_cpu_template,
):
    """
    Verify the guest vulnerabilities files when booted with a custom CPU template.
    """
    check_vulnerabilities_files_on_guest(microvm_with_custom_cpu_template)
@pytest.mark.no_block_pr
@nonci_on_arm
def test_vulnerabilities_files_on_restored_guest_with_template(
    microvm_with_template,
    microvm_factory,
):
    """
    Verify vulnerabilities files on a guest restored from a snapshot of a
    VM booted with a CPU template.
    """
    restored = microvm_factory.build()
    restored.spawn()
    # Snapshot the templated source VM and resume the fresh VM from it.
    restored.restore_from_snapshot(microvm_with_template.snapshot_full(), resume=True)
    check_vulnerabilities_files_on_guest(restored)
@pytest.mark.no_block_pr
@nonci_on_arm
def test_vulnerabilities_files_on_restored_guest_with_custom_template(
    microvm_with_custom_cpu_template,
    microvm_factory,
):
    """
    Verify vulnerabilities files on a guest restored from a snapshot of a
    VM booted with a custom CPU template.
    """
    full_snapshot = microvm_with_custom_cpu_template.snapshot_full()
    restored = microvm_factory.build()
    restored.spawn()
    restored.restore_from_snapshot(full_snapshot, resume=True)
    check_vulnerabilities_files_on_guest(restored)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,954
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_max_devices.py
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests scenario for adding the maximum number of devices to a microVM."""
import platform
import pytest
# IRQs are available from 5 to 23, so the maximum number of devices
# supported at the same time is 19.
MAX_DEVICES_ATTACHED = 19
@pytest.mark.skipif(
platform.machine() != "x86_64", reason="Firecracker supports 24 IRQs on x86_64."
)
def test_attach_maximum_devices(test_microvm_with_api):
    """
    Test attaching maximum number of devices to the microVM.
    """
    vm = test_microvm_with_api
    vm.spawn()
    # Set up a basic microVM; this already configures the rootfs device.
    vm.basic_config()
    # The rootfs occupies one device slot, so only
    # (MAX_DEVICES_ATTACHED - 1) network devices fit on top of it.
    num_ifaces = MAX_DEVICES_ATTACHED - 1
    for _ in range(num_ifaces):
        vm.add_net_iface()
    vm.start()
    # Every attached network device must be operational: the guest has
    # to answer a trivial command over each interface.
    for iface_idx in range(num_ifaces):
        ecode, _, _ = vm.ssh_iface(iface_idx).run("sync")
        assert ecode == 0
@pytest.mark.skipif(
platform.machine() != "x86_64", reason="Firecracker supports 24 IRQs on x86_64."
)
def test_attach_too_many_devices(test_microvm_with_api):
    """
    Test attaching to a microVM more devices than available IRQs.
    """
    vm = test_microvm_with_api
    vm.spawn()
    # Basic config already attaches the rootfs device.
    vm.basic_config()
    # Adding MAX_DEVICES_ATTACHED network devices on top of the rootfs
    # pushes the device count one past the available IRQs.
    for _ in range(MAX_DEVICES_ATTACHED):
        vm.add_net_iface()
    # Booting must fail: there is no free IRQ for the last device.
    error_str = (
        "Failed to allocate requested resource: The requested resource"
        " is not available."
    )
    with pytest.raises(RuntimeError, match=error_str):
        vm.start()
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,955
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/functional/test_signals.py
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests scenarios for Firecracker signal handling."""
import json
import os
import resource as res
from signal import SIGBUS, SIGHUP, SIGILL, SIGPIPE, SIGSEGV, SIGSYS, SIGXCPU, SIGXFSZ
from time import sleep
import pytest
from framework import utils
# Map each handled signal to the metric-field name Firecracker uses for it.
# The metric name is simply the lowercase signal name (e.g. SIGBUS -> "sigbus").
_HANDLED_SIGNALS = (SIGBUS, SIGSEGV, SIGXFSZ, SIGXCPU, SIGPIPE, SIGHUP, SIGILL, SIGSYS)
signum_str = {sig: sig.name.lower() for sig in _HANDLED_SIGNALS}
@pytest.mark.parametrize(
"signum", [SIGBUS, SIGSEGV, SIGXFSZ, SIGXCPU, SIGPIPE, SIGHUP, SIGILL, SIGSYS]
)
def test_generic_signal_handler(test_microvm_with_api, signum):
    """
    Test signal handling for all handled signals.

    Boots a microVM, sends `signum` to the Firecracker process, and checks
    that the signal is logged and counted in the metrics file. SIGPIPE is
    handled gracefully (the process survives); all other signals must
    terminate the process.
    """
    microvm = test_microvm_with_api
    microvm.spawn()
    # We don't need to monitor the memory for this test.
    microvm.memory_monitor = None
    microvm.basic_config()
    microvm.start()
    firecracker_pid = int(microvm.jailer_clone_pid)
    sleep(0.5)
    metrics_jail_path = microvm.metrics_file
    # FIX: open the metrics file through a context manager so the file
    # descriptor is not leaked (the original never closed it).
    with open(metrics_jail_path, encoding="utf-8") as metrics_fd:
        line_metrics = metrics_fd.readlines()
        assert len(line_metrics) == 1
        os.kill(firecracker_pid, signum)
        # Firecracker gracefully handles SIGPIPE (doesn't terminate).
        if signum == int(SIGPIPE):
            msg = "Received signal 13"
            # Flush metrics to file, so we can see the SIGPIPE at bottom assert.
            # This is going to fail if process has exited.
            microvm.api.actions.put(action_type="FlushMetrics")
        else:
            microvm.expect_kill_by_signal = True
            # Ensure that the process was terminated.
            utils.wait_process_termination(firecracker_pid)
            msg = "Shutting down VM after intercepting signal {}".format(signum)
        microvm.check_log_message(msg)
        # NOTE(review): the counter check is skipped for SIGSYS —
        # presumably no final metrics flush happens in that case; confirm.
        if signum != SIGSYS:
            metric_line = json.loads(metrics_fd.readlines()[0])
            assert metric_line["signals"][signum_str[signum]] == 1
def test_sigxfsz_handler(uvm_plain_rw):
    """
    Test intercepting and handling SIGXFSZ.

    Imposes an RLIMIT_FSIZE on the Firecracker process so that writing the
    metrics file eventually overruns the limit and triggers SIGXFSZ, then
    verifies the signal was logged and counted.
    """
    microvm = uvm_plain_rw
    microvm.spawn()
    # We don't need to monitor the memory for this test.
    microvm.memory_monitor = None
    # We need to use the Sync file engine type. If we use io_uring we will not
    # get a SIGXFSZ. We'll instead get an errno 27 File too large as the
    # completed entry status code.
    microvm.basic_config(rootfs_io_engine="Sync")
    microvm.start()
    metrics_jail_path = microvm.metrics_file
    # FIX: use a context manager so the metrics file descriptor is closed
    # (the original leaked it).
    with open(metrics_jail_path, encoding="utf-8") as metrics_fd:
        line_metrics = metrics_fd.readlines()
        assert len(line_metrics) == 1
        firecracker_pid = int(microvm.jailer_clone_pid)
        size = os.path.getsize(metrics_jail_path)
        # The SIGXFSZ is triggered because the size of rootfs is bigger than
        # the size of metrics file times 3. Since the metrics file is flushed
        # twice we have to make sure that the limit is bigger than that
        # in order to make sure the SIGXFSZ metric is logged
        res.prlimit(firecracker_pid, res.RLIMIT_FSIZE, (size * 3, res.RLIM_INFINITY))
        # Poll until the Firecracker process is gone; `ps -p` raising
        # signals the process no longer exists.
        while True:
            try:
                utils.run_cmd("ps -p {}".format(firecracker_pid))
                sleep(1)
            except ChildProcessError:
                break
        microvm.expect_kill_by_signal = True
        msg = "Shutting down VM after intercepting signal 25, code 0"
        microvm.check_log_message(msg)
        metric_line = json.loads(metrics_fd.readlines()[0])
        assert metric_line["signals"]["sigxfsz"] == 1
def test_handled_signals(test_microvm_with_api):
    """
    Test that handled signals don't kill the microVM.

    Sends a signal Firecracker installs a handler for and verifies the
    guest is still responsive over SSH afterwards.
    """
    vm = test_microvm_with_api
    vm.spawn()
    # Memory monitoring is not needed for this test.
    vm.memory_monitor = None
    vm.basic_config(vcpu_count=2)
    vm.add_net_iface()
    vm.start()
    fc_pid = int(vm.jailer_clone_pid)

    def assert_guest_alive():
        # `nproc` doubles as a liveness and correctness probe: it must
        # succeed and report the two configured vCPUs.
        _, stdout, stderr = vm.ssh.run("nproc")
        assert stderr == ""
        assert int(stdout) == 2

    # Open a SSH connection to validate the microVM stays alive.
    assert_guest_alive()
    # We have a handler installed for this signal.
    # The 35 is the SIGRTMIN for musl libc.
    # We hardcode this value since the SIGRTMIN python reports
    # is 34, which is likely the one for glibc.
    os.kill(fc_pid, 35)
    # Validate the microVM is still up and running.
    assert_guest_alive()
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,956
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/security/test_sec_audit.py
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests ensuring security vulnerabilities are not present in dependencies."""
import pytest
from framework import defs
from framework.utils_cpuid import CpuVendor, get_cpu_vendor
from host_tools.cargo_build import cargo
@pytest.mark.skipif(
get_cpu_vendor() != CpuVendor.INTEL,
reason="The audit is based on cargo.lock which " "is identical on all platforms",
)
def test_cargo_audit():
    """
    Run cargo audit to check for crates with security vulnerabilities.

    `cargo()` raises on a non-zero exit code, which fails the test.
    """
    cargo("audit", "--deny warnings -q", cwd=defs.FC_WORKSPACE_DIR)
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,957
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/performance/configs/defs.py
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Module with definitions relevant for performance tests configs usage."""
from pathlib import Path
# Absolute path of the directory containing this file, i.e. where the
# performance test config files live.
CFG_LOCATION = Path(__file__).parent.resolve()
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,958
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/.buildkite/pipeline_perf.py
|
#!/usr/bin/env python3
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Generate Buildkite performance pipelines dynamically"""
from common import COMMON_PARSER, group, overlay_dict, pipeline_to_json
perf_test = {
"block": {
"label": "🖴 Block Performance",
"test_path": "integration_tests/performance/test_block_performance.py",
"devtool_opts": "-c 1-10 -m 0",
"timeout_in_minutes": 240,
},
"snapshot-latency": {
"label": "📸 Snapshot Latency",
"test_path": "integration_tests/performance/test_snapshot_restore_performance.py",
"devtool_opts": "-c 1-12 -m 0",
"timeout_in_minutes": 60,
},
"vsock-throughput": {
"label": "🧦 Vsock Throughput",
"test_path": "integration_tests/performance/test_vsock_throughput.py",
"devtool_opts": "-c 1-10 -m 0",
"timeout_in_minutes": 20,
},
"network-latency": {
"label": "🖧 Network Latency",
"test_path": "integration_tests/performance/test_network_latency.py",
"devtool_opts": "-c 1-10 -m 0",
"timeout_in_minutes": 10,
},
"network-throughput": {
"label": "🖧 Network TCP Throughput",
"test_path": "integration_tests/performance/test_network_tcp_throughput.py",
"devtool_opts": "-c 1-10 -m 0",
"timeout_in_minutes": 45,
},
}
def build_group(test):
    """Build a Buildkite pipeline `group` step"""
    # Consume the keys that map to explicit `group()` arguments; whatever
    # remains in `test` is forwarded verbatim as extra step parameters.
    label = test.pop("label")
    devtool_opts = test.pop("devtool_opts")
    test_path = test.pop("test_path")
    retries = test.pop("retries")
    instances = test.pop("instances")
    platforms = test.pop("platforms")
    command = f"./tools/devtool -y test {devtool_opts} -- -m nonci --reruns {retries} {test_path}"
    return group(
        label=label,
        command=command,
        artifacts=["./test_results/*"],
        instances=instances,
        platforms=platforms,
        **test,
    )
# Build the CLI on top of the arguments shared by all pipeline generators.
parser = COMMON_PARSER
parser.add_argument(
    "--test",
    required=True,
    choices=list(perf_test.keys()),
    help="performance test",
    action="append",
)
parser.add_argument("--retries", type=int, default=0)
args = parser.parse_args()
group_steps = []
# Look up the config of every requested performance test.
tests = [perf_test[test] for test in args.test]
for test_data in tests:
    # Fill in CLI-provided defaults without clobbering per-test settings.
    test_data.setdefault("platforms", args.platforms)
    test_data.setdefault("instances", args.instances)
    test_data.setdefault("agents", {"ag": 1})
    test_data["retries"] = args.retries
    # Each rerun needs its own share of the time budget.
    test_data["timeout_in_minutes"] *= args.retries + 1
    test_data = overlay_dict(test_data, args.step_param)
    test_data["retry"] = {
        "automatic": [
            # Agent was lost, retry one time
            # this can happen if we terminate the instance or the agent gets
            # disconnected for whatever reason
            {"exit_status": -1, "limit": 1},
        ]
    }
    group_steps.append(build_group(test_data))
pipeline = {
    "env": {},
    "steps": group_steps,
}
# Emit the generated pipeline as JSON on stdout for Buildkite to consume.
print(pipeline_to_json(pipeline))
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,959
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/performance/test_benchmarks.py
|
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Optional benchmarks-do-not-regress test"""
import json
import logging
import os
import platform
import pytest
from framework import utils
from host_tools.cargo_build import cargo
TARGET_BRANCH = os.environ.get("BUILDKITE_PULL_REQUEST_BASE_BRANCH") or "main"
LOGGER = logging.getLogger(__name__)
def cargo_bench():
    """Executes all benchmarks by running "cargo bench --no-run", finding the executables, and running them pinned to some CPU"""
    # With --message-format json, cargo prints one json object per line; the
    # compiled benchmark binaries show up under the 'executable' key.
    _, stdout, _ = cargo(
        "bench",
        f"--all --quiet --target {platform.machine()}-unknown-linux-musl --message-format json --no-run",
    )
    executables = []
    for line in filter(None, stdout.split("\n")):
        exe = json.loads(line).get("executable")
        if exe:
            executables.append(exe)
    # Run every benchmark binary pinned to CPU 1 and collect its output.
    results = []
    for exe in executables:
        results.append(
            utils.run_cmd(
                f"CARGO_TARGET_DIR=../build/cargo_target taskset -c 1 {exe} --bench"
            ).stdout
        )
    return "".join(results)
@pytest.mark.no_block_pr
@pytest.mark.timeout(600)
def test_no_regression_relative_to_target_branch():
    """
    Run the microbenchmarks in this repository, comparing results from pull
    request target branch against what's achieved on HEAD
    """
    # Remember the PR head commit so we can come back after benchmarking
    # the target branch (usually main).
    _, pr_head_commit_sha, _ = utils.run_cmd("git rev-parse HEAD")
    utils.run_cmd(f"git switch {TARGET_BRANCH}")
    cargo_bench()
    # Back on the PR head: criterion notices the previous run's data and
    # emits a comparison for every benchmark.
    utils.run_cmd(f"git checkout {pr_head_commit_sha}")
    criterion_output = cargo_bench()
    # Criterion separates per-benchmark reports by two newlines; keep only
    # the reports that explicitly contain 'Performance has regressed.'.
    reports = criterion_output.split("\n\n")
    regressions_only = "\n\n".join(
        report for report in reports if "Performance has regressed." in report
    )
    # Log the mean runtime of every benchmark for posterity.
    for benchmark in os.listdir("../build/cargo_target/criterion"):
        estimates_path = f"../build/cargo_target/criterion/{benchmark}/new/estimates.json"
        with open(estimates_path, encoding="utf-8") as file:
            average_ns = json.load(file)["mean"]["point_estimate"]
        LOGGER.info("%s mean: %iµs", benchmark, average_ns / 1000)
    # Any surviving report means at least one benchmark got slower with
    # the PR changes.
    assert not regressions_only, "\n" + regressions_only
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,960
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/style/test_python.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests ensuring codebase style compliance for Python."""
import sys
from subprocess import run
import pytest
@pytest.mark.parametrize("formatter", ["black", "isort"])
def test_python_style(formatter):
    """
    Test that python code passes `formatter`
    """
    # Run the formatter in check-only mode over the tests dir and the repo
    # root; a non-zero exit code (check=True) fails the test.
    cmd = f"{formatter} --check --diff . .."
    run(cmd, shell=True, check=True, stdout=sys.stdout, stderr=sys.stderr)
def test_python_pylint():
    """
    Test that python code passes linter checks.

    Runs pylint over the `tests`, `tools` and `.buildkite` python sources
    from the repository root; a non-zero exit code fails the test.
    """
    # List of linter commands that should be executed for each file
    linter_cmd = (
        # Pylint
        "pylint --jobs=0 --persistent=no --score=no "
        '--output-format=colorized --attr-rgx="[a-z_][a-z0-9_]{1,30}$" '
        '--argument-rgx="[a-z_][a-z0-9_]{1,35}$" '
        '--variable-rgx="[a-z_][a-z0-9_]{1,30}$" --disable='
        "fixme,too-many-instance-attributes,import-error,"
        "too-many-locals,too-many-arguments,consider-using-f-string,"
        "consider-using-with,implicit-str-concat,line-too-long,"
        "broad-exception-raised,duplicate-code tests tools .buildkite/*.py"
    )
    run(
        linter_cmd,
        # we let pytest capture stdout/stderr for us
        stdout=sys.stdout,
        stderr=sys.stderr,
        shell=True,
        # run from the repository root so the relative paths resolve
        cwd="..",
        check=True,
    )
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,961
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/.buildkite/pipeline_cross.py
|
#!/usr/bin/env python3
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Generate Buildkite Cross Snapshot/Restore pipelines dynamically
1. Generate snapshots for each instance and kernel version
2. wait
3. Restore snapshots across instances and kernels
"""
import itertools
from common import DEFAULT_PLATFORMS, group, pipeline_to_json
def restore_step(label, src_instance, src_kv, dst_instance, dst_os, dst_kv):
    """Generate a restore step"""
    # Extra pytest -k filter each destination instance needs (m6a takes none).
    k_val = {
        "m5d.metal": "-k 'not None'",
        "m6i.metal": "-k 'not None'",
        "m6a.metal": "",
    }[dst_instance]
    download = f"buildkite-agent artifact download snapshots/{src_instance}_{src_kv}/* ."
    unpack = f"mv -v snapshots/{src_instance}_{src_kv} snapshot_artifacts"
    run_tests = f"./tools/devtool -y test -- -m nonci {k_val} integration_tests/functional/test_snapshot_restore_cross_kernel.py"
    return {
        "command": [download, unpack, run_tests],
        "label": label,
        "timeout": 30,
        # Route the step to an agent on the destination instance/kernel/os.
        "agents": {"instance": dst_instance, "kv": dst_kv, "os": dst_os},
    }
def cross_steps():
    """Generate group steps"""
    snap_instances = ["m5d.metal", "m6i.metal", "m6a.metal"]
    create_commands = [
        "./tools/devtool -y sh ./tools/create_snapshot_artifact/main.py",
        "mkdir -pv snapshots/{instance}_{kv}",
        "sudo chown -Rc $USER: snapshot_artifacts",
        "mv -v snapshot_artifacts/* snapshots/{instance}_{kv}",
    ]
    groups = [
        group(
            "📸 create snapshots",
            create_commands,
            timeout=30,
            artifact_paths="snapshots/**/*",
            instances=snap_instances,
            platforms=DEFAULT_PLATFORMS,
        ),
        "wait",
    ]
    # allow-list of what instances can be restores on what other instances (in
    # addition to itself)
    supported = {
        "m5d.metal": ["m6i.metal"],
        "m6i.metal": ["m5d.metal"],
    }
    instances_x86_64 = ["m5d.metal", "m6i.metal", "m6a.metal"]
    # https://github.com/firecracker-microvm/firecracker/blob/main/docs/kernel-policy.md#experimental-snapshot-compatibility-across-kernel-versions
    # We currently have nothing for aarch64
    perms_aarch64 = []
    perms_x86_64 = itertools.product(
        instances_x86_64, DEFAULT_PLATFORMS, instances_x86_64, DEFAULT_PLATFORMS
    )

    def keep(src_instance, src_kv, dst_instance, dst_kv):
        # src == dst is exercised by the regular integration tests already.
        if src_instance == dst_instance and src_kv == dst_kv:
            return False
        # Restoring on an older kernel (e.g. 5.10 -> 4.14) is not supported.
        if src_kv > dst_kv:
            return False
        # Cross-instance restore is only allowed within the allow-list.
        return src_instance == dst_instance or dst_instance in supported.get(
            src_instance, []
        )

    steps = []
    for src_instance, (_, src_kv), dst_instance, (dst_os, dst_kv) in itertools.chain(
        perms_x86_64, perms_aarch64
    ):
        if not keep(src_instance, src_kv, dst_instance, dst_kv):
            continue
        steps.append(
            restore_step(
                f"🎬 {src_instance} {src_kv} ➡️ {dst_instance} {dst_kv}",
                src_instance,
                src_kv,
                dst_instance,
                dst_os,
                dst_kv,
            )
        )
    groups.append({"group": "🎬 restore across instances and kernels", "steps": steps})
    return groups
# Entry point: emit the full cross snapshot/restore pipeline as JSON on stdout.
if __name__ == "__main__":
    pipeline = {"steps": cross_steps()}
    print(pipeline_to_json(pipeline))
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,962
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tests/integration_tests/performance/test_snapshot_perf.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Basic tests scenarios for snapshot save/restore."""
import platform
import pytest
from framework.artifacts import kernel_params
from framework.properties import global_props
from framework.stats import consumer, producer, types
from framework.utils import CpuMap
# How many latencies do we sample per test.
SAMPLE_COUNT = 3
# Conversion factor: microseconds per millisecond.
USEC_IN_MSEC = 1000
# Host CPU architecture string (e.g. "x86_64", "aarch64").
PLATFORM = platform.machine()
# measurement without pass criteria = test is infallible but still submits metrics. Nice!
LATENCY_MEASUREMENT = types.MeasurementDef.create_measurement(
    "latency",
    "ms",
    [],
    {},
)
# The guest kernel does not "participate" in snapshot restore, so just pick
# some arbitrary one
only_one_guest_kernel = pytest.mark.parametrize(
    "guest_kernel", list(kernel_params("vmlinux-4.14*")), indirect=True
)
def snapshot_create_producer(vm, target_version):
    """Take a full snapshot of *vm* and return the reported creation latency in ms."""
    vm.snapshot_full(target_version=target_version)
    flushed = vm.flush_metrics()
    # Firecracker reports the latency in microseconds; convert to ms.
    latency_ms = flushed["latencies_us"]["full_create_snapshot"] / USEC_IN_MSEC
    print(f"Latency {latency_ms} ms")
    return latency_ms
def snapshot_resume_producer(microvm_factory, snapshot):
    """
    Restore a microVM from *snapshot* and return the reported load latency.

    Returns the first positive `load_snapshot` latency (in milliseconds)
    found in the metrics, or 0 if none was recorded.
    """
    microvm = microvm_factory.build()
    microvm.spawn()
    microvm.restore_from_snapshot(snapshot, resume=True)
    # Attempt to connect to resumed microvm.
    # Verify if guest can run commands.
    exit_code, _, _ = microvm.ssh.run("ls")
    assert exit_code == 0
    value = 0
    # Parse all metric data points in search of load_snapshot time.
    metrics = microvm.get_all_metrics()
    for data_point in metrics:
        cur_value = data_point["latencies_us"]["load_snapshot"] / USEC_IN_MSEC
        if cur_value > 0:
            value = cur_value
            break
    # BUG FIX: the original printed the literal text "Latency {value} ms"
    # because the f-string prefix was missing.
    print(f"Latency {value} ms")
    return value
@only_one_guest_kernel
def test_older_snapshot_resume_latency(
    microvm_factory,
    guest_kernel,
    rootfs,
    firecracker_release,
    io_engine,
    st_core,
):
    """
    Test scenario: Older snapshot load performance measurement.

    With each previous firecracker version, create a snapshot and try to
    restore in current version.
    """
    # due to bug fixed in commit 8dab78b
    firecracker_version = firecracker_release.version_tuple
    if global_props.instance == "m6a.metal" and firecracker_version < (1, 3, 3):
        pytest.skip("incompatible with AMD and Firecracker <1.3.3")

    vcpus, guest_mem_mib = 2, 512
    # BUG FIX: the original derived the config name via
    # `microvm_cfg.strip(".json")`, which strips any of the characters
    # {'.', 'j', 's', 'o', 'n'} from both ends rather than removing the
    # suffix; build the two names explicitly instead.
    guest_config = f"{vcpus}vcpu_{guest_mem_mib}mb"
    microvm_cfg = f"{guest_config}.json"
    vm = microvm_factory.build(
        guest_kernel,
        rootfs,
        monitor_memory=False,
        fc_binary_path=firecracker_release.path,
        jailer_binary_path=firecracker_release.jailer,
    )
    vm.spawn()
    vm.basic_config(vcpu_count=vcpus, mem_size_mib=guest_mem_mib)
    vm.add_net_iface()
    vm.start()
    # Check if guest works.
    exit_code, _, _ = vm.ssh.run("ls")
    assert exit_code == 0
    snapshot = vm.snapshot_full()

    st_core.name = "older_snapshot_resume_latency"
    st_core.iterations = SAMPLE_COUNT
    st_core.custom["guest_config"] = guest_config
    st_core.custom["io_engine"] = io_engine
    st_core.custom["snapshot_type"] = "FULL"
    prod = producer.LambdaProducer(
        func=snapshot_resume_producer,
        func_kwargs={
            "microvm_factory": microvm_factory,
            "snapshot": snapshot,
        },
    )
    cons = consumer.LambdaConsumer(
        func=lambda cons, result: cons.consume_stat(
            st_name="max", ms_name="latency", value=result
        ),
        func_kwargs={},
    )
    cons.set_measurement_def(LATENCY_MEASUREMENT)
    st_core.add_pipe(producer=prod, consumer=cons, tag=microvm_cfg)
    # Gather results and verify pass criteria.
    st_core.run_exercise()
@only_one_guest_kernel
def test_snapshot_create_latency(
    microvm_factory,
    guest_kernel,
    rootfs,
    st_core,
):
    """
    Measure the latency of creating a Full snapshot.

    Pins the VMM, API and vCPU threads to dedicated host CPUs before
    measuring so that scheduling noise doesn't pollute the numbers.
    """
    guest_mem_mib = 512
    vcpus = 2
    # BUG FIX: the original used `microvm_cfg.strip(".json")`, which strips
    # any of the characters {'.', 'j', 's', 'o', 'n'} from both ends rather
    # than removing the suffix; build the two names explicitly instead.
    guest_config = f"{vcpus}vcpu_{guest_mem_mib}mb"
    microvm_cfg = f"{guest_config}.json"
    vm = microvm_factory.build(guest_kernel, rootfs, monitor_memory=False)
    vm.spawn()
    vm.basic_config(
        vcpu_count=vcpus,
        mem_size_mib=guest_mem_mib,
    )
    vm.start()

    # Check if the needed CPU cores are available. We have the API
    # thread, VMM thread and then one thread for each configured vCPU.
    assert CpuMap.len() >= 2 + vm.vcpus_count

    # Pin uVM threads to physical cores.
    current_cpu_id = 0
    assert vm.pin_vmm(current_cpu_id), "Failed to pin firecracker thread."
    current_cpu_id += 1
    assert vm.pin_api(current_cpu_id), "Failed to pin fc_api thread."
    for idx_vcpu in range(vm.vcpus_count):
        current_cpu_id += 1
        # BUG FIX: the original pinned to `current_cpu_id + idx_vcpu`,
        # which skipped every other core (2, 4, ...) and exceeded the
        # `2 + vcpus_count` cores asserted above; pin consecutively.
        assert vm.pin_vcpu(
            idx_vcpu, current_cpu_id
        ), f"Failed to pin fc_vcpu {idx_vcpu} thread."

    st_core.name = "snapshot_create_SnapshotType.FULL_latency"
    st_core.iterations = SAMPLE_COUNT
    st_core.custom["guest_config"] = guest_config
    st_core.custom["snapshot_type"] = "FULL"
    prod = producer.LambdaProducer(
        func=snapshot_create_producer,
        func_kwargs={
            "vm": vm,
            "target_version": None,
        },
    )
    cons = consumer.LambdaConsumer(
        func=lambda cons, result: cons.consume_stat(
            st_name="max", ms_name="latency", value=result
        ),
        func_kwargs={},
    )
    cons.set_measurement_def(LATENCY_MEASUREMENT)
    st_core.add_pipe(producer=prod, consumer=cons, tag=microvm_cfg)
    # Gather results and verify pass criteria.
    st_core.run_exercise()
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,191,963
|
firecracker-microvm/firecracker
|
refs/heads/main
|
/tools/compare_baselines/utils/__init__.py
|
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Utilities for comparing performance baselines"""
from . import comparator, defs, fetcher
|
{"/tests/framework/stats/core.py": ["/tests/framework/stats/consumer.py", "/tests/framework/stats/producer.py"], "/tests/framework/stats/metadata.py": ["/tests/framework/stats/baseline.py", "/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py", "/tests/framework/stats/types.py"], "/tests/framework/stats/types.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/function.py"], "/tests/framework/stats/consumer.py": ["/tests/framework/stats/criteria.py", "/tests/framework/stats/metadata.py", "/tests/framework/stats/types.py"]}
|
44,219,771
|
AbelMRobra/EcoomerceChallenge
|
refs/heads/main
|
/api/urls.py
|
from api import views
from rest_framework import routers
from django.urls import path
from django.conf.urls import include
from rest_framework.authtoken.views import obtain_auth_token
# DefaultRouter auto-generates the standard list/detail CRUD routes for
# every registered viewset (e.g. /users/, /users/<pk>/).
router = routers.DefaultRouter()
router.register(r'users', views.UserViewset)
router.register(r'products', views.ProductViewset)
router.register(r'orders', views.OrderViewset)
router.register(r'orderdetails', views.OrderDetailViewset)
urlpatterns = [
    # All router-generated endpoints are mounted at the API root.
    path('', include(router.urls)),
    # POST credentials here to obtain a DRF auth token.
    path('auth/', obtain_auth_token),
]
|
{"/api/serializers.py": ["/ecommerce/models.py"], "/ecommerce/admin.py": ["/ecommerce/models.py"], "/api/views.py": ["/ecommerce/models.py", "/api/serializers.py"]}
|
44,219,772
|
AbelMRobra/EcoomerceChallenge
|
refs/heads/main
|
/ecommerce/models.py
|
import requests
import numpy as np
from enum import unique
from django.db import models
from django.core.validators import MinValueValidator
from django.db.models.fields import AutoField
# Create your models here.
class Product(models.Model):
    """A sellable product, identified by an externally supplied string ID."""
    # Business-supplied identifier, used directly as the primary key.
    id = models.CharField(max_length=30, verbose_name="Product ID", unique=True, primary_key=True)
    name = models.CharField(max_length=50, verbose_name="Product Name")
    # Unit price; validated to be non-negative.
    price = models.FloatField(validators=[MinValueValidator(0.0)], verbose_name="Product Price")
    # Units currently in stock; validated to be non-negative.
    stock = models.IntegerField(validators=[MinValueValidator(0)], verbose_name="Product Stock")
    class Meta:
        verbose_name = "Product"
        verbose_name_plural = "Products"
        # No two products may share the same (name, price) pair.
        unique_together = (('name', 'price'),)
    def __str__(self):
        """Return 'id: name' for admin/display purposes."""
        return f'{self.id}: {self.name}'
class Order(models.Model):
    """A customer order; its line items are the related OrderDetail rows."""
    id = models.AutoField(primary_key = True, verbose_name="Order ID")
    date_time = models.DateTimeField(verbose_name="Order date time")

    class Meta:
        verbose_name = "Order"
        verbose_name_plural = "Orders"

    def get_total(self):
        """Return the order total (sum of quantity * price), rounded to 2 decimals."""
        details = OrderDetail.objects.filter(order=self)
        # Plain Python aggregation; no need to round-trip through numpy arrays.
        total = sum(d.cuantity * d.product.price for d in details)
        return round(total, 2)

    def get_total_usd(self):
        """Return the order total converted to USD at the 'Dolar Blue' sell rate.

        Raises ValueError if the rate service response does not contain the
        expected quote (the original crashed with UnboundLocalError instead).
        """
        url = "https://www.dolarsi.com/api/api.php?type=valoresprincipales"
        # Fix: add a timeout so a stalled rate service cannot hang the request.
        request = requests.get(url, timeout=10)
        valor_usd_blue = None
        for request_dic in request.json():
            if request_dic['casa']['nombre'] == "Dolar Blue":
                valor_usd_blue = request_dic['casa']['venta']
                break
        if valor_usd_blue is None:
            raise ValueError("'Dolar Blue' quote not found in rate service response")
        # The API formats decimals with a comma.
        rate = float(str(valor_usd_blue).replace(",", "."))
        return round(self.get_total() / rate, 2)

    def __str__(self):
        return f'Order nº: {self.id}'
class OrderDetail(models.Model):
    """One line item of an Order: a product plus the quantity ordered.

    NOTE(review): "cuantity" is a misspelling of "quantity", but it is part of
    the public schema and API payloads, so it is deliberately kept as-is.
    """
    order = models.ForeignKey(Order, on_delete=models.CASCADE, verbose_name="Order", related_name="order_detail")
    product = models.ForeignKey(Product, on_delete=models.CASCADE, verbose_name="Product")
    cuantity = models.IntegerField(validators=[MinValueValidator(1)], verbose_name="Product quantity")
    class Meta:
        verbose_name = "Order detail"
        verbose_name_plural = "Order details"
        # A product may appear at most once per order.
        unique_together = (('order', 'product'),)
    def __str__(self):
        return f'{self.order.id}: {self.product.name}'
|
{"/api/serializers.py": ["/ecommerce/models.py"], "/ecommerce/admin.py": ["/ecommerce/models.py"], "/api/views.py": ["/ecommerce/models.py", "/api/serializers.py"]}
|
44,219,773
|
AbelMRobra/EcoomerceChallenge
|
refs/heads/main
|
/ecommerce/admin.py
|
from django.contrib import admin
from .models import *
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
    """Admin for Product: searchable by ID or name."""
    # Fix: the attribute was misspelled "search_filds"; Django silently
    # ignores unknown attributes, so the search box never appeared.
    search_fields = ('id', 'name')
@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
    """Admin for Order: searchable by ID."""
    # Fix: "search_filds" typo meant the search box never appeared.
    search_fields = ('id',)
@admin.register(OrderDetail)
class OrderDetailAdmin(admin.ModelAdmin):
    """Admin for OrderDetail: searchable by the related product's name."""
    # Fix: "search_filds" typo meant the search box never appeared.
    search_fields = ('product__name',)
|
{"/api/serializers.py": ["/ecommerce/models.py"], "/ecommerce/admin.py": ["/ecommerce/models.py"], "/api/views.py": ["/ecommerce/models.py", "/api/serializers.py"]}
|
44,219,774
|
AbelMRobra/EcoomerceChallenge
|
refs/heads/main
|
/api/serializers.py
|
from rest_framework import serializers
from rest_framework.authtoken.models import Token
from ecommerce.models import Product, Order, OrderDetail
from django.contrib.auth.models import User
class UserSerialiazers(serializers.ModelSerializer):
    """Serializer for Django auth users; mints a DRF auth token on creation.

    NOTE(review): the class name misspells "Serializers", but it is imported
    by api.views, so renaming it would break callers.
    """
    class Meta:
        model = User
        fields = ('id', 'username', 'email', 'password')
        # Never echo the password back; allow updates that omit it.
        extra_kwargs = {'password': {'write_only': True, 'required': False}}
    def create(self, validated_data):
        # create_user() hashes the password; the Token backs API auth.
        user = User.objects.create_user(**validated_data)
        Token.objects.create(user=user)
        return user
class ProductSerializers(serializers.ModelSerializer):
    """Full read/write serializer for Product."""
    class Meta:
        model = Product
        fields = ('id', 'name', 'price', 'stock')
class OrderDetailFullSerializers(serializers.ModelSerializer):
    """Line-item serializer including its parent order reference."""
    class Meta:
        model = OrderDetail
        fields = ('id','cuantity', 'order', 'product')
class OrderDetailSerializers(serializers.ModelSerializer):
    """Line-item serializer without the order (for nesting inside an order)."""
    class Meta:
        model = OrderDetail
        fields = ('id','cuantity', 'product')
class OrderFullSerializers(serializers.ModelSerializer):
    """Order serializer with totals and nested line items."""
    # "order_detail" matches the related_name on OrderDetail.order.
    order_detail = OrderDetailSerializers(many=True)
    class Meta:
        model = Order
        # get_total / get_total_usd are computed model methods.
        fields = ('id', 'date_time', 'get_total', 'get_total_usd', 'order_detail')
class OrderSerializers(serializers.ModelSerializer):
    """Order summary: date plus computed ARS and USD totals."""
    class Meta:
        model = Order
        fields = ('id', 'date_time', 'get_total', 'get_total_usd',)
class OrderDateSerializers(serializers.Serializer):
    """Validates just the mandatory date_time field of an order payload."""
    date_time = serializers.DateTimeField(required = True)
|
{"/api/serializers.py": ["/ecommerce/models.py"], "/ecommerce/admin.py": ["/ecommerce/models.py"], "/api/views.py": ["/ecommerce/models.py", "/api/serializers.py"]}
|
44,219,775
|
AbelMRobra/EcoomerceChallenge
|
refs/heads/main
|
/api/views.py
|
from ecommerce.models import Product, Order
from .serializers import *
from django.contrib.auth.models import User
from rest_framework import viewsets, status
from rest_framework.response import Response
from rest_framework.decorators import action
class UserViewset(viewsets.ModelViewSet):
    """User endpoint: creation works, but listing/retrieval is blocked."""

    queryset = User.objects.all()
    serializer_class = UserSerialiazers

    def _not_available(self):
        # Shared 400 response for the disabled read operations.
        return Response({'massege': 'Not available'}, status=status.HTTP_400_BAD_REQUEST)

    def list(self, request, *args, **kwargs):
        return self._not_available()

    def retrieve(self, request, *args, **kwargs):
        return self._not_available()
class ProductViewset(viewsets.ModelViewSet):
    """Product CRUD with a stock-only shortcut via the 'only_stock' field."""

    queryset = Product.objects.all()
    serializer_class = ProductSerializers

    def update(self, request, *args, **kwargs):
        """Update a product.

        When the payload carries 'only_stock', only the stock counter
        changes; otherwise the standard ModelViewSet update runs.
        """
        if 'only_stock' in request.data:
            try:
                cantidad = int(request.data.dict()['only_stock'])
            except (KeyError, TypeError, ValueError):
                response = {'massege': 'Cuantity must be a number'}
                return Response(response, status=status.HTTP_400_BAD_REQUEST)
            instance = self.get_object()
            instance.stock = cantidad
            # Fix: the original never called save(), so the new stock value
            # was silently discarded.
            instance.save()
            response = {'massege': 'Stock changed success!'}
            return Response(response, status=status.HTTP_200_OK)
        # Fix: the original dropped the superclass response (implicitly
        # returning None), which breaks DRF's response handling.
        return super().update(request, *args, **kwargs)
class OrderDetailViewset(viewsets.ModelViewSet):
    """Order line items: no direct creation; delete/update keep stock in sync."""

    queryset = OrderDetail.objects.all()
    serializer_class = OrderDetailFullSerializers

    def create(self, request, *args, **kwargs):
        # Line items are only created through OrderViewset.create().
        response = {'massege': 'Not available'}
        return Response(response, status=status.HTTP_400_BAD_REQUEST)

    def destroy(self, request, *args, **kwargs):
        """Delete a line item, returning its quantity to the product stock."""
        instance = self.get_object()
        product = Product.objects.get(id=instance.product.id)
        product.stock += instance.cuantity
        product.save()
        instance.delete()
        response = {'massege': 'Success!, stock restored'}
        return Response(response, status=status.HTTP_200_OK)

    def update(self, request, *args, **kwargs):
        """Change a line item's quantity, keeping the product stock in sync."""
        instance = self.get_object()
        product = Product.objects.get(id=instance.product.id)
        try:
            new_quantity = int(request.data.dict()['cuantity'])
        except (KeyError, TypeError, ValueError):
            response = {'massege': 'Cuantity must be a number'}
            return Response(response, status=status.HTTP_400_BAD_REQUEST)
        if new_quantity > (instance.cuantity + product.stock):
            response = {'massege': f'Insufficient stock for {product.name}'}
            return Response(response, status=status.HTTP_400_BAD_REQUEST)
        # Fix: the original validated against the stock but never adjusted
        # it, so quantity changes silently corrupted inventory.
        product.stock = product.stock + instance.cuantity - new_quantity
        product.save()
        instance.cuantity = new_quantity
        instance.save()
        response = {'massege': 'Updates success!'}
        # Fix: success was previously reported with HTTP 400.
        return Response(response, status=status.HTTP_200_OK)
class OrderViewset(viewsets.ModelViewSet):
    """Orders: creation validates every product/quantity pair then debits stock."""

    queryset = Order.objects.all()
    serializer_class = OrderSerializers

    @action(detail=True, methods=["GET"])
    def detail_info(self, request, pk):
        """Return the order together with all of its line items."""
        order = Order.objects.get(id=pk)
        serializer = OrderFullSerializers(order, many=False)
        response = {'Detail': serializer.data}
        return Response(response, status=status.HTTP_200_OK)

    def destroy(self, request, *args, **kwargs):
        """Delete an order, returning every line item's quantity to stock."""
        instance = self.get_object()
        orders_details = OrderDetail.objects.filter(order=instance)
        for order_detail in orders_details:
            product = Product.objects.get(id=order_detail.product.id)
            product.stock += order_detail.cuantity
            product.save()
        instance.delete()
        response = {'massege': 'Success!, stock restored'}
        return Response(response, status=status.HTTP_200_OK)

    def create(self, request, *args, **kwargs):
        """Create an order from flat form data.

        Expects 'date_time' plus one or more product/cuantity pairs; extra
        pairs arrive suffixed by position ('product_1' pairs with
        'cuantity_1', ...).  Every pair is validated (existence, quantity
        > 0, sufficient stock, no duplicates) before anything is written.
        """
        serializer_order = OrderDateSerializers(data=request.data)
        if not serializer_order.is_valid():
            # Fix: the original fell off the end (implicitly returning None)
            # on a missing/invalid date, which crashes DRF.
            return Response(serializer_order.errors, status=status.HTTP_400_BAD_REQUEST)
        if 'product' not in request.data or 'cuantity' not in request.data:
            # Fix: same silent fall-through when no line items were sent.
            response = {'massege': 'product and cuantity are required'}
            return Response(response, status=status.HTTP_400_BAD_REQUEST)
        data = request.data.dict()
        product_list = []
        product_to_update = []
        for key_name in data.keys():
            if 'product' not in key_name:
                continue
            # Positional keys: 'product_2' pairs with 'cuantity_2'.
            if len(key_name.split("_")) > 1:
                suffix = key_name.split("_")[1]
                producto_verificar = data[f'product_{suffix}']
                cuantity_raw = data.get(f'cuantity_{suffix}')
            else:
                producto_verificar = data['product']
                cuantity_raw = data.get('cuantity')
            try:
                cantidad_verificar = int(cuantity_raw)
            except (TypeError, ValueError):
                response = {'massege': 'Cuantity must be a number'}
                return Response(response, status=status.HTTP_400_BAD_REQUEST)
            # Product must exist.
            try:
                producto = Product.objects.get(id=producto_verificar)
            except Product.DoesNotExist:
                response = {'massege': f'Not match for product ID {producto_verificar}'}
                return Response(response, status=status.HTTP_400_BAD_REQUEST)
            # Quantity must be positive.
            if cantidad_verificar == 0:
                response = {'massege': f'Not cuantity for {producto.name}'}
                return Response(response, status=status.HTTP_400_BAD_REQUEST)
            # Enough stock must be available.
            if cantidad_verificar > producto.stock:
                response = {'massege': f'Insufficient stock for {producto.name}'}
                return Response(response, status=status.HTTP_400_BAD_REQUEST)
            # Each product may appear only once per order.
            if producto_verificar in product_list:
                response = {'massege': f'Duplicate product: {producto_verificar}'}
                return Response(response, status=status.HTTP_400_BAD_REQUEST)
            product_list.append(producto_verificar)
            product_to_update.append((producto.id, cantidad_verificar))
        # All pairs validated: create the order and debit the stock.
        new_order = Order.objects.create(date_time=serializer_order.data.get("date_time"))
        for product_id, quantity in product_to_update:
            product = Product.objects.get(id=product_id)
            OrderDetail.objects.create(order=new_order, product=product, cuantity=quantity)
            product.stock -= quantity
            product.save()
        serializer_order_detail = OrderFullSerializers(new_order, many=False)
        response = {'massege': 'Order created success!', 'result': serializer_order_detail.data}
        return Response(response, status=status.HTTP_201_CREATED)
|
{"/api/serializers.py": ["/ecommerce/models.py"], "/ecommerce/admin.py": ["/ecommerce/models.py"], "/api/views.py": ["/ecommerce/models.py", "/api/serializers.py"]}
|
44,219,776
|
AbelMRobra/EcoomerceChallenge
|
refs/heads/main
|
/ecommerce/migrations/0002_auto_20211104_1610.py
|
# Generated by Django 3.1.7 on 2021-11-04 19:10
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: raise the OrderDetail quantity floor to 1 and enforce
    one line item per (order, product) pair."""
    dependencies = [
        ('ecommerce', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='orderdetail',
            name='cuantity',
            field=models.IntegerField(validators=[django.core.validators.MinValueValidator(1)], verbose_name='Product quantity'),
        ),
        migrations.AlterUniqueTogether(
            name='orderdetail',
            unique_together={('order', 'product')},
        ),
    ]
|
{"/api/serializers.py": ["/ecommerce/models.py"], "/ecommerce/admin.py": ["/ecommerce/models.py"], "/api/views.py": ["/ecommerce/models.py", "/api/serializers.py"]}
|
44,219,777
|
AbelMRobra/EcoomerceChallenge
|
refs/heads/main
|
/ecommerce/migrations/0001_initial.py
|
# Generated by Django 3.1.7 on 2021-11-04 18:17
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema: Order, Product, and OrderDetail tables."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='Order ID')),
                ('date_time', models.DateTimeField(verbose_name='Order date time')),
            ],
            options={
                'verbose_name': 'Order',
                'verbose_name_plural': 'Orders',
            },
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.CharField(max_length=30, primary_key=True, serialize=False, unique=True, verbose_name='Product ID')),
                ('name', models.CharField(max_length=50, verbose_name='Product Name')),
                ('price', models.FloatField(validators=[django.core.validators.MinValueValidator(0.0)], verbose_name='Product Price')),
                ('stock', models.IntegerField(validators=[django.core.validators.MinValueValidator(0)], verbose_name='Product Stock')),
            ],
            options={
                'verbose_name': 'Product',
                'verbose_name_plural': 'Products',
                'unique_together': {('name', 'price')},
            },
        ),
        migrations.CreateModel(
            name='OrderDetail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cuantity', models.IntegerField(validators=[django.core.validators.MinValueValidator(0)], verbose_name='Product quantity')),
                ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ecommerce.order', verbose_name='Order')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ecommerce.product', verbose_name='Product')),
            ],
            options={
                'verbose_name': 'Order detail',
                'verbose_name_plural': 'Order details',
            },
        ),
    ]
|
{"/api/serializers.py": ["/ecommerce/models.py"], "/ecommerce/admin.py": ["/ecommerce/models.py"], "/api/views.py": ["/ecommerce/models.py", "/api/serializers.py"]}
|
44,360,120
|
lyf7115/Winglets
|
refs/heads/main
|
/testAPI.py
|
import Winglets
import json
## Prepare Data
# Load the clustered dots from testFile.json and also build the array form
# ([[x, y], ...] per cluster) accepted by the array-based API variants.
dataArray = []
with open('./testFile.json', 'r') as f:  # fix: file handle was never closed
    dataDict = json.loads(f.read())
for curKey in dataDict['dots'].keys():
    curArrDictData = dataDict['dots'][curKey]
    curKeyArr = [[dot['x'], dot['y']] for dot in curArrDictData]
    dataArray.append(curKeyArr)
## Test Circle
# Winglets.drawCirlce(dataArray, ['#d7191c', '#fdae61', '#abdda4','#2b83ba'], False)
# Winglets.drawCirlce(dataDict['dots'], ['#d7191c', '#fdae61', '#abdda4','#2b83ba'], False)
# Winglets.drawCirlce(dataArray, ['#d7191c', '#fdae61', '#abdda4','#2b83ba'])
# Winglets.drawCirlce(dataDict['dots'], ['#d7191c', '#fdae61', '#abdda4','#2b83ba'])
# Winglets.drawCirlce(dataDict['dots'], ['#d7191c', '#abdda4','#2b83ba'])
## Test Winglets
# Winglets.drawWinglets(dataArray, ['#d7191c', '#fdae61', '#abdda4','#2b83ba'])
# Winglets.drawWinglets(dataDict['dots'], ['#d7191c', '#fdae61', '#abdda4','#2b83ba'])
Winglets.drawWinglets(dataDict['dots'], ['#399939', '#D5241F', '#2073AA','#EF7D1B'], False)
# Winglets.drawWinglets(dataArray, ['#d7191c', '#fdae61', '#abdda4'])
## Test CommonFate
# Winglets.drawCommonFate(dataArray, ['#d7191c', '#fdae61', '#abdda4','#2b83ba'])
# Winglets.drawCommonFate(dataDict['dots'], ['#d7191c', '#fdae61', '#abdda4','#2b83ba'])
# Winglets.drawCommonFate(dataDict['dots'], ['#d7191c', '#fdae61', '#abdda4'])
## Test Proximity
# Winglets.drawProximity(dataArray, ['#d7191c', '#fdae61', '#abdda4','#2b83ba'])
# Winglets.drawProximity(dataDict['dots'], ['#d7191c', '#fdae61', '#abdda4','#2b83ba'])
|
{"/build/lib/Winglets/__init__.py": ["/build/lib/Winglets/handler/handler_all.py"]}
|
44,360,121
|
lyf7115/Winglets
|
refs/heads/main
|
/build/lib/Winglets/handler/handler_all.py
|
# from handler.mongohandler import MONGODBHANDLER
# from handler.kdehandler import KDEHandler
# from handler.wingletstephandler import WingletsStepHandler
# from handler.drawhandler import DrawAllHandler
from .kdehandler import KDEHandler
from .wingletstephandler import WingletsStepHandler
from .drawhandler import DrawAllHandler
# myDB = MONGODBHANDLER()
# myDB.connectDB('First', 'localhost', 27017)
# myKdeHandler = KDEHandler()
# wingletsStepHandler = WingletsStepHandler()
# cv = Canvas(root, bg='white')
# class DataInputHandler():
# def __init__(self, inputType):
# self.dataInputType = inputType
# myDB = MONGODBHANDLER()
# myDB.connectDB('First', 'localhost', 27017)
class OperationHandler():
    """Facade wiring the KDE, winglet-step, and drawing handlers together."""
    def __init__(self):
        # self.curData = DataInputHandler()
        self.kdeHandler = KDEHandler()
        self.wingletsStepHandler = WingletsStepHandler()
        self.drawHandler = DrawAllHandler()
        # DrawAllHandler.init() hands back the five specialised sub-drawers.
        self.drawCircleHandler, self.drawKDEHandler, self.drawMainContourHandler, self.drawContourHandler, self.drawWingletsHandler = self.drawHandler.init()
    def drawWinglets(self, data):
        # Full pipeline: KDE clustering, circles, contours, then winglets.
        # The call order matters: getInfo() feeds the drawers before they run.
        # dots = myDB.getDots(fieldsName)['dots']
        modifiedDots, clusterInfo, globalMaxDensityPoints, proximityPoints = self.kdeHandler.computeKDE(data)
        self.drawHandler.getInfo(clusterInfo['clusters'], globalMaxDensityPoints, proximityPoints)
        self.drawCircleHandler.drawCircleTest(clusterInfo['clusters'], globalMaxDensityPoints)
        # drawCircleHandler.drawProximityCircle(proximityPoints)
        # drawCircleHandler.drawCommonFateCircle(clusterInfo['clusters'])
        # drawKDEHandler.drawKDEMap(clusterInfo['clusters'])
        self.drawContourHandler.drawContour(clusterInfo['clusters'])
        self.drawMainContourHandler.drawMainContour(clusterInfo['clusters'])
        # print('*******')
        curClusterInfo, mapClassIdDotIndexStroke, liMainContour = self.wingletsStepHandler.startDrawWinglets(data, clusterInfo)
        self.drawMainContourHandler.drawTwoPointLine(curClusterInfo, mapClassIdDotIndexStroke)
        self.drawWingletsHandler.generateWings(curClusterInfo, mapClassIdDotIndexStroke)
    def endDraw(self):
        # Finalise/flush the drawing backend.
        self.drawHandler.endDraw()
# def drawWinglets(self):
# def outputDots(fieldsName):
# drawHandler = DrawAllHandler()
# drawCircleHandler, drawKDEHandler, drawMainContourHandler, drawContourHandler, drawWingletsHandler = drawHandler.init()
# dots = myDB.getDots(fieldsName)['dots']
# modifiedDots, clusterInfo, globalMaxDensityPoints, proximityPoints = myKdeHandler.computeKDE(dots)
# drawHandler.getInfo(clusterInfo['clusters'], globalMaxDensityPoints, proximityPoints)
# drawCircleHandler.drawCircleTest(clusterInfo['clusters'], globalMaxDensityPoints)
# # drawCircleHandler.drawProximityCircle(proximityPoints)
# # drawCircleHandler.drawCommonFateCircle(clusterInfo['clusters'])
# # drawKDEHandler.drawKDEMap(clusterInfo['clusters'])
# drawContourHandler.drawContour(clusterInfo['clusters'])
# drawMainContourHandler.drawMainContour(clusterInfo['clusters'])
# print('*******')
# curClusterInfo, mapClassIdDotIndexStroke, liMainContour = wingletsStepHandler.startDrawWinglets(dots, clusterInfo)
# drawMainContourHandler.drawTwoPointLine(curClusterInfo, mapClassIdDotIndexStroke)
# drawWingletsHandler.generateWings(curClusterInfo, mapClassIdDotIndexStroke)
# drawHandler.endDraw()
|
{"/build/lib/Winglets/__init__.py": ["/build/lib/Winglets/handler/handler_all.py"]}
|
44,360,122
|
lyf7115/Winglets
|
refs/heads/main
|
/build/lib/Winglets/__init__.py
|
import tornado.ioloop
import tornado.options
import tornado.httpserver
import tornado.web
from tornado.options import options, define
import os
# import sys
# sys.path.insert(0, './')
from .handler.handler_all import *
# from handler import handler_all as AllHandler
# from .handler.handler_all import OperationHandler
# from handler.handler_all import OperationHandler
# import handler.handler_all as AllHandler
# setting = dict(
# static_path=os.path.join(os.path.dirname(__file__), './'),
# template_path=os.path.join(os.path.dirname(__file__), './'),
# )
# url = []
# application = tornado.web.Application(
# handlers=url,
# debug=True,
# **setting
# )
# serverPort = 30001
# define("port", default=serverPort, help="run on the given port", type=int)
# def main():
# tornado.options.parse_command_line()
# http_server = tornado.httpserver.HTTPServer(application)
# print('Development server is running at http://127.0.0.1:%s/' % options.port)
# print('Quit the server with Control-C')
# tornado.ioloop.IOLoop.instance().start()
# if __name__ == '__main__':
# testName = 'dots'
# outputDots(testName)
# main()
def draw(data, dataInputType='normal Array'):
    """Render winglets for *data*; dataInputType is only echoed for logging."""
    handler = OperationHandler()
    handler.drawWinglets(data)
    # Both branches of the original if/else printed the same message, so the
    # conditional is collapsed; output is unchanged.
    print('input type', dataInputType)
    handler.endDraw()
# main()
|
{"/build/lib/Winglets/__init__.py": ["/build/lib/Winglets/handler/handler_all.py"]}
|
44,401,686
|
Denvernoell/excel_helper
|
refs/heads/master
|
/streamlit_helper.py
|
import streamlit as st
import pyperclip
import xlwings as xw
from pathlib import Path
# app = xw.App(visible=False, add_book=False)
# Attach to the already-running Excel instance and its active selection.
app = xw.apps.active
wb = app.books.active
sht = wb.sheets.active
rng = wb.selection
ps = sht.api.PageSetup
file_path = Path(wb.fullname)
folder_path = file_path.parent
workbook_name = file_path.name
my_action = st.sidebar.radio(
    'Action', ['PDF', 'Sizing', 'Page Setup', 'Tables'])
st.title('Akel Helper')
st.subheader(my_action)
st.markdown(f"""
Workbook: {workbook_name}
Sheet: {sht.name}
""")
# -------------------------- Addresses -------------------------------------
# NOTE(review): 'Address' is not among the sidebar radio options above, so
# this branch is currently unreachable.
if my_action == 'Address':
    row_absolute = st.checkbox('row absolute')
    column_absolute = st.checkbox('column absolute')
    include_sheetname = st.checkbox('include sheetname')
    external = st.checkbox('external')
    if st.button('Show address'):
        # st.write(address_type)
        my_address = rng.get_address(
            row_absolute=row_absolute,
            column_absolute=column_absolute,
            include_sheetname=include_sheetname,
            external=external
        )
        # Copy to clipboard as well as displaying it.
        pyperclip.copy(my_address)
        st.write(str(my_address))
        # st.markdown(my_address)
# -------------------------- Pdfing -------------------------------------
if my_action == 'PDF':
    import pdfing
    c1, c2 = st.beta_columns(2)
    with c1:
        # One button per export scope; screen updating is paused during export.
        pdf_types = ['current', 'chapter', 'all']
        for t in pdf_types:
            if st.button(t):
                app.screen_updating = False
                pdfing.export_pdf(wb, quantity=t)
                app.screen_updating = True
                st.success(pyperclip.paste())
    with c2:
        # with st.beta_expander("Help"):
        st.video('pdfing.webm')
if my_action == 'Page Setup':
    # Print settings
    import print_settings
    page_type = st.radio('Page Type', ['Table', 'Figure'])
    orienation = st.radio('Orienation', ['Portrait', 'Landscape'])
    size = st.radio('Page Size', ['Normal', 'Extended'])
    width = st.number_input('Width', min_value=1)
    height = st.number_input('Height', min_value=1)
    if st.button('Set Page Size'):
        print_settings.margins(ps, page_type)
        print_settings.print_properties(ps, orienation, size, width, height)
# -------------------------- Colors -------------------------------------
# NOTE(review): 'colors' is not among the sidebar radio options above, so
# this branch is currently unreachable.
if my_action == 'colors':
    if st.button('Display color'):
        import colors
        st.write(colors.cell_color(rng))
# -------------------------- Sizing -------------------------------------
if my_action == 'Sizing':
    import sizing
    a, b = st.beta_columns(2)
    # Button label -> sizing helper that acts on the current selection.
    sizing_dict = {
        'Display Height': sizing.display_height,
        'Set Height': sizing.set_height,
        'Display Width': sizing.display_width,
        'Set Width': sizing.set_width,
    }
    # image_dict = {
    #     'Display Height': '.\\right-arrow-forward.png',
    #     'Set Height': '.\\right-arrow-forward.png',
    #     'Display Width': '.\\right-arrow-forward.png',
    #     'Set Width': '.\\right-arrow-forward.png',
    # }
    # st.image('.\\right-arrow-forward.png')
    c1, c2 = st.beta_columns(2)
    with c1:
        for key, value in sizing_dict.items():
            # st.beta_columns(len(sizing_dict))
            if st.button(key):
                value(rng)
    with c2:
        st.video('Sizing.webm')
# -------------------------- Tables -------------------------------------
if my_action == 'Tables':
    import tables
    import borders
    # Button label -> table formatter that acts on the current selection.
    options_dict = {
        'Highlighter': tables.table_highligher,
        'Grey borders': tables.table_grey_lines,
    }
    c1, c2 = st.beta_columns(2)
    with c1:
        for key, value in options_dict.items():
            # st.beta_columns(len(sizing_dict))
            if st.button(key):
                value(rng)
    with c2:
        st.video('Tables.webm')
|
{"/streamlit_helper.py": ["/pdfing.py", "/print_settings.py", "/sizing.py", "/tables.py", "/borders.py", "/colors.py"], "/borders.py": ["/colors.py"], "/tables.py": ["/borders.py"], "/pdfing.py": ["/colors.py"]}
|
44,401,687
|
Denvernoell/excel_helper
|
refs/heads/master
|
/borders.py
|
def borderer(rng, thickness='thick', placement='all', color='Black'):
    """Add a border of the given thickness and color to the selection.

    placement: 'all' styles the four edge borders, or one of
    'top'/'bottom'/'right'/'left' for a single edge.
    """
    import colors
    weights = {'thin': 2, 'thick': 3}
    try:
        weight = weights[thickness]
    except KeyError:
        # Fix: an unknown thickness previously crashed later with an
        # UnboundLocalError; fail immediately with a clear message.
        raise ValueError(f"unknown thickness: {thickness!r}")
    # Excel border ids for single edges.
    border_ids = {
        'top': 3,
        'bottom': 4,
        'right': 2,
        'left': 1,
    }
    border_color = colors.akel_single(color)
    if placement == 'all':
        # 7 through 10 == xlEdgeTop, xlEdgeBottom, xlEdgeRight, xlEdgeLeft.
        # Fix: the original assigned Weight/Color only once after the loop,
        # so just the last border got styled; style every edge.
        for i in range(7, 11):
            border = rng.api.Borders(i)
            border.Weight = weight
            border.Color = border_color
    else:
        border = rng.api.Borders(border_ids[placement])
        border.Weight = weight
        border.Color = border_color
|
{"/streamlit_helper.py": ["/pdfing.py", "/print_settings.py", "/sizing.py", "/tables.py", "/borders.py", "/colors.py"], "/borders.py": ["/colors.py"], "/tables.py": ["/borders.py"], "/pdfing.py": ["/colors.py"]}
|
44,401,688
|
Denvernoell/excel_helper
|
refs/heads/master
|
/sizing.py
|
# import xlwings as xw
# wb = xw.books.active
# sht = wb.sheets.active
# rng = wb.selection
def display_height(rng):
    """Write each visible row's height into that row's cells."""
    visible = (r for r in rng.rows if not r.api.EntireRow.Hidden)
    for current_row in visible:
        current_row.value = current_row.row_height
def set_height(rng):
    """Set each visible row's height to the numeric value found in that row."""
    for current_row in rng.rows:
        if current_row.api.EntireRow.Hidden:
            continue
        new_height = current_row.value
        if new_height is not None:
            current_row.row_height = new_height
def display_width(rng):
    """Write each visible column's width into that column's cells.

    Fix: visibility was checked via EntireRow on a column range; column
    visibility lives on EntireColumn.
    """
    for column in rng.columns:
        if column.api.EntireColumn.Hidden == False:
            column.value = column.column_width
def set_width(rng):
    """Set each visible column's width to the numeric value found in it.

    Fix: visibility was checked via EntireRow on a column range; column
    visibility lives on EntireColumn.
    """
    for column in rng.columns:
        if column.api.EntireColumn.Hidden == False:
            if column.value != None:
                column.column_width = column.value
|
{"/streamlit_helper.py": ["/pdfing.py", "/print_settings.py", "/sizing.py", "/tables.py", "/borders.py", "/colors.py"], "/borders.py": ["/colors.py"], "/tables.py": ["/borders.py"], "/pdfing.py": ["/colors.py"]}
|
44,401,689
|
Denvernoell/excel_helper
|
refs/heads/master
|
/tables.py
|
def table_highligher(rng):
    """Shade every other visible row of the selection light blue."""
    from itertools import cycle
    shades = cycle([None, (231, 238, 245)])
    for current_row in rng.rows:
        if current_row.api.EntireRow.Hidden:
            continue
        current_row.color = next(shades)
def table_grey_lines(rng):
    """Makes grey top and bottom lines for each line in table"""
    import borders
    # The first and last rows (header/footer) are excluded.
    inner_rows = list(rng.rows)[1:-1]
    for current_row in inner_rows:
        if current_row.api.EntireRow.Hidden:
            continue
        for edge in ('top', 'bottom'):
            borders.borderer(current_row, thickness='thin',
                             placement=edge, color='Grey3')
|
{"/streamlit_helper.py": ["/pdfing.py", "/print_settings.py", "/sizing.py", "/tables.py", "/borders.py", "/colors.py"], "/borders.py": ["/colors.py"], "/tables.py": ["/borders.py"], "/pdfing.py": ["/colors.py"]}
|
44,401,690
|
Denvernoell/excel_helper
|
refs/heads/master
|
/colors.py
|
from math import floor
# Company palette: color name -> (R, G, B) tuple.
akel_colors = {
    "Yellow1": (255, 255, 204),
    "Orange1": (253, 233, 217),
    "Blue1": (79, 129, 189),
    "Red1": (192, 80, 77),
    "Grey1": (217, 217, 217),
    "Grey2": (128, 128, 128),
    "Grey3": (191, 191, 191),
    "DarkGreen": (0, 176, 80),
    "Purple": (112, 48, 160),
    "Black": (0, 0, 0),
}
def cell_color(rng):
    """Return the fill color of the given range."""
    fill = rng.color
    return fill
def akel_rgb(color):
    """Return the named palette color as an (R, G, B) tuple."""
    return akel_colors[color]
def akel_single(color):
    """Return the named palette color as Excel's packed single integer."""
    return to_single(akel_rgb(color))
def to_single(color):
    """Pack an (R, G, B) tuple into Excel's single integer (BGR byte order)."""
    r, g, b = color[0], color[1], color[2]
    return b * 65536 + g * 256 + r
def to_rgb(color):
    """Unpack Excel's single (BGR-packed) color integer into an (R, G, B) tuple.

    Fix: the docstring previously described the inverse conversion
    (it said "Returns single color from RGB color").
    """
    return (
        floor(color % 256),
        floor((color / 256) % 256),
        floor((color / (256 * 256) % 256))
    )
|
{"/streamlit_helper.py": ["/pdfing.py", "/print_settings.py", "/sizing.py", "/tables.py", "/borders.py", "/colors.py"], "/borders.py": ["/colors.py"], "/tables.py": ["/borders.py"], "/pdfing.py": ["/colors.py"]}
|
44,401,691
|
Denvernoell/excel_helper
|
refs/heads/master
|
/pdfing.py
|
# from PyPDF2 import PdfFileReader, PdfFileWriter
# from pathlib import Path
# import os
# import pyperclip
def create_room(file_path):
    """Move an existing file at *file_path* into an OldPdfs sibling folder.

    Frees the name so a new file can be written at the same path.  Archived
    copies are prefixed 1st/2nd/3rd/... so successive versions never collide.
    Does nothing (silently) when *file_path* does not exist.
    """
    import os
    from pathlib import Path  # fix: os/Path were only imported in comments above
    p = Path(file_path)
    os.makedirs(p.parent / "OldPdfs", exist_ok=True)
    ordinals = ["1st", "2nd", "3rd"] + [f"{i}th" for i in range(4, 100)]
    for ordinal in ordinals:
        replacement_path = p.parent / "OldPdfs" / f"{ordinal}{p.name}"
        if replacement_path.exists():
            # Fix: on POSIX os.rename silently overwrites, so the original
            # never advanced past "1st"; check the slot explicitly.
            continue
        try:
            os.rename(p, replacement_path)
        except OSError:
            # e.g. *file_path* does not exist -- nothing to archive.
            pass
        break
# create_room("P:\Support\Admin\Timesheets\Denver Noell_MISC\work_scripts\Akel_helper\Replacent_alternatives_071921.pdf")
def reorder_pdf(file_path, order_list):
    """Rewrite the PDF at *file_path* with its pages in *order_list* order.

    *order_list* holds 1-based page numbers; numbers outside the document's
    page range are skipped (matching the original's silent behavior).
    """
    from PyPDF2 import PdfFileReader, PdfFileWriter
    # Fix: the original read from an undefined `replacement_path` and wrote
    # to an undefined `p`; both should be the supplied file_path.
    source = PdfFileReader(str(file_path))
    pdf_writer = PdfFileWriter()
    page_count = source.getNumPages()
    for new_page in order_list:
        # Direct index instead of the original O(n^2) scan per page.
        if 1 <= new_page <= page_count:
            pdf_writer.addPage(source.getPage(new_page - 1))
    with open(file_path, 'wb') as out:
        pdf_writer.write(out)
# reorder_pdf(".pdf", [3, 6, 9, 1, 4, 7, 2, 5, 8])
def extract_pages(pdf_path, pages, pdf_name):
    """Write the given 1-based *pages* of *pdf_path* to '<pdf_name>.pdf'."""
    from PyPDF2 import PdfFileReader, PdfFileWriter
    reader = PdfFileReader(pdf_path)
    writer = PdfFileWriter()
    for page_number in pages:
        # Page numbers are 1-based; getPage() is 0-based.
        writer.addPage(reader.getPage(page_number - 1))
    with open(f'{pdf_name}.pdf', 'wb') as out_file:
        writer.write(out_file)
def export_pdf(wb, quantity='current'):
    """Create a PDF from the workbook's dark-green sheets and open it.

    quantity: 'current' (active sheet only), 'chapter' (green sheets between
    the surrounding purple chapter-marker tabs), or 'all' (every green
    sheet).  The resulting path is copied to the clipboard.
    """
    from datetime import datetime
    from pathlib import Path
    import os          # fix: os.makedirs/rename (via create_room) need it in scope
    import re
    import subprocess
    import colors
    import pyperclip   # fix: pyperclip was used below but never imported
    sht = wb.sheets.active
    file_path = Path(wb.fullname)
    folder_path = file_path.parent
    # Strip any trailing _MMDDYY date from the workbook name, then re-date it.
    wb_name = re.sub(r'_\d{6}', '', file_path.stem)
    todays_date = datetime.strftime(datetime.now(), '%m%d%y')
    if quantity == 'current':
        pdf_name = f"{sht.name}_{todays_date}.pdf"
        my_sheets = sht.name
    if quantity == 'chapter':
        # Purple tabs mark chapter starts; green tabs are printable sheets.
        chapter_starts = {
            s.name: s.index for s in wb.sheets if s.api.Tab.Color == colors.akel_single("Purple")}
        chapter_starts['end'] = wb.sheets[-1].index
        green_sheets = {
            s.name: s.index for s in wb.sheets if s.api.Tab.Color == colors.akel_single("DarkGreen")}
        # Current chapter: last purple tab before the active sheet.
        chapter, sht_start = [
            [key, value] for key, value in chapter_starts.items() if value < sht.index][-1]
        sht_end = [value for key, value in chapter_starts.items()
                   if value > sht_start][0]
        my_sheets = [key for key, value in green_sheets.items()
                     if sht_start <= value <= sht_end]
        pdf_name = f"{wb_name}_{chapter.strip()}_{todays_date}.pdf"
    if quantity == 'all':
        pdf_name = f"{wb_name}_{todays_date}.pdf"
        my_sheets = [s.name for s in wb.sheets if s.api.Tab.Color ==
                     colors.akel_single("DarkGreen")]
    pdf_path = str(folder_path / pdf_name)
    create_room(pdf_path)
    wb.to_pdf(pdf_path, include=my_sheets)
    # Fix: the file was previously opened twice (os.system + Popen); open it
    # once, without blocking this process.
    subprocess.Popen([pdf_path], shell=True)
    pyperclip.copy(pdf_path)
# quantities = {
# 'current':
# }
# export_pdf(wb, quantity='current')
def export_word():
    """Convert every .docx in the hard-coded meeting folder to PDF under ./PDFs."""
    import os
    from pathlib import Path  # fix: os/Path were used but never imported
    from docx2pdf import convert
    my_path = r"P:\PROJECTS\Yakima\20562 - 2020 Sewer-Storm System Master Plans\Meetings\210810 - Master Plan Workshop"
    # Fix: the output folder was (re)created inside the loop on every file.
    os.makedirs(f"{my_path}\\PDFs", exist_ok=True)
    for f in os.listdir(my_path):
        p = Path(my_path) / f
        if p.suffix == ".docx":
            pdf_path = f"{p.parent}\\PDFs\\{p.stem}.pdf"
            convert(p, pdf_path)
|
{"/streamlit_helper.py": ["/pdfing.py", "/print_settings.py", "/sizing.py", "/tables.py", "/borders.py", "/colors.py"], "/borders.py": ["/colors.py"], "/tables.py": ["/borders.py"], "/pdfing.py": ["/colors.py"]}
|
44,401,692
|
Denvernoell/excel_helper
|
refs/heads/master
|
/print_settings.py
|
# import pyperclip
# import xlwings as xw
# from pathlib import Path
# app = xw.apps.active
# wb = app.books.active
# sht = wb.sheets.active
# rng = wb.selection
# ps = sht.api.PageSetup
# PrintType = "Table"
# PrintType = "Figure"
def InchesToPoints(inches):
    """Convert inches to PostScript points (72 points per inch)."""
    points_per_inch = 72.0
    return points_per_inch * inches
def margins(ps, PrintType):
    """Apply the standard page margins for a 'Table' or 'Figure' sheet."""
    # 72 points per inch: Table uses 0.5 in (36 pt) sides, Figure uses none.
    if PrintType == "Table":
        side = 36.0
    elif PrintType == "Figure":
        side = 0.0
    else:
        return  # unrecognised type: leave the page setup untouched
    ps.LeftMargin = side
    ps.RightMargin = side
    ps.TopMargin = side
    ps.BottomMargin = side
    ps.HeaderMargin = 0.0
    ps.FooterMargin = 0.0
    ps.CenterHorizontally = True
    ps.CenterVertically = False
def print_properties(ps, orienation, size, width, height):
    """Configure orientation, paper size, and fit-to-page on a PageSetup.

    `orienation` (sic — keyword kept for callers) is "Portrait" or
    "Landscape"; `size` is "Normal" (8.5x11) or "Extended" (11x17).
    Unknown keys raise KeyError, same as the original mapping lookups.
    """
    # Friendly names -> Excel XlPageOrientation / XlPaperSize codes.
    orientation_codes = {"Portrait": 1, "Landscape": 2}
    size_codes = {"Normal": 1, "Extended": 3}
    ps.Orientation = orientation_codes[orienation]
    ps.PaperSize = size_codes[size]
    ps.FitToPagesWide = width
    ps.FitToPagesTall = height
|
{"/streamlit_helper.py": ["/pdfing.py", "/print_settings.py", "/sizing.py", "/tables.py", "/borders.py", "/colors.py"], "/borders.py": ["/colors.py"], "/tables.py": ["/borders.py"], "/pdfing.py": ["/colors.py"]}
|
44,401,693
|
Denvernoell/excel_helper
|
refs/heads/master
|
/total_review.py
|
import xlwings as xw

# Grab the currently active workbook in the running Excel instance.
wb = xw.books.active
# NOTE(review): despite the name, this collects Sheet objects, not name
# strings — [s.name for s in wb.sheets] may have been intended; confirm.
sht_names = [s for s in wb.sheets]
|
{"/streamlit_helper.py": ["/pdfing.py", "/print_settings.py", "/sizing.py", "/tables.py", "/borders.py", "/colors.py"], "/borders.py": ["/colors.py"], "/tables.py": ["/borders.py"], "/pdfing.py": ["/colors.py"]}
|
44,444,160
|
poudyalsraj/pis
|
refs/heads/main
|
/views.py
|
from django.shortcuts import render
from django.http import HttpResponse
from django.views.generic import ListView, DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy, reverse
from django.contrib.messages.views import SuccessMessageMixin
from .models import Staff, Office,Darbandi, Post, Address, Family, Appointment, DesiredPerson, Service, EducationalInfo, LeaveInfo, PunishmentInfo, Treatment, Document
# from .models import Question, Answer
from django.shortcuts import get_object_or_404
from django.db.models import Q
from extra_views import CreateWithInlinesView, UpdateWithInlinesView, InlineFormSetFactory, InlineFormSetView
class AddressInline(InlineFormSetFactory):
    # Inline formset for a staff member's address; row deletion disabled so
    # addresses can only be added/edited from the parent Staff form.
    model = Address
    fields = ['country', 'province', 'district', 'municipal']
    factory_kwargs = {'can_delete': False}
class FamilyInline(InlineFormSetFactory):
    # Inline formset for family details (all model fields exposed).
    model = Family
    fields = '__all__'
    factory_kwargs = {'can_delete': False}
class AppointmentInline(InlineFormSetFactory):
    # Inline formset for the staff member's appointment record.
    model = Appointment
    fields = '__all__'
    factory_kwargs = {'can_delete': False}
class DesiredPersonInline(InlineFormSetFactory):
    # Inline formset for the nominated contact person.
    model = DesiredPerson
    fields = '__all__'
    factory_kwargs = {'can_delete': False}
class ServiceInline(InlineFormSetFactory):
    # Inline formset for service history.
    # NOTE(review): declared but not referenced by StaffCreateView/
    # StaffUpdateView `inlines` below — possibly intentional; confirm.
    model = Service
    fields = '__all__'
    factory_kwargs = {'can_delete': False}
class EducationalInfoInline(InlineFormSetFactory):
    # Inline formset for education records, capped at three rows.
    model = EducationalInfo
    fields = '__all__'
    factory_kwargs = {'can_delete': False, 'max_num': 3}
class DocumentInline(InlineFormSetFactory):
    # Inline formset for uploaded documents attached to a staff member.
    model = Document
    fields = '__all__'
    factory_kwargs = {'can_delete': False}
class StaffCreateView(SuccessMessageMixin, CreateWithInlinesView):
    # Create a Staff record together with all related inline formsets;
    # redirects back to the blank form so several staff can be added in a row.
    model = Staff
    inlines = [AddressInline, FamilyInline,DesiredPersonInline, AppointmentInline, EducationalInfoInline, DocumentInline ]
    fields = ['name', 'post', 'photo', 'ph_num', 'dob', 'office' ]
    template_name = 'OFFICE_PIS/staff_form.html'
    success_url = reverse_lazy('staff-add')
    success_message = "%(name)s is added successfully......"
class StaffUpdateView(SuccessMessageMixin, UpdateWithInlinesView):
    """Edit a Staff record plus its inline related objects.

    SuccessMessageMixin added so the declared success_message is actually
    flashed — StaffCreateView already mixes it in; without the mixin the
    attribute was dead.
    """
    model = Staff
    inlines = [AddressInline, FamilyInline,DesiredPersonInline, AppointmentInline, EducationalInfoInline, DocumentInline ]
    fields = ['name', 'post', 'photo', 'ph_num', 'dob', 'office' ]
    template_name = 'OFFICE_PIS/staff_form.html'
    success_url = reverse_lazy('staff-list')
    success_message = "%(name)s is updated successfully......"
class StaffListView(ListView):
    # Plain list of all staff (default template/context names).
    model = Staff
class OfficeDetailView(ListView):
    # NOTE(review): named "DetailView" but subclasses ListView, so it lists
    # all offices rather than showing one — confirm which was intended.
    model = Office
class DarbandiCreateView(CreateView):
    # Create a sanctioned-post (darbandi) record, then return to the list.
    model= Darbandi
    fields = '__all__'
    success_url = reverse_lazy('darbandi-view')
class DarbandiUpdateView(UpdateView):
    # Edit an existing darbandi record, then return to the list.
    model= Darbandi
    fields = '__all__'
    success_url = reverse_lazy('darbandi-view')
# There is only one darbandi set, so a ListView doubles as its detail page.
class DarbandiListView(ListView):
    model = Darbandi
class SearchResultView(ListView):
    """List Staff whose name or post name contains the `q` GET parameter."""
    model = Staff

    def get_queryset(self):
        """Filter staff by the search query; OR conditions need Q objects."""
        query = self.request.GET.get('q')
        # Staff.post is a ForeignKey to Post, so a case-insensitive text
        # match must traverse to Post's text field; the original
        # `post__icontains` raised FieldError on a relational field.
        object_list = Staff.objects.filter(
            Q(name__icontains=query) |
            Q(post__name__icontains=query)
        )
        return object_list
class PostCreateView(CreateView):
    # Create a Post (job position); redirects back to the blank form.
    model= Post
    fields = '__all__'
    template_name = 'OFFICE_PIS/post_form.html'
    success_url = reverse_lazy('post-add')
"""----------Staff Details View------------"""
class StaffDetailView(DetailView):
    """Staff detail page; the context carries every related record set."""
    model= Staff
    # NOTE(review): DetailView does not use `fields`; likely a leftover.
    fields = '__all__'
    def get_context_data(self, **kwargs):
        # The URL pk selects which staff member's related rows to attach.
        context = super(StaffDetailView, self).get_context_data(**kwargs)
        context['services'] = Service.objects.filter(staff=self.kwargs.get('pk'))
        context['educations'] = EducationalInfo.objects.filter(staff=self.kwargs.get('pk'))
        context['leaveinfos'] = LeaveInfo.objects.filter(staff=self.kwargs.get('pk'))
        context['punishments'] = PunishmentInfo.objects.filter(staff=self.kwargs.get('pk'))
        context['treatments'] = Treatment.objects.filter(staff=self.kwargs.get('pk'))
        context['documents'] = Document.objects.filter(staff=self.kwargs.get('pk'))
        return context
class ServiceCreateView(CreateView):
    """Add a service record for the staff member named by the URL pk."""
    model= Service
    fields = ['start_date', 'end_date', 'appointment_type', 'office', 'post']
    def form_valid(self, form):
        # Bind the new record to the staff member from the URL so the form
        # does not need (and cannot change) a staff selector.
        form.instance.staff_id = self.kwargs.get('pk')
        return super(ServiceCreateView, self).form_valid(form)
    def get_success_url(self):
        # Return to the owning staff member's detail page.
        staff_id= self.kwargs['pk']
        return reverse_lazy ('staff-detail', kwargs = {'pk': staff_id})
class LeaveCreateView(CreateView):
    """Add a leave record for the staff member named by the URL pk."""
    model= LeaveInfo
    fields = ['leave_type', 'Leave_days']
    def form_valid(self, form):
        # Staff comes from the URL, not from the form.
        form.instance.staff_id = self.kwargs.get('pk')
        return super(LeaveCreateView, self).form_valid(form)
    def get_success_url(self):
        # Return to the owning staff member's detail page.
        staff_id= self.kwargs['pk']
        return reverse_lazy ('staff-detail', kwargs = {'pk': staff_id})
class PunishmentCreateView(CreateView):
    """Add a punishment record for the staff member named by the URL pk."""
    model= PunishmentInfo
    fields = ['punishment_type', 'order_date']
    def form_valid(self, form):
        # Staff comes from the URL, not from the form.
        form.instance.staff_id = self.kwargs.get('pk')
        return super(PunishmentCreateView, self).form_valid(form)
    def get_success_url(self):
        # Return to the owning staff member's detail page.
        staff_id= self.kwargs['pk']
        return reverse_lazy ('staff-detail', kwargs = {'pk': staff_id})
class TreatmentCreateView(CreateView):
    """Add a medical-treatment record for the staff member named by the URL pk."""
    model= Treatment
    fields = ['amount', 'date']
    def form_valid(self, form):
        # Staff comes from the URL, not from the form.
        form.instance.staff_id = self.kwargs.get('pk')
        return super(TreatmentCreateView, self).form_valid(form)
    def get_success_url(self):
        # Return to the owning staff member's detail page.
        staff_id= self.kwargs['pk']
        return reverse_lazy ('staff-detail', kwargs = {'pk': staff_id})
def pdfView(request, pk):
    """Serve a stored Document's file inline as application/pdf.

    pk selects a single Document (404 if absent). For a single object use
    get_object_or_404; use .filter(...) when several rows are expected.
    """
    document = get_object_or_404(Document, pk=pk)
    # Context manager closes the handle — the original open(...).read()
    # leaked the file descriptor.
    with open(document.doc_file.path, 'rb') as f:
        pdf_data = f.read()
    return HttpResponse(pdf_data, content_type='application/pdf')
|
{"/views.py": ["/models.py"], "/admin.py": ["/models.py"], "/forms.py": ["/models.py"]}
|
44,444,161
|
poudyalsraj/pis
|
refs/heads/main
|
/admin.py
|
from django.contrib import admin
from django.contrib import admin
from .models import Staff, Office,Darbandi, Post, Address, Family, Appointment, DesiredPerson, Service, EducationalInfo, LeaveInfo, PunishmentInfo, Treatment
# Register every PIS model with one default ModelAdmin in a single call.
@admin.register(Staff, Office,Darbandi, Post, Address, Family, Appointment, DesiredPerson, Service, EducationalInfo, LeaveInfo, PunishmentInfo, Treatment
)
class PersonAdmin(admin.ModelAdmin):
    # No customisation: default admin list/detail rendering for all models.
    pass
|
{"/views.py": ["/models.py"], "/admin.py": ["/models.py"], "/forms.py": ["/models.py"]}
|
44,444,162
|
poudyalsraj/pis
|
refs/heads/main
|
/urls.py
|
from django.conf.urls import url,include
from django.urls import path
from . import views
from django.views.generic.base import TemplateView
from django.contrib.auth import views as auth_views
# views as auth_views
# app_name = 'office_pis'
# URL routes for the OFFICE_PIS app. Child-record routes (service/leave/...)
# embed the owning staff pk so the create views can bind the relation.
urlpatterns =[
    path(r'staff/', views.StaffCreateView.as_view(), name ='staff-add'),
    path(r'staff/update/<pk>', views.StaffUpdateView.as_view(), name ='staff-update'),
    path(r'darbandi/view', views.DarbandiListView.as_view(), name ='darbandi-view'),
    path(r'staff', views.StaffListView.as_view(), name ='staff-list'),
    path(r'post/create', views.PostCreateView.as_view(), name ='post-add'),
    path(r'service/<pk>/create', views.ServiceCreateView.as_view(), name ='service-add'),
    path(r'leave/<pk>/create', views.LeaveCreateView.as_view(), name ='leave-add'),
    path(r'punishment/<pk>/create', views.PunishmentCreateView.as_view(), name ='punishment-add'),
    path(r'treatment/<pk>/create', views.TreatmentCreateView.as_view(), name ='treatment-add'),
    path(r'staff/<pk>', views.StaffDetailView.as_view(), name ='staff-detail'),
    path(r'darbandi/create', views.DarbandiCreateView.as_view(), name ='darbandi-add'),
    path(r'darbandi/update/<pk>', views.DarbandiUpdateView.as_view(), name ='darbandi-update'),
    path(r'staff/document/<pk>', views.pdfView, name ='staff-document-view'),
    path(r'staff/search/', views.SearchResultView.as_view(), name ='search-results'),
    # User authentication.
    path ('login/', auth_views.LoginView.as_view(), name = 'login' ),
    # NOTE(review): logout is mounted at 'pis' and next_page is passed as a
    # view kwarg; modern Django expects LogoutView.as_view(next_page='/') —
    # confirm against the Django version in use.
    path (r'pis', auth_views.LogoutView.as_view(), {'next_page' : '/'}, name = 'logout' ),
    path ('', TemplateView.as_view (template_name = 'home.html'), name ='home'),
]
|
{"/views.py": ["/models.py"], "/admin.py": ["/models.py"], "/forms.py": ["/models.py"]}
|
44,444,163
|
poudyalsraj/pis
|
refs/heads/main
|
/forms.py
|
from django import forms
from .models import Staff
# not needed
class Staff_form(forms.ModelForm):
    """ModelForm for Staff records (marked "not needed" upstream; kept for
    compatibility with any stray imports)."""
    class Meta:  # was lowercase `meta`, which Django silently ignores
        model = Staff  # was undefined name `Blogpost` (NameError at import)
        # NOTE(review): the original listed 'address' and 'position', which
        # are not fields on Staff; restricted to ones that exist.
        fields = ['name', 'post']
|
{"/views.py": ["/models.py"], "/admin.py": ["/models.py"], "/forms.py": ["/models.py"]}
|
44,444,164
|
poudyalsraj/pis
|
refs/heads/main
|
/models.py
|
from __future__ import unicode_literals
from django.db import models
from django.core.files.storage import FileSystemStorage
from datetime import datetime
from django.utils import timezone
# Choice pairs for permanent vs. non-permanent employment status.
# NOTE(review): not referenced by any model field visible in this file —
# confirm whether it is used elsewhere or is dead.
per_nonper_choices= (
    ('permanent','permanent'),
    ('non_permanent', 'non_permanent')
)
# main PIS database is here.... """
class Office(models.Model):
    # A government office staff members belong to.
    name = models.CharField(max_length=255)
    ph_num= models.CharField(max_length = 20)
    email= models.CharField(max_length = 255)
    def __str__(self):
        return self.name
class Post(models.Model):
    # A job position (title, grade level, base salary).
    name = models.CharField(max_length=255)
    level = models.CharField(max_length=255)
    salary= models.IntegerField(default=0)
    def __str__(self):
        return self.name
class Staff(models.Model):
    # Core personnel record; post/office survive related-row deletion
    # (SET_NULL) so staff history is not lost.
    name = models.CharField(max_length=255)
    photo=models.ImageField(blank=True, upload_to = 'pis/')
    ph_num= models.CharField(max_length = 20)
    dob = models.DateField(default= timezone.now)
    post = models.ForeignKey(Post, on_delete=models.SET_NULL, null = True, blank=True)
    office = models.ForeignKey(Office, on_delete=models.SET_NULL, null = True, blank=True)
    def __str__(self):
        return self.name
class Darbandi (models.Model):
    """Sanctioned-post (darbandi) counts for a post within an office."""
    office = models.ForeignKey(Office, on_delete=models.CASCADE, null=True, blank=True)
    post = models.ForeignKey(Post, on_delete=models.CASCADE, null = True, blank=True)
    total_post = models.IntegerField(default=1)
    current_post = models.IntegerField(default=1)

    def __str__(self):
        # Added for consistency: every other model here defines __str__.
        return '{} ({}/{})'.format(self.post, self.current_post, self.total_post)
class Address(models.Model):
    # One address per staff member (OneToOne); office link optional.
    country = models.CharField(max_length=255)
    province = models.CharField(max_length=255)
    district = models.CharField(max_length=255)
    municipal = models.CharField(max_length=255)
    staff = models.OneToOneField(Staff, on_delete=models.CASCADE, null=True, blank=True)
    office = models.ForeignKey(Office, on_delete=models.CASCADE, null = True, blank=True)
    def __str__(self):
        return self.district
class Family(models.Model):
    # Parents' names, one record per staff member.
    mother_name = models.CharField(max_length = 255)
    father_name= models.CharField(max_length = 255)
    staff = models.OneToOneField(Staff, on_delete=models.CASCADE)
    def __str__(self):
        return self.father_name
class DesiredPerson (models.Model):
    # Nominated contact/beneficiary for a staff member.
    name = models.CharField(max_length=255)
    relation= models.CharField(max_length=255)
    staff = models.OneToOneField(Staff, on_delete=models.CASCADE)
    def __str__(self):
        return self.name
class Appointment (models.Model):
    # Initial appointment of a staff member to a post in an office.
    staff = models.OneToOneField(Staff, on_delete=models.CASCADE )
    office = models.ForeignKey(Office, on_delete=models.CASCADE)
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    appointment_date = models.DateField(default= timezone.now)
class Service (models.Model):
    # One stint of service history (a staff member can have many).
    start_date = models.DateField(default= timezone.now)
    end_date = models.DateField(default= timezone.now)
    appointment_type = models.CharField(max_length=255, default='new')
    staff = models.ForeignKey(Staff, on_delete=models.CASCADE)
    post = models.ForeignKey(Post, on_delete=models.DO_NOTHING)
    office = models.ForeignKey(Office, on_delete=models.CASCADE)
    def __str__(self):
        return self.appointment_type
class EducationalInfo (models.Model):
    # One education credential per row (capped at 3 via the inline formset).
    Degree = models.CharField(max_length=255)
    address = models.CharField(max_length=255)
    institue_name = models.CharField(max_length=255)
    staff = models.ForeignKey(Staff, on_delete=models.CASCADE)
    def __str__(self):
        return self.Degree
class LeaveInfo (models.Model):
    # A leave entry; `Leave_days` keeps its capitalised name because it is a
    # database column referenced by the forms/views.
    leave_type = models.CharField(max_length = 255)
    Leave_days = models.IntegerField(default = 0)
    staff = models.ForeignKey(Staff, on_delete = models.CASCADE)
    def __str__(self):
        return self.leave_type
class PunishmentInfo (models.Model):
    # Disciplinary action recorded against a staff member.
    punishment_type = models.CharField(max_length=255)
    order_date = models.DateField(default= timezone.now)
    staff = models.ForeignKey(Staff, on_delete=models.CASCADE)
    def __str__(self):
        return self.punishment_type
class Treatment (models.Model):
    # Medical-treatment reimbursement entry.
    amount = models.IntegerField (default=0)
    date = models.DateField (default=timezone.now)
    staff = models.ForeignKey(Staff, on_delete=models.CASCADE)
class Document (models.Model):
    # Uploaded file attached to a staff member; served by views.pdfView.
    name = models.CharField(max_length=255)
    doc_file = models.FileField(upload_to = 'documents/', blank= True)
    staff = models.ForeignKey(Staff, on_delete=models.CASCADE)
    def __str__(self):
        return self.name
|
{"/views.py": ["/models.py"], "/admin.py": ["/models.py"], "/forms.py": ["/models.py"]}
|
44,444,165
|
poudyalsraj/pis
|
refs/heads/main
|
/apps.py
|
from django.apps import AppConfig
class OfficePisConfig(AppConfig):
    # Django app configuration for the OFFICE_PIS application.
    name = 'OFFICE_PIS'
|
{"/views.py": ["/models.py"], "/admin.py": ["/models.py"], "/forms.py": ["/models.py"]}
|
44,540,490
|
risabhRizz/goodwill-barter
|
refs/heads/master
|
/products/models.py
|
from django.db import models
from django.contrib.auth.models import User
class Product(models.Model):
    # An item offered for barter; `username` is the owning auth User
    # (FK despite the name), `photo_id` references the stored image.
    product_id = models.CharField(max_length=100)
    username = models.ForeignKey(User, on_delete=models.CASCADE)
    photo_id = models.CharField(max_length=64)
    available = models.BooleanField()
    description = models.TextField()
|
{"/users/views.py": ["/users/models.py", "/products/models.py"], "/products/urls.py": ["/products/views.py"], "/users/models.py": ["/products/models.py"], "/products/views.py": ["/products/models.py"]}
|
44,540,491
|
risabhRizz/goodwill-barter
|
refs/heads/master
|
/products/views.py
|
from django.shortcuts import render
def home(request):
    """Render the products landing page with a static list of goods."""
    goods = [
        "Harry Potter and the Sorcerer's Stone",
        'Silence of the lambs',
    ]
    return render(request, 'products/home.html', {'goods': goods})
|
{"/users/views.py": ["/users/models.py", "/products/models.py"], "/products/urls.py": ["/products/views.py"], "/users/models.py": ["/products/models.py"], "/products/views.py": ["/products/models.py"]}
|
44,623,829
|
gabrielDonnantuoni/Discord-Music-Bot
|
refs/heads/master
|
/main.py
|
# Smoke-test entry point — prints "rodou" (presumably Portuguese for
# "it ran") to confirm the process starts.
print('rodou')
|
{"/playground.py": ["/ytdl/YTDLSource.py", "/bot/Queue.py", "/database/main.py"], "/bot/Player.py": ["/ytdl/YTDLSource.py", "/ytdl/TTS.py", "/bot/Queue.py", "/database/main.py"], "/bot/Queue.py": ["/ytdl/YTDLSource.py"], "/ytdl/YTDLSource.py": ["/ytdl/config.py", "/env.py"], "/main.py": ["/flask_app.py", "/bot/main.py"], "/bot/main.py": ["/env.py", "/bot/Player.py"], "/ytdl/TTS.py": ["/ytdl/config.py"]}
|
44,638,457
|
kailin-lu/BreadLog
|
refs/heads/main
|
/breadlog/users/routes.py
|
from flask import Blueprint, request, redirect, url_for, render_template
from flask_login import current_user, login_user, logout_user
from breadlog.users.forms import RegisterForm, LoginForm
from breadlog.models import User
from breadlog.extensions import bcrypt, db
# Blueprint holding the authentication views (register/login/logout).
users = Blueprint('users', __name__)
@users.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account; already-authenticated users go to /recipes."""
    if current_user.is_authenticated:
        return redirect('/recipes')
    form = RegisterForm()
    # Invalid POST: return the collected field errors as plain text.
    if request.method == 'POST' and not form.validate():
        errors = []
        for field, error in form.errors.items():
            for err in error:
                errors.append([field, err])
        return ' '.join([str(i) for i in errors])
    if form.validate_on_submit():
        hashed_pw = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
        new_user = User(form.name.data, form.email.data, hashed_pw)
        try:
            db.session.add(new_user)
            db.session.commit()
            return redirect(url_for('users.login'))
        except Exception:  # was a bare `except:` (also swallowed SystemExit)
            db.session.rollback()  # keep the session usable after a failed commit
            return 'There was an error creating user'
    return render_template('register.html', form=form)
@users.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user by email/password and start a session."""
    if current_user.is_authenticated:
        return redirect('/recipes')  # TODO: replace with something that makes sense
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        # Check if user exists and the password matches the stored hash.
        if user and bcrypt.check_password_hash(user.password, form.password.data):
            login_user(user)
            return redirect(url_for('recipes.get_recipes'))
        else:
            return 'Login unsuccessful'  # TODO: change to a flash message
    return render_template('login.html', form=form)
@users.route('/logout')
def logout():
    """End the current session and return to the landing page."""
    logout_user()
    return redirect('/')
|
{"/breadlog/routes.py": ["/breadlog/__init__.py", "/breadlog/models.py", "/breadlog/forms.py"], "/breadlog/models.py": ["/breadlog/__init__.py"], "/breadlog/users/routes.py": ["/breadlog/users/forms.py", "/breadlog/models.py"], "/breadlog/__init__.py": ["/breadlog/commands.py", "/breadlog/users/routes.py", "/breadlog/recipes/routes.py", "/breadlog/about/routes.py"], "/breadlog/recipes/routes.py": ["/breadlog/models.py", "/breadlog/recipes/forms.py", "/breadlog/recipes/utils.py"], "/breadlog/commands.py": ["/breadlog/models.py"]}
|
44,638,458
|
kailin-lu/BreadLog
|
refs/heads/main
|
/breadlog/__init__.py
|
from flask import Flask
from breadlog.config import Config
from .extensions import db, login_manager, bcrypt
from .commands import create_tables, seed_db
def create_app(config=Config):
app = Flask(__name__)
app.config.from_object(config)
db.init_app(app)
login_manager.init_app(app)
login_manager.login_view = '__users.login__'
bcrypt.init_app(app)
from breadlog.users.routes import users
from breadlog.recipes.routes import recipes
from breadlog.about.routes import about
from breadlog.errors.handlers import errors
app.register_blueprint(users)
app.register_blueprint(recipes)
app.register_blueprint(about)
app.register_blueprint(errors)
app.cli.add_command(create_tables)
app.cli.add_command(seed_db)
return app
|
{"/breadlog/routes.py": ["/breadlog/__init__.py", "/breadlog/models.py", "/breadlog/forms.py"], "/breadlog/models.py": ["/breadlog/__init__.py"], "/breadlog/users/routes.py": ["/breadlog/users/forms.py", "/breadlog/models.py"], "/breadlog/__init__.py": ["/breadlog/commands.py", "/breadlog/users/routes.py", "/breadlog/recipes/routes.py", "/breadlog/about/routes.py"], "/breadlog/recipes/routes.py": ["/breadlog/models.py", "/breadlog/recipes/forms.py", "/breadlog/recipes/utils.py"], "/breadlog/commands.py": ["/breadlog/models.py"]}
|
44,638,459
|
kailin-lu/BreadLog
|
refs/heads/main
|
/breadlog/recipes/routes.py
|
from flask import render_template, url_for, request, redirect, jsonify, make_response, Blueprint
from flask_login import current_user
from breadlog.models import Recipe, Step, User, StepIngredient
from breadlog.recipes.forms import RecipeForm
from breadlog.recipes.utils import make_err_response, sum_recipe_ingredients
from sqlalchemy import or_
from sqlalchemy.exc import SQLAlchemyError
from breadlog.extensions import db
# Blueprint holding all recipe/step/ingredient routes.
recipes = Blueprint('recipes', __name__)
@recipes.route('/recipes', methods=['GET', 'POST'])
def get_recipes():
    """List the current user's recipes (plus the Sample user's) and handle
    creation of a new recipe via the same page's form."""
    form = RecipeForm()
    # NOTE(review): no login_required guard — anonymous visitors would hit
    # AttributeError on current_user.id; confirm routes are always behind login.
    user_id = current_user.id
    default_user = User.query.filter_by(name='Sample').first()
    recipe_list = Recipe.query.filter(or_(Recipe.user_id == user_id, Recipe.user_id == default_user.id)).order_by(Recipe.created_at).all()
    if request.method == 'POST':
        if form.validate_on_submit():
            recipe_name = form.recipe_name.data
            new_recipe = Recipe(name=recipe_name, user_id=user_id)
            try:
                db.session.add(new_recipe)
                db.session.commit()
                # Jump straight to editing the freshly created recipe.
                return redirect(url_for('recipes.edit_recipe', recipe_id=new_recipe.id))
            except SQLAlchemyError as e:
                return make_err_response(e)
    if len(recipe_list) == 0:
        return render_template('recipes.html', form=form, recipes=[],
                               hours=0, minutes=0, ingredients={})
    else:
        # Pre-compute display values for the first recipe shown.
        hours = recipe_list[0].total_minutes // 60
        minutes = recipe_list[0].total_minutes % 60
        ingredients = sum_recipe_ingredients(recipe_list[0])
        return render_template('recipes.html', form=form, recipes=recipe_list,
                               hours=hours, minutes=minutes,ingredients=ingredients)
@recipes.route('/recipes/edit/<int:recipe_id>', methods=['GET'])
def edit_recipe(recipe_id):
    """Show the edit page for one recipe with its aggregated ingredients."""
    recipe = Recipe.query.get_or_404(recipe_id)
    ingredients = sum_recipe_ingredients(recipe)
    return render_template('edit_recipe.html', recipe=recipe, ingredients=ingredients)
@recipes.route('/recipes/<int:recipe_id>/add_step', methods=['POST'])
def add_step(recipe_id):
    """Append a step to a recipe from a JSON payload and return it as JSON."""
    req = request.get_json()
    recipe = Recipe.query.get_or_404(recipe_id)
    hours = req['hours']
    minutes = req['minutes']
    # New step goes at the end of the sequence.
    total_steps = len(recipe.steps) + 1
    new_step = Step(step_number=total_steps,
                    hours=hours,
                    minutes=minutes,
                    notes=req['notes'],
                    recipe_id=recipe_id)
    # Keep the recipe's cached totals in sync (payload values may be strings).
    recipe.total_minutes += int(hours) * 60 + int(minutes)
    recipe.total_steps = total_steps
    try:
        db.session.add(new_step)
        db.session.commit()
        db.session.refresh(new_step)  # populate the generated primary key
        return make_response(jsonify({
            'step_number': total_steps,
            'step_id': new_step.id,
            'recipe_id': recipe.id,
            'minutes': new_step.minutes,
            'hours': new_step.hours,
            'notes': new_step.notes,
            'item': 'step',
            'action': 'add'
        }), 200)
    except SQLAlchemyError as e:
        # Use the shared error helper like every other route in this module
        # (was an ad-hoc f-string body).
        return make_err_response(e)
@recipes.route('/delete_step/<int:step_id>', methods=['POST'])
def delete_step(step_id):
    """Delete a step, renumber later steps, and update the recipe's totals."""
    step_to_delete = Step.query.get_or_404(step_id)
    step_id = step_to_delete.id
    # Subtract the step's time from the recipe's cached total.
    if step_to_delete.minutes > 0 or step_to_delete.hours > 0:
        step_to_delete.recipe.total_minutes -= step_to_delete.hours * 60 + step_to_delete.minutes
    step_to_delete.recipe.total_steps = len(step_to_delete.recipe.steps) - 1
    step_number = step_to_delete.step_number
    # Shift the step numbers after the deleted step up by one.
    for step in step_to_delete.recipe.steps:
        if step.step_number > step_number:
            step.step_number -= 1
    try:
        db.session.delete(step_to_delete)
        db.session.commit()
        res = make_response(jsonify({
            'step_id': step_id,
            'step_number': step_number,
            'item': 'step',
            'action': 'delete'}), 200)
        return res
    except SQLAlchemyError as e:
        # Shared error helper for consistency with the other routes
        # (was an ad-hoc f-string body).
        return make_err_response(e)
@recipes.route('/move_step_up/<int:step_id>', methods=['POST'])
def move_step_up(step_id):
    """Swap a step with the one directly above it."""
    step_to_move = Step.query.get_or_404(step_id)
    new_step_number = step_to_move.step_number - 1
    recipe = step_to_move.recipe
    # NOTE(review): [0] raises IndexError when the step is already first;
    # presumably the UI hides the control in that case — confirm.
    step_to_increment = [step for step in recipe.steps if step.step_number == new_step_number][0]
    step_to_increment.step_number += 1
    step_to_move.step_number = new_step_number
    try:
        db.session.commit()
        resdata = {
            'item': 'step',
            'action': 'moveup',
            'step_id': step_to_move.id,
            'shifted_step_id': step_to_increment.id
        }
        return make_response(jsonify(resdata), 200)
    except SQLAlchemyError as e:
        return make_err_response(e)
@recipes.route('/move_step_down/<int:step_id>', methods=['POST'])
def move_step_down(step_id):
    """Swap a step with the one directly below it."""
    step_to_move = Step.query.get_or_404(step_id)
    new_step_number = step_to_move.step_number + 1
    recipe = step_to_move.recipe
    # NOTE(review): [0] raises IndexError when the step is already last —
    # confirm the UI prevents this, mirroring move_step_up.
    step_to_decrement = [step for step in recipe.steps if step.step_number == new_step_number][0]
    step_to_decrement.step_number -= 1
    step_to_move.step_number = new_step_number
    try:
        db.session.commit()
        resdata = {
            'item': 'step',
            'action': 'movedown',
            'shifted_step_id': step_to_decrement.id,
            'step_id': step_to_move.id
        }
        return make_response(jsonify(resdata), 200)
    except SQLAlchemyError as e:
        return make_err_response(e)
@recipes.route('/delete_recipe/<int:recipe_id>', methods=['GET', 'POST'])
def delete_recipe(recipe_id):
    """Delete a recipe and return to the recipe list."""
    recipe_to_delete = Recipe.query.get_or_404(recipe_id)
    try:
        db.session.delete(recipe_to_delete)
        db.session.commit()
        return redirect('/recipes')
    except SQLAlchemyError as e:
        return make_err_response(e)
# Add an ingredient (name + weight) to a step from a JSON payload.
@recipes.route('/step/<int:step_id>/add_step_ingredient', methods=['POST'])
def add_step_ingredient(step_id):
    """Create a StepIngredient for the given step and echo it back as JSON."""
    req = request.get_json()
    ingredient = req['ingredient']
    weight = req['weight']
    new_step_ingredient = StepIngredient(step_id=step_id, ingredient=ingredient, weight=weight)
    try:
        db.session.add(new_step_ingredient)
        db.session.commit()
        db.session.refresh(new_step_ingredient)  # populate the generated id
        resdata = {
            'step_ingredient_id': new_step_ingredient.id,
            'step_id': new_step_ingredient.step_id,
            'ingredient': ingredient,
            'weight': weight,
            'action': 'add',
            'item': 'ingredient'
        }
        res = make_response(jsonify(resdata), 200)
        return res
    except SQLAlchemyError as e:
        return make_err_response(e)
@recipes.route('/step/<int:step_id>/step_ingredient/<int:step_ingredient_id>/delete', methods=['POST'])
def delete_step_ingredient(step_id, step_ingredient_id):
    """Delete one ingredient row from a step; step_id is unused beyond routing."""
    step_ingredient = StepIngredient.query.get_or_404(step_ingredient_id)
    try:
        db.session.delete(step_ingredient)
        db.session.commit()
        resdata = {
            'step_ingredient_id': step_ingredient.id,
            'action': 'delete',
            'item': 'ingredient'
        }
        return make_response(jsonify(resdata), 200)
    except SQLAlchemyError as e:
        return make_err_response(e)
# JSON API: all recipes.
@recipes.route('/recipe', methods=['GET'])
def recipe():
    """Return every recipe as JSON.

    NOTE(review): jsonify on model instances requires the models to be
    JSON-serializable (e.g. dataclass mapping) — confirm in models.py.
    """
    recipes = Recipe.query.all()
    return jsonify(recipes)
# JSON API: a single recipe by primary key (404 if absent).
@recipes.route('/recipe/id/<int:recipe_id>', methods=['GET'])
def recipe_id(recipe_id):
    """Return one recipe as JSON."""
    recipe = Recipe.query.get_or_404(recipe_id)
    return jsonify(recipe)
@recipes.route('/recipe/id/<int:recipe_id>/step/<int:step_id>', methods=['GET'])
def step_id(recipe_id, step_id):
    """Return one step of a recipe as JSON.

    NOTE(review): [0] raises IndexError (500) when step_id does not belong
    to the recipe — a 404 would be friendlier; confirm intended behavior.
    """
    steps = Recipe.query.get_or_404(recipe_id).steps
    step = [step for step in steps if step.id == step_id][0]
    return jsonify(step)
|
{"/breadlog/routes.py": ["/breadlog/__init__.py", "/breadlog/models.py", "/breadlog/forms.py"], "/breadlog/models.py": ["/breadlog/__init__.py"], "/breadlog/users/routes.py": ["/breadlog/users/forms.py", "/breadlog/models.py"], "/breadlog/__init__.py": ["/breadlog/commands.py", "/breadlog/users/routes.py", "/breadlog/recipes/routes.py", "/breadlog/about/routes.py"], "/breadlog/recipes/routes.py": ["/breadlog/models.py", "/breadlog/recipes/forms.py", "/breadlog/recipes/utils.py"], "/breadlog/commands.py": ["/breadlog/models.py"]}
|
44,638,460
|
kailin-lu/BreadLog
|
refs/heads/main
|
/breadlog/commands.py
|
import click
from flask.cli import with_appcontext
from .extensions import db, bcrypt
from .models import User, Recipe, Step, StepIngredient
@click.command(name='create_tables')
@with_appcontext
def create_tables():
    """Flask CLI command: create all tables for the registered models."""
    db.create_all()
@click.command(name='seed_db')
@with_appcontext
def seed_db():
    """Flask CLI command: seed the DB with a sample user and a worked
    12-step sourdough recipe (steps, ingredients, cached time totals)."""
    hashed_pw = bcrypt.generate_password_hash('pw123').decode('utf-8')
    sample_user = User('Sample', 'sample@domain.com', hashed_pw)
    db.session.add(sample_user)
    db.session.commit()
    sample_recipe = Recipe('Sample Recipe', sample_user.id)
    db.session.add(sample_recipe)
    db.session.commit()
    recipe_id = sample_recipe.id
    step1_notes = '''
    Mix the levain. Add 50g of 50% hydration mature starter (50/50 flour water ratio) to a bowl.
    Add an additional 25g bread flour and 25g water. Mix until all ingredients are incorporated and
    let rest overnight for 12 hours.
    '''
    step1 = Step(1, 12, 0, step1_notes, recipe_id)
    db.session.add(step1)
    db.session.commit()
    # First assignment of the cached total; later steps use +=.
    sample_recipe.total_minutes = 12 * 60
    step1_flour = StepIngredient(step_id=step1.id, ingredient='BREAD FLOUR', weight=50)
    step1_water = StepIngredient(step_id=step1.id, ingredient='WATER', weight=50)
    db.session.add_all([step1_flour, step1_water])
    db.session.commit()
    step2_notes = '''
    Combine 800g bread flour with 100g whole wheat flour in a large bowl. Add 585g water and mix until a shaggy dough forms.
    Add the levain from step 1. Squeeze and pinch while mixing to combine doughs. Allow to autolyse for 20 minutes. After autolysing, the dough should appear smoother and plumper as the flour has
    had time absorb the water.
    '''
    step2 = Step(2,0,20, step2_notes, recipe_id)
    db.session.add(step2)
    db.session.commit()
    sample_recipe.total_minutes += 20
    step2_flour = StepIngredient(step_id=step2.id, ingredient='BREAD FLOUR', weight=800)
    step2_wwflour = StepIngredient(step_id=step2.id, ingredient='WHOLE WHEAT FLOUR', weight=100)
    step2_water = StepIngredient(step_id=step2.id, ingredient='WATER', weight=585)
    db.session.add_all([step2_flour, step2_wwflour, step2_water])
    db.session.commit()
    step3_notes = '''
    Measure 20g salt and mix with 30g water to dissolve.
    Add mixture to bowl and combine completely with the dough.
    '''
    step3 = Step(3, 0,10, step3_notes, recipe_id)
    db.session.add(step3)
    db.session.commit()
    sample_recipe.total_minutes += 10
    step3_salt = StepIngredient(step_id=step3.id, ingredient='SALT', weight=20)
    step3_water = StepIngredient(step_id=step3.id, ingredient='WATER', weight=30)
    db.session.add_all([step3_salt, step3_water])
    db.session.commit()
    step4_notes = '''
    Knead the dough using a slap and fold technique. Using the table as an anchoring point for the dough,
    pull the dough towards yourself and fold back over.
    Flip the dough 90 degrees, anchor on the table, and stretch again.
    Take care to not split the dough.
    Knead for 30 minutes or until the dough is smooth and highly elastic.
    '''
    step4 = Step(4, 0, 30, step4_notes, recipe_id)
    db.session.add(step4)
    db.session.commit()
    sample_recipe.total_minutes += 30
    step5_notes = '''
    Begin bulk fermentation. Cover dough and allow it to rise in a warm place.
    After 30 minutes, perform first stretch and fold. Using a hand to scoop under the dough,
    stretch the dough gently up, shaking carefully to allow further stretching. Do not break the dough.
    Fold the stretched portion of the dough over the top of the remaining dough.
    Turn the bowl 90 degrees and repeat the stretch and fold process four times.
    '''
    step5 = Step(5, 0, 30, step5_notes, recipe_id)
    db.session.add(step5)
    db.session.commit()
    sample_recipe.total_minutes += 30
    step6_notes = 'Rest for another 30 minutes and repeat the stretch and fold.'
    step6 = Step(6, 0, 30, step6_notes, recipe_id)
    db.session.add(step6)
    db.session.commit()
    sample_recipe.total_minutes += 30
    step7_notes = 'Rest for another hour and do one last stretch and fold'
    step7 = Step(7, 1, 0, step7_notes, recipe_id)
    db.session.add(step7)
    db.session.commit()
    sample_recipe.total_minutes += 60
    step8_notes = 'Allow the dough to rest for 2 hours'
    step8 = Step(8, 2, 0, step8_notes, recipe_id)
    # BUG FIX: the original re-added step7 here, so step 8 was never persisted.
    db.session.add(step8)
    db.session.commit()
    sample_recipe.total_minutes += 120
    step9_notes = '''
    Divide the dough into two parts. Lightly shape each part into a round ball.
    Let the dough rest for 20 minutes to relax the gluten for final shaping.
    '''
    step9 = Step(9, 0, 20, step9_notes, recipe_id)
    db.session.add(step9)
    db.session.commit()
    sample_recipe.total_minutes += 20
    step10_notes = '''Pull the dough balls tightly into boules or batards.
    Use a table or a bench scraper to help create tension in the rounds.
    Place seam side up in a proofing basket dusted with rice flour to prevent sticking.
    '''
    step10 = Step(10, 0, 10, step10_notes, recipe_id)
    db.session.add(step10)
    db.session.commit()
    sample_recipe.total_minutes += 10
    step11_notes = '''Place proofing baskets covered in the fridge for 12 hours overnight.
    The next morning, take the baskets out and allow them to come to room temperature as the oven is heated.
    Score the top of the loaves to allow the bread the release steam in the oven
    '''
    step11 = Step(11, 12, 30, step11_notes, recipe_id)
    db.session.add(step11)
    db.session.commit()
    sample_recipe.total_minutes += 12 * 60 + 30
    step12_notes = '''Preheat a cast iron combo cooker at 485F and bake for rounds covered for 20 minutes.
    Remove the cover and lower the heat to 425F. Bake for another 20-30 minutes or until golden brown. Let loaves cool
    for at least 1 hour prior to slicing.
    '''
    step12 = Step(12, 0, 45, step12_notes, recipe_id)
    db.session.add(step12)
    db.session.commit()
    sample_recipe.total_minutes += 45
    sample_recipe.total_steps = 12
    db.session.commit()
|
{"/breadlog/routes.py": ["/breadlog/__init__.py", "/breadlog/models.py", "/breadlog/forms.py"], "/breadlog/models.py": ["/breadlog/__init__.py"], "/breadlog/users/routes.py": ["/breadlog/users/forms.py", "/breadlog/models.py"], "/breadlog/__init__.py": ["/breadlog/commands.py", "/breadlog/users/routes.py", "/breadlog/recipes/routes.py", "/breadlog/about/routes.py"], "/breadlog/recipes/routes.py": ["/breadlog/models.py", "/breadlog/recipes/forms.py", "/breadlog/recipes/utils.py"], "/breadlog/commands.py": ["/breadlog/models.py"]}
|
44,638,461
|
kailin-lu/BreadLog
|
refs/heads/main
|
/breadlog/recipes/forms.py
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired, Length
class RecipeForm(FlaskForm):
    """Form for creating a new recipe (a single name field)."""

    recipe_name = StringField(
        'New Recipe',
        validators=[DataRequired(), Length(min=1, max=50)],
        render_kw={"placeholder": "Recipe name"},
    )
    submit = SubmitField('Create')
|
{"/breadlog/routes.py": ["/breadlog/__init__.py", "/breadlog/models.py", "/breadlog/forms.py"], "/breadlog/models.py": ["/breadlog/__init__.py"], "/breadlog/users/routes.py": ["/breadlog/users/forms.py", "/breadlog/models.py"], "/breadlog/__init__.py": ["/breadlog/commands.py", "/breadlog/users/routes.py", "/breadlog/recipes/routes.py", "/breadlog/about/routes.py"], "/breadlog/recipes/routes.py": ["/breadlog/models.py", "/breadlog/recipes/forms.py", "/breadlog/recipes/utils.py"], "/breadlog/commands.py": ["/breadlog/models.py"]}
|
44,638,462
|
kailin-lu/BreadLog
|
refs/heads/main
|
/breadlog/users/forms.py
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, PasswordField
from wtforms.validators import DataRequired, EqualTo
class LoginForm(FlaskForm):
    """Email/password login form."""

    email = StringField(
        'Email',
        validators=[DataRequired(message='Email is required')],
    )
    password = PasswordField(
        'Password',
        validators=[DataRequired(message='Password is required')],
    )
    submit = SubmitField('Log In')
class RegisterForm(FlaskForm):
    """Account-creation form; the two password fields must match."""

    name = StringField(
        'Username',
        validators=[DataRequired(message="Name is required")],
    )
    email = StringField(
        'Email',
        validators=[DataRequired(message='Email not validated')],
    )
    password = PasswordField(
        'Password',
        validators=[DataRequired(message='password not validated')],
    )
    confirm_password = PasswordField(
        'Confirm Password',
        validators=[
            DataRequired(message='confirm password not validated'),
            EqualTo('password', message='confirm passwords not the same'),
        ],
    )
    submit = SubmitField('Create Account')
|
{"/breadlog/routes.py": ["/breadlog/__init__.py", "/breadlog/models.py", "/breadlog/forms.py"], "/breadlog/models.py": ["/breadlog/__init__.py"], "/breadlog/users/routes.py": ["/breadlog/users/forms.py", "/breadlog/models.py"], "/breadlog/__init__.py": ["/breadlog/commands.py", "/breadlog/users/routes.py", "/breadlog/recipes/routes.py", "/breadlog/about/routes.py"], "/breadlog/recipes/routes.py": ["/breadlog/models.py", "/breadlog/recipes/forms.py", "/breadlog/recipes/utils.py"], "/breadlog/commands.py": ["/breadlog/models.py"]}
|
44,638,463
|
kailin-lu/BreadLog
|
refs/heads/main
|
/breadlog/about/routes.py
|
from flask import render_template, Blueprint
# Blueprint for the public landing page; registered by breadlog/__init__.py.
about = Blueprint('about', __name__)
@about.route('/', methods=['GET', 'POST'])
def index():
    """Render the landing page.

    NOTE(review): the route accepts POST but the handler ignores request
    data — presumably a leftover; confirm before removing POST.
    """
    return render_template('index.html')
|
{"/breadlog/routes.py": ["/breadlog/__init__.py", "/breadlog/models.py", "/breadlog/forms.py"], "/breadlog/models.py": ["/breadlog/__init__.py"], "/breadlog/users/routes.py": ["/breadlog/users/forms.py", "/breadlog/models.py"], "/breadlog/__init__.py": ["/breadlog/commands.py", "/breadlog/users/routes.py", "/breadlog/recipes/routes.py", "/breadlog/about/routes.py"], "/breadlog/recipes/routes.py": ["/breadlog/models.py", "/breadlog/recipes/forms.py", "/breadlog/recipes/utils.py"], "/breadlog/commands.py": ["/breadlog/models.py"]}
|
44,638,464
|
kailin-lu/BreadLog
|
refs/heads/main
|
/breadlog/models.py
|
from datetime import datetime
from sqlalchemy.dialects.postgresql.base import UUID
from .extensions import db, login_manager
from flask_login import UserMixin
from dataclasses import dataclass
import uuid
# Expose the Postgres UUID column type on the shared db object for
# convenience elsewhere in the package.
db.UUID = UUID
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login hook: fetch the User for the id stored in the session."""
    return User.query.get(user_id)
@dataclass  # makes instances JSON-serializable via the annotated fields
class Recipe(db.Model):
    """A bread recipe owned by a User; its Steps cascade-delete with it."""
    # The dataclass annotations below define the JSON view of a row.
    id: int
    name: str
    total_steps: int
    total_minutes: int
    is_public: bool
    created_at: datetime
    user_id: str
    steps: list
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(120), nullable=False)
    total_steps = db.Column(db.Integer, nullable=False)
    total_minutes = db.Column(db.Integer, nullable=True)
    is_public = db.Column(db.Boolean, nullable=False, default=False)
    created_at = db.Column(db.DateTime, default=datetime.utcnow)
    user_id = db.Column(UUID(as_uuid=True), db.ForeignKey('user.id'),
                        nullable=False)  # every recipe must belong to a user
    steps = db.relationship('Step', backref='recipe', lazy=False, order_by='Step.step_number', cascade="all, delete-orphan")
    def __init__(self, name, user_id):
        """Create an empty recipe; totals are incremented as steps are added."""
        self.name = name
        self.total_steps = 0
        self.total_minutes = 0
        self.user_id = user_id
@dataclass
class Step(db.Model):
    """One timed step of a Recipe, with its ingredient amounts attached."""
    id: int
    recipe_id: int
    step_number: int
    created_at: datetime
    hours: int
    minutes: int
    notes: str
    ingredients: list
    id = db.Column(db.Integer, primary_key=True)
    recipe_id = db.Column(db.Integer, db.ForeignKey('recipe.id'), nullable=False)
    step_number = db.Column(db.Integer, nullable=False)
    created_at = db.Column(db.DateTime, default=datetime.utcnow)
    hours = db.Column(db.Integer, nullable=True, default=0)
    minutes = db.Column(db.Integer, nullable=False)
    notes = db.Column(db.Text, nullable=True)
    ingredients = db.relationship('StepIngredient', backref='step',
                                  lazy=False, order_by='StepIngredient.weight', cascade="all, delete-orphan")
    def __init__(self, step_number, hours, minutes, notes, recipe_id):
        """Create a step; hours/minutes together are the step's duration."""
        self.step_number = step_number
        self.hours = hours
        self.minutes = minutes
        self.notes = notes
        self.recipe_id = recipe_id
class Ingredient(db.Model):
    """A canonical ingredient name, keyed by a random UUID."""

    id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, unique=True, nullable=False)
    name = db.Column(db.String(120), nullable=False)
    created_at = db.Column(db.DateTime, default=datetime.utcnow)

    def __init__(self, name):
        """Record only the display name; id/created_at default in."""
        self.name = name

    def __repr__(self):
        return 'Ingredient({})'.format(self.name)
@dataclass
class StepIngredient(db.Model):
    """An ingredient amount (weight) attached to a single Step."""
    id: int
    ingredient: str
    step_id: int
    weight: float
    created_at: datetime
    id = db.Column(db.Integer, primary_key=True)
    # NOTE(review): the ingredient is free text rather than an FK to
    # Ingredient — presumably intentional; confirm before normalizing.
    ingredient = db.Column(db.String(256), nullable=True)
    step_id = db.Column(db.Integer, db.ForeignKey('step.id'), nullable=False)
    weight = db.Column(db.Float, nullable=False)
    created_at = db.Column(db.DateTime, default=datetime.utcnow)
    def __init__(self, step_id, ingredient, weight):
        """Attach *weight* of *ingredient* to step *step_id*."""
        self.step_id = step_id
        self.ingredient = ingredient
        self.weight = weight
class User(db.Model, UserMixin):
    """An account holder; owns recipes, newest first."""
    id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, unique=True, nullable=False)
    name = db.Column(db.String(120), nullable=True)
    email = db.Column(db.String(120), unique=True, nullable=False)
    # NOTE(review): String(60) matches a bcrypt hash length — presumably the
    # caller hashes before storing; confirm it is never plaintext.
    password = db.Column(db.String(60), nullable=False)
    recipes = db.relationship('Recipe', backref='author', lazy=True, order_by='desc(Recipe.created_at)')
    def __init__(self, name, email, password):
        """Store credentials as given; id defaults to a random UUID."""
        self.name = name
        self.email = email
        self.password = password
    def __repr__(self):
        return f'User {self.email} '
|
{"/breadlog/routes.py": ["/breadlog/__init__.py", "/breadlog/models.py", "/breadlog/forms.py"], "/breadlog/models.py": ["/breadlog/__init__.py"], "/breadlog/users/routes.py": ["/breadlog/users/forms.py", "/breadlog/models.py"], "/breadlog/__init__.py": ["/breadlog/commands.py", "/breadlog/users/routes.py", "/breadlog/recipes/routes.py", "/breadlog/about/routes.py"], "/breadlog/recipes/routes.py": ["/breadlog/models.py", "/breadlog/recipes/forms.py", "/breadlog/recipes/utils.py"], "/breadlog/commands.py": ["/breadlog/models.py"]}
|
44,638,465
|
kailin-lu/BreadLog
|
refs/heads/main
|
/breadlog/recipes/utils.py
|
from flask import make_response, jsonify
from collections import defaultdict
from sqlalchemy.exc import SQLAlchemyError
def make_err_response(e):
    """Turn a SQLAlchemy error into a JSON 404 response.

    Exposes the driver-level message (`e.orig`) and the statement
    parameters (`e.params`) so the client can see what failed.
    """
    payload = {
        'orig': str(e.orig),
        'params': str(e.params),
    }
    return make_response(jsonify(payload), 404)
# sum ingredient totals in recipe
def sum_recipe_ingredients(recipe):
    """Aggregate ingredient weights across all steps of *recipe*.

    Returns a defaultdict mapping ingredient name -> [total_weight, pct],
    where pct is the baker's percentage (weight relative to the combined
    weight of ingredients whose name contains 'FLOUR'), rounded to one
    decimal place, or 0 when the recipe contains no flour.
    """
    # Accumulate plain float totals first instead of mutating the first
    # element of a list cell (the previous approach also re-checked
    # membership via .keys() on every ingredient).
    totals = defaultdict(float)
    flour_weight = 0
    for step in recipe.steps:
        for ingr in step.ingredients:
            totals[ingr.ingredient] += ingr.weight
            # NOTE(review): match is case-sensitive; assumes ingredient
            # names are stored upper-case — confirm against seed data.
            if 'FLOUR' in ingr.ingredient:
                flour_weight += ingr.weight
    # Preserve the original return shape: name -> [weight, percentage].
    ingredient_list = defaultdict(list)
    for name, weight in totals.items():
        pct = round(weight * 100 / flour_weight, 1) if flour_weight else 0
        ingredient_list[name] = [weight, pct]
    return ingredient_list
|
{"/breadlog/routes.py": ["/breadlog/__init__.py", "/breadlog/models.py", "/breadlog/forms.py"], "/breadlog/models.py": ["/breadlog/__init__.py"], "/breadlog/users/routes.py": ["/breadlog/users/forms.py", "/breadlog/models.py"], "/breadlog/__init__.py": ["/breadlog/commands.py", "/breadlog/users/routes.py", "/breadlog/recipes/routes.py", "/breadlog/about/routes.py"], "/breadlog/recipes/routes.py": ["/breadlog/models.py", "/breadlog/recipes/forms.py", "/breadlog/recipes/utils.py"], "/breadlog/commands.py": ["/breadlog/models.py"]}
|
44,713,836
|
chetangartoula/farmersnepal
|
refs/heads/master
|
/blog/models.py
|
from django.db import models
# Create your models here.
class Blog(models.Model):
    """A blog post.

    NOTE(review): 'discription' is misspelled; renaming the field requires
    a migration, so it is left as-is here.
    """
    title = models.CharField(max_length=500,null=True)
    discription = models.TextField(max_length=2000, null=True)
    post_date =models.DateField(auto_now_add=True,null=True)
    def __str__(self):
        # title is nullable; assumes posts always have one — TODO confirm.
        return self.title
class Comments(models.Model):
    """A reader comment on a Blog post; hidden until status is True."""
    # NOTE(review): max_length has no effect on a ForeignKey; kept so the
    # field deconstructs identically for existing migrations.
    blog= models.ForeignKey(Blog,max_length=1500, on_delete=models.CASCADE,null=True)
    comments = models.TextField(max_length=1500, null=True)
    status = models.BooleanField(default=False)  # moderation/approval flag
    date_post = models.DateField(auto_now_add=True,null=True)
    def __str__(self):
        # Fix: blog is nullable, so self.blog.title raised AttributeError
        # (e.g. in admin listings) for orphaned comments.
        return self.blog.title if self.blog else ''
|
{"/farmers/admin.py": ["/farmers/models.py"], "/farmers/forms.py": ["/farmers/models.py"], "/farmers/views.py": ["/farmers/models.py", "/farmers/forms.py"]}
|
44,713,837
|
chetangartoula/farmersnepal
|
refs/heads/master
|
/market/models.py
|
from django.db import models
from phone_field import PhoneField
from django.contrib.auth.models import User
# Create your models here.
class Category(models.Model):
    """A product category with a unique display name."""
    name = models.CharField(max_length=250, null=True, unique=True)
    def __str__(self):
        # name is nullable; assumes it is always set — TODO confirm.
        return self.name
class Vendor(models.Model):
    """A seller account, 1:1 with a Django auth User."""
    user = models.OneToOneField(User, on_delete=models.CASCADE, null=True)
    name = models.CharField(max_length=250, null=True, unique=True)
    location = models.CharField(max_length=250, null=True, blank=True)
    phone = PhoneField(blank=True, help_text='Contact phone number')
    def __str__(self):
        # name is nullable; assumes it is always set — TODO confirm.
        return self.name
class Product(models.Model):
    """An item listed by a Vendor within a Category."""
    name = models.CharField(max_length=250, null=True)
    vendor = models.ForeignKey(Vendor, on_delete=models.CASCADE, null=True)
    category = models.ForeignKey(Category, on_delete=models.CASCADE, null=True)
    price = models.IntegerField(null=True)
    description = models.CharField(max_length=250, null=True)
    image = models.ImageField(upload_to='products/', null=True)
    # presumably the pre-discount price shown struck through — TODO confirm
    originalprice = models.IntegerField(null=True, blank=True)
    quantity = models.IntegerField(null=True, default=0)  # stock on hand
    sales = models.IntegerField(null=True, default=0)  # units sold counter
    post_date = models.DateTimeField(auto_now_add=True, null=True)
    unit = models.CharField(max_length=250, null=True)  # e.g. kg / piece
    def __str__(self):
        # name is nullable; assumes it is always set — TODO confirm.
        return self.name
class Order(models.Model):
    """A customer order; its line items live in OrderItem."""
    order_by = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
    order_date = models.DateTimeField(auto_now_add=True, null=True)
    # presumably True once the order is fulfilled — TODO confirm
    status = models.BooleanField(default=False, null=True, blank=False)
    phone = PhoneField(blank=True, help_text='Contact phone number')
    location = models.CharField(max_length=250, null=True)
    def __str__(self):
        # Fix: order_by is nullable, so self.order_by.first_name raised
        # AttributeError for orphaned orders (e.g. in admin listings).
        return self.order_by.first_name if self.order_by else ''
class OrderItem(models.Model):
    """One product line within an Order."""
    product = models.ForeignKey(Product, on_delete=models.CASCADE, null=True)
    quantity = models.IntegerField(null=True)
    order = models.ForeignKey(Order, on_delete=models.CASCADE)
    deliveryCharge = models.IntegerField(null=True, blank=True)
    total = models.IntegerField(null=True)  # line total incl. delivery? TODO confirm
    def __str__(self):
        # Fix: product is nullable, so self.product.name raised
        # AttributeError for orphaned items (e.g. in admin listings).
        return self.product.name if self.product else ''
    @property
    def order_by(self):
        """User who placed the parent order."""
        return self.order.order_by
    @property
    def order_date(self):
        """Timestamp of the parent order."""
        return self.order.order_date
|
{"/farmers/admin.py": ["/farmers/models.py"], "/farmers/forms.py": ["/farmers/models.py"], "/farmers/views.py": ["/farmers/models.py", "/farmers/forms.py"]}
|
44,720,334
|
jameskennelly4/cwruclasses-ratemyprofessor-website
|
refs/heads/main
|
/models.py
|
from django.db import models
import csv
class Class(models.Model):
    """A university course: catalog code, name and description."""
    title = models.CharField(max_length=50)
    name = models.CharField(max_length=50)
    # NOTE(review): CharField with max_length=100000 — TextField is the
    # conventional choice; changing it requires a migration.
    description = models.CharField(max_length=100000)
class Professor(models.Model):
    """An instructor imported from RateMyProfessor-style CSV data.

    NOTE(review): the six class_taughtN columns are a denormalized
    many-to-many; a ManyToManyField to Class would be the usual design,
    but changing it requires a migration and CSV loader changes.
    """
    firstname = models.CharField(max_length=50)
    lastname = models.CharField(max_length=50)
    link = models.CharField(max_length=2000)  # profile URL
    rating = models.CharField(max_length=50)  # stored as text, not numeric
    class_taught1 = models.CharField(max_length=50, default='')
    class_taught2 = models.CharField(max_length=50, default='')
    class_taught3 = models.CharField(max_length=50, default='')
    class_taught4 = models.CharField(max_length=50, default='')
    class_taught5 = models.CharField(max_length=50, default='')
    class_taught6 = models.CharField(max_length=50, default='')
class Review(models.Model):
    """A student review of a professor for one class."""
    name = models.CharField(max_length=50)  # professor name, as free text
    class_reviewed = models.CharField(max_length=50)
    quality = models.FloatField(default=0)
    difficulty = models.FloatField(default=0)
    date = models.CharField(max_length=50)  # kept as text to match the CSV
    review_content = models.CharField(max_length=100000)
# NOTE(review): this seeding runs at import time against hard-coded
# absolute paths — presumably a one-off dev hack; a management command
# would be the conventional home for it.
def _seed_from_csv(path, create_row):
    """Read *path* and invoke *create_row* once per CSV record."""
    with open(path) as csv_file:
        for record in csv.reader(csv_file):
            create_row(record)


_seed_from_csv(
    "/Users/jameskennelly/Desktop/mysite/Classes.csv",
    lambda r: Class.objects.get_or_create(
        title=r[0],
        name=r[1],
        description=r[2],
    ),
)
_seed_from_csv(
    "/Users/jameskennelly/Desktop/mysite/Professors.csv",
    lambda r: Professor.objects.get_or_create(
        firstname=r[0],
        lastname=r[1],
        link=r[2],
        rating=r[3],
        class_taught1=r[4],
        class_taught2=r[5],
        class_taught3=r[6],
        class_taught4=r[7],
        class_taught5=r[8],
        class_taught6=r[9],
    ),
)
_seed_from_csv(
    "/Users/jameskennelly/Desktop/mysite/Reviews.csv",
    lambda r: Review.objects.get_or_create(
        name=r[0],
        class_reviewed=r[1],
        quality=r[2],
        difficulty=r[3],
        date=r[4],
        review_content=r[5],
    ),
)
|
{"/mysite/myapp/views.py": ["/mysite/myapp/models.py"]}
|
44,729,239
|
HobbesE/Study-Buddy-Finder
|
refs/heads/main
|
/seed_database.py
|
"""Script to seed Study Buddy database."""
import os
from random import choice, randint
#from datetime import datetime
import crud
import model
import server
# Recreate the dev database from scratch before seeding.
# NOTE(review): os.system exit codes are not checked — a missing psql
# toolchain fails silently and the script then errors on connect.
os.system('dropdb hackbrighter')
os.system('createdb hackbrighter')
model.connect_to_db(server.app)
model.db.create_all()
# Demo students seeded into the fresh database.
# NOTE(review): passwords are plaintext — acceptable only for dev seeding.
student_data= [
{
"username":"JBland07",
"password": "megajess",
"first_name": "Jessica",
"last_name": "Blandley",
"email":"jbland07@yahoo.com",
"icon_url":"static/Creative-Tail-Animal-squirrel.svg.png",
"cohort_name":"Ada",
"cohort_year":"2018",
},
{
"username":"japanpanda",
"password": "yum",
"first_name": "Cassity",
"last_name": "Jefferson",
"email":"dgillespie2@gmail.com",
"icon_url":"static/Creative-Tail-Animal-tiger.svg.png",
"cohort_name":"Katherine",
"cohort_year":"2020",
},
{
"username":"notthefool",
"password": "mamaraised",
"first_name": "d",
"last_name": "Gillespie",
"email":"dgillespie@gmail.com",
"icon_url":"static/Creative-Tail-Animal-fox.svg.png",
"cohort_name":"Ada",
"cohort_year":"2021",
},
{
"username":"notthefool2",
"password": "duplicate_account",
"first_name": "deborah",
"last_name": "Gillespie",
"email":"notthefool@gmail.com",
"icon_url":"static/Creative-Tail-Animal-elephant.svg.png",
"cohort_name":"Ada",
"cohort_year":"2021",
},
{
"username":"mamamaya",
"password": "mamamaya",
"first_name": "Maya",
"last_name": "Lou",
"email":"maya@gmail.com",
"icon_url":"static/Creative-Tail-Animal-duck.svg.png",
"cohort_name":"Katherine",
"cohort_year":"2019",
},
{
"username":"susieq",
"password": "86theboyz",
"first_name": "Susan",
"last_name": "Wesolek",
"email":"susieq86@gmail.com",
"icon_url":"static/Creative-Tail-Animal-bee.svg.png",
"cohort_name":"Ada",
"cohort_year":"2020",
},
{
"username":"balloonicorn",
"password": "omgIHATEchoosingpasswords!!!",
"first_name": "Balloonicorn",
"last_name": "The Unicorn",
"email":"balloonicorn@hackbright.com",
"icon_url":"static/Creative-Tail-Animal-zebra.svg.png",
"cohort_name":"Katherine",
"cohort_year":"2021",
}]
# Persist each demo student via the crud layer, keeping the created rows.
students_in_db = []
for record in student_data:
    created = crud.create_student(
        record["username"],
        record["password"],
        record["first_name"],
        record["last_name"],
        record["email"],
        record["cohort_name"],
        record["cohort_year"],
        record["icon_url"],
    )
    students_in_db.append(created)
# sam = Student(first_name = 'Sam' last_name= 'Bradley', email= 'testy_sam@test.test', password='testypassword', icon_url='testy icon', cohort='Test Cohort 2021', location='Topeka, KS')
# kevyn = Student(student_name = 'Kevyn Bradley', email= 'testy_kevyn@test.test', password='testypassword', icon_url='testy icon', cohort='Test Cohort 2022', location='Auburn, KS')
# maya = Student(student_name = 'Testy Name', email= 'testy_maya@test.test', password='testypassword', icon_url='testy icon', cohort='Test Cohort 2023', location='Kansas City, MO')
# gillespie = Student(student_name = 'Testy Name', email= 'testy_gillespie@test.test', password='testypassword', icon_url='testy icon', cohort='Test Cohort 2024', location='Test City, OK')
# test_attendence = Attendence()
# test_session = StudySession(proposed_time = 'High noon')
# test_topic = Topic(topic_description='Test Topic numero uno-- the first topic we will test!', topic_title='Test 1')
|
{"/seed_database.py": ["/crud.py", "/model.py", "/server.py"], "/server.py": ["/model.py", "/crud.py"], "/crud.py": ["/model.py"]}
|
44,729,240
|
HobbesE/Study-Buddy-Finder
|
refs/heads/main
|
/model.py
|
"""Models for Study Buddy Finder app"""
from flask_sqlalchemy import SQLAlchemy
from flask_login import UserMixin, LoginManager, login_user, login_required
import datetime
db = SQLAlchemy()
def connect_to_db(flask_app, db_uri='postgresql:///hackbrighter', echo=True):
    """Configure *flask_app* for SQLAlchemy and bind the module-level db.

    Args:
        flask_app: the Flask application to configure.
        db_uri: SQLAlchemy database URI (defaults to local Postgres).
        echo: when True, SQLAlchemy logs every emitted SQL statement.
    """
    flask_app.config['SQLALCHEMY_DATABASE_URI'] = db_uri
    flask_app.config['SQLALCHEMY_ECHO'] = echo
    flask_app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    # Fix: the key was misspelled 'REMEMBER-COOKIE_DURATION' (hyphen), so
    # Flask-Login never saw the setting; the correct name is below.
    flask_app.config['REMEMBER_COOKIE_DURATION'] = datetime.timedelta(days=30)
    db.app = flask_app
    db.init_app(flask_app)
    # db.create_all()
    print('Connected to the db!')
class Student(db.Model):
    """A student user account."""
    __tablename__ = 'students'
    # def __init__(self, username, password, first_name, last_name, email, cohort_name, cohort_year):
    #     self.username = username
    #     self.password = password
    #     self.first_name = first_name
    #     self.last_name = last_name
    #     self.email = email
    #     self.icon_url = icon_url
    #     self.cohort_name = cohort_name
    #     self.cohort_year = cohort_year
    #^ Normally, this init would be necessary; in this project, SQLAlchemy does it for me.
    user_id= db.Column(db.Integer,
                        autoincrement=True,
                        primary_key=True)
    username = db.Column(db.String(20), unique=True, nullable=False)
    # NOTE(review): password is compared as plaintext in server.py login()
    # and capped at 20 chars — hash (e.g. werkzeug.security) before prod.
    password = db.Column(db.String(20), nullable=False)
    first_name = db.Column(db.String(20), nullable=False)
    last_name = db.Column(db.String(20))
    email = db.Column(db.String, unique=True, nullable=False)
    icon_url = db.Column(db.String)  # path to the avatar image under static/
    cohort_name = db.Column(db.String, nullable=False)
    cohort_year = db.Column(db.String, nullable=False)
    # sessions_attended = db.Column(db.Integer)
    def __repr__(self):
        return (f"<Student username={self.username} id={self.user_id} ")
    # def study(self, sessions_attended=1):
    #     """Add one study session to student's count"""
    #     self.study += sessions_attended
class Personal(db.Model):
    """Optional profile details for a Student (shares the student's id)."""
    __tablename__ = "personal_info"
    user_id = db.Column(db.Integer, db.ForeignKey("students.user_id"), autoincrement=True, primary_key=True)
    pronouns = db.Column(db.String)
    location = db.Column(db.String)
    goals = db.Column(db.Text)  # career goals, free text
    past_roles = db.Column(db.Text)
    # Social / portfolio links, stored as plain URLs or handles.
    github = db.Column(db.String)
    linkedin = db.Column(db.String)
    spotify = db.Column(db.String)
    instagram = db.Column(db.String)
    def __repr__(self):
        return f"<Personal user_id={self.user_id} pronouns={self.pronouns} location={self.location}>"
class Attendence(db.Model):
    """Association between a Student and a StudySession they joined.

    NOTE(review): 'Attendence' is a misspelling of 'Attendance'; renaming
    touches the table name and every caller, so it is left as-is.
    """
    __tablename__ = 'attendences'
    attendence_id = db.Column(db.Integer,
                        autoincrement = True,
                        primary_key = True)
    study_session_id = db.Column(db.Integer, db.ForeignKey('study_sessions.study_session_id'))
    user_id = db.Column(db.Integer, db.ForeignKey('students.user_id'))
    study_session = db.relationship('StudySession', backref='attendences')
    student = db.relationship('Student', backref='attendences') #TODO: Add uniqueness constraight to user, study_session
    # student = Student.query.get(user_id) <Student username="JBland07"
    # student.username => "JBland07"
    def __repr__(self):
        return f'<Attendence attendence_id= {self.attendence_id} user_id = {self.user_id}> study_session_id {self.study_session_id}>'
class StudySession(db.Model):
    """An opportunity for study buddies to join; creator is a Student."""
    __tablename__ = 'study_sessions'
    study_session_id = db.Column(db.Integer,
                        autoincrement = True,
                        primary_key = True,
                        unique = True)
    # FK to the creating student; joiners are tracked via Attendence rows.
    participant = db.Column(db.Integer,
                        db.ForeignKey('students.user_id'))
    # participant_id = db.Column # This attribute needs to make room for multiple students)
    # (db.Integer,
    # #autoincrement = True, foreign keys don't need to be auto-incremented because primary keys already are!
    # db.ForeignKey('students.user_id'))
    proposed_date = db.Column(db.DateTime )
    # NOTE(review): proposed_time is a string parsed elsewhere with
    # strptime("%Y-%m-%dT%H:%M") — a DateTime column would be safer.
    proposed_time = db.Column(db.String, nullable=False)
    topic = db.Column(db.String, nullable=False)
    capacity = db.Column(db.String)
    prerequisites = db.Column(db.String)
    active = db.Column(db.Boolean, default=True)
    creator = db.relationship('Student', backref='study_sessions')
    comments = db.relationship('Comment', backref='study_session')
    # participant = db.relationship('Student', backref='study_sessions')
    #^Since there can be multiple participants in a study session, we will actually reference the Attendence table
    # topic = db.relationship('Topic', backref='study_sessions')
    #^Since I decided to change topic from a drop down menu to an open-ended input, I removed the topic table.
    # attempting to get all study sessions based off a user
    # From a student: => get the student obj
    # >> user = Student.query.get(*put in a student id*)
    # >> sess = user.sutdy_sessions
    # [<StudySession study_session_id=1 participant=1 proposed_time=noon topic_id = None active = True>,
    # <StudySession study_session_id=2 participant=1 proposed_time=noon topic_id = None active = True>]
    # >> sess[0].proposed_time
    # 'noon'
    def __repr__(self):
        return f'<StudySession study_session_id={self.study_session_id} proposed_time={self.proposed_time} participant={self.participant} topic = {self.topic} active = {self.active}>'
# class Comment(db.Model):
# """Comments within a study session page"""
# __tablename__: 'comments'
# comment_id = db.Column(db.Integer, autoincrement = True, primary_key=True)
# author_id = db.Column(db.Integer), db.ForeignKey('students.user_id')
# message = db.Column(db.Text)
# timestamp = db.Column(db.DateTime())
# status = db.Column(db.Boolean, default=False)
# study_session_id = db.Column(db.Integer), db.ForeignKey('study_sessions.study_session_id')
# def __repr__(self):
# return f'<Comment comment_id={self.comment_id}> text={self.text} user_id={self.user_id}'
class Comment(db.Model):
    """A chat comment posted inside a study session."""
    __tablename__ = "comments"
    comment_id = db.Column(db.Integer, autoincrement=True, primary_key=True)
    comment = db.Column(db.String)
    study_session_id = db.Column(db.Integer, db.ForeignKey("study_sessions.study_session_id"))
    user_id = db.Column(db.Integer, db.ForeignKey("students.user_id"))
    def __repr__(self):
        # Fix: the repr previously printed self.user_id under the
        # 'comment_id=' label, which made debugging output misleading.
        return f"<Comment comment_id={self.comment_id} comment={self.comment} study_session_id={self.study_session_id}>"
class Resource(db.Model):
    """A shared resource link posted inside a study session."""
    __tablename__ = "resources"
    resource_id = db.Column(db.Integer, autoincrement=True, primary_key=True)
    resource = db.Column(db.String)  # URL or title of the shared resource
    description = db.Column(db.String)
    study_session_id = db.Column(db.Integer, db.ForeignKey("study_sessions.study_session_id"))
    user_id = db.Column(db.Integer, db.ForeignKey("students.user_id"))
    def __repr__(self):
        return f"<Resource user_id={self.user_id} resource={self.resource} study_session_id={self.study_session_id}>"
# class Topic(db.Model):
# __tablename__ = 'topics'
# topic = db.Column(db.String,
# primary_key = True)
# topic_description = db.Column(db.String) #pre-requisite recommentations
# topic_title = db.Column(db.String)
# def __repr__(self):
# return f'<Topic topic_id={self.topic_id} topic_title={self.topic_title}>'
#removed cohort_name as a foreign key to COHORT table for the time being-- color coding feature will fall in later sprint.
# class Cohort(db.Model):
# __tablename__ = 'cohorts'
# cohort_name = db.Column(db.String, primary_key = True)
# cohort_color_code = db.Column(db.String, db.ForeignKey('study_sessions.participant')
# )
# def __repr__(self):
# return f'<Cohort cohort_name = {self.cohort_name} cohort_color_code = {self.cohort_color_code}>'
# def get_rosters():
# study_sessions=get_study_sessions()
# rosters=[]
# for study_session in study_sessions:
# roster = take_attendence(study_session.study_session_id)
# rosters.append(roster)
# print('&&&&&&&&&&&&&&&&&&&&&&')
# print (rosters)
# return rosters
#Test data:
# test_student_sam = Student(student_name = 'Sam Bradley', email= 'testy_sam@test.test', password='testypassword', icon_url='testy icon', cohort='Test Cohort 2021', location='Topeka, KS')
# test_student_kevyn = Student(student_name = 'Kevyn Bradley', email= 'testy_kevyn@test.test', password='testypassword', icon_url='testy icon', cohort='Test Cohort 2022', location='Auburn, KS')
# test_student_maya = Student(student_name = 'Testy Name', email= 'testy_maya@test.test', password='testypassword', icon_url='testy icon', cohort='Test Cohort 2023', location='Kansas City, MO')
# test_student_gillespie = Student(student_name = 'Testy Name', email= 'testy_gillespie@test.test', password='testypassword', icon_url='testy icon', cohort='Test Cohort 2024', location='Test City, OK')
# test_attendence = Attendence()
# test_session = StudySession(proposed_time = 'High noon')
# test_topic = Topic(topic_description='Test Topic numero uno-- the first topic we will test!', topic_title='Test 1')
if __name__=='__main__':
    # Running this module directly spins up a throwaway app and connects,
    # handy for poking at the models via `python -i model.py`.
    from flask import Flask
    app=Flask(__name__)
    connect_to_db(app)
|
{"/seed_database.py": ["/crud.py", "/model.py", "/server.py"], "/server.py": ["/model.py", "/crud.py"], "/crud.py": ["/model.py"]}
|
44,729,241
|
HobbesE/Study-Buddy-Finder
|
refs/heads/main
|
/server.py
|
"""Server for Study Buddy Finder app."""
import requests
import json
from flask_sqlalchemy import SQLAlchemy
from flask import Flask, render_template, redirect, request, session, flash
from flask_login import LoginManager, login_user, login_required, logout_user
from model import Student, Attendence, StudySession, connect_to_db, db
from datetime import timedelta, datetime, timezone
from crud import *
import crud
from jinja2 import StrictUndefined
from sqlalchemy.orm.attributes import flag_modified
app = Flask(__name__)
# NOTE(review): hard-coded secret key makes sessions forgeable — load it
# from the environment outside development.
app.secret_key = "DEBUG"
# StrictUndefined makes templates fail loudly on missing variables.
app.jinja_env.undefined = StrictUndefined
# connect_to_db(app)
login_manager = LoginManager()
login_manager.init_app(app)
@app.route('/')
def home():
    """Show the login page, or the list of upcoming study sessions.

    Filters out sessions whose proposed_time (a "%Y-%m-%dT%H:%M" string)
    is already in the past.
    """
    if not session.get('logged_in'):
        return render_template('login.html')
    study_sessions = get_study_sessions()
    # Hoist now() out of the loop so every session is compared against the
    # same instant (the original re-read the clock per iteration).
    now = datetime.now()
    study_sessions_to_show = [
        s for s in study_sessions
        if datetime.strptime(s.proposed_time, "%Y-%m-%dT%H:%M") >= now
    ]
    return render_template('index.html', study_sessions=study_sessions, study_sessions_to_show=study_sessions_to_show)
@app.route('/register') #same endpoint for a different method
def render_register_page():
    """Show the account registration form (GET)."""
    return render_template("register.html")
@app.route('/register', methods = ['POST']) #same endpoint for a different method
def create_student_view():
    """Create a new student account from the registration form."""
    # retrieve values from user's registration form
    username = request.form.get('username')
    password = request.form.get('password')
    first_name = request.form.get('first_name')
    last_name = request.form.get('last_name')
    email = request.form.get('email')
    cohort_name = request.form.get('cohort_name')
    cohort_year = request.form.get('cohort_year')
    icon_url = choose_icon()
    # Personal-info fields start blank; the user fills them in later.
    pronouns = location = goals = past_roles = ""
    github = linkedin = spotify = instagram = ""
    # Check if user exists before adding them
    user = get_user_by_email(email)
    if user:
        flash("This email address is already in use.")
        return redirect('/register')
    # Fix: argument order now matches crud.create_student as called from
    # seed_database.py (first/last name before email); previously email
    # was passed where first_name belongs.
    create_student(username, password, first_name, last_name, email,
                   cohort_name, cohort_year, icon_url)
    create_personal_info(pronouns, location, goals, past_roles,
                         github, linkedin, spotify, instagram)
    # TODO: Sign newly registered user in automatically
    return redirect('/login')
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login hook: fetch the Student for a stored session id."""
    return Student.query.get(user_id)
@app.route('/define-cohort')
# NOTE(review): function name has a typo ('chort'); harmless, but renaming
# would also rename the Flask endpoint used by url_for — check templates.
def display_chort_help():
    """Display page to explain Hackbright cohorts"""
    return render_template("define-cohort.html")
@app.route('/login')
# NOTE(review): '/login' is also registered below by login() with GET —
# this earlier registration is the one that actually serves GET requests.
def display_login():
    """Display login"""
    return render_template("login.html")
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Validate credentials and store the student id in the session.

    Fix: a GET request previously fell off the end of the function and
    returned None (a 500); it now renders the login form instead.
    """
    if request.method != 'POST':
        return render_template("login.html")
    username = request.form.get("username")
    password = request.form.get("password")
    student = Student.query.filter_by(username=username).first()
    if not student:
        flash("Hmm.. that didn't quite work.")
        return redirect("/login")
    # NOTE(review): plaintext password comparison — hash with
    # werkzeug.security (or similar) before production.
    if student.password == password:
        session['logged_in'] = student.user_id
        flash("You're in!")
        return redirect("/")
    flash("Ope. That didn't go well.")
    return redirect("/login")
@app.route("/logout")
# @login_required
def display_logout():
    """Show the logout confirmation page (GET)."""
    return render_template("logout.html")
@app.route("/logout", methods=['POST'])
# @login_required
def logout():
    """Log a student out and return to the home page.

    Fix: login() stores the id under session['logged_in'] and never calls
    flask_login.login_user, so logout_user() alone did not actually end
    the session; the app's own marker must be cleared too.
    """
    session.pop('logged_in', None)
    logout_user()
    return redirect("/")
@app.route('/study-session/<int:study_session_id>', methods=['POST', 'GET'])
def display_study_sess(study_session_id):
    """Show one study session; handle comment/resource form posts.

    Fix: the original indexed session['logged_in'] directly, so anonymous
    visitors got a KeyError (500); they are now redirected to the login
    page instead.
    """
    user_id = session.get('logged_in')
    if not user_id:
        return redirect('/login')
    if request.form.get("add_comment"):
        create_comment(request.form.get("comment"), study_session_id, user_id)
        return redirect(f"/study-session/{study_session_id}")
    if request.form.get("add_resource"):
        create_resource(request.form.get("resource"),
                        request.form.get("description"),
                        study_session_id, user_id)
        return redirect(f"/study-session/{study_session_id}")
    roster = take_attendence(study_session_id)
    study_session = get_study_session_by_id(study_session_id)
    comments = get_comments(study_session_id)
    resources = get_resources(study_session_id)
    return render_template("study-session.html", study_session=study_session, roster=roster, study_session_id=study_session_id, comments=comments, resources=resources, user_id=user_id)
@app.route('/student/<username>')
# @login_required
def profile(username):
    """Return student profile page"""
    # NOTE(review): the <username> URL argument is immediately overwritten
    # from the logged-in session below — every /student/<x> URL shows the
    # current user's own profile; confirm whether that is intended.
    user_id=session['logged_in']
    student=Student.query.get(user_id)
    username=student.username
    student_obj = Student.query.filter_by(username=username).first() #what we want to filter by=the subjective attribute we're going to be filtering for (JBland07)
    personal_obj = Personal.query.get(user_id)
    created_sessions = student_obj.study_sessions
    # one student can create many study sessions
    # a study session can only be created by one user
    # student.study_sessions = [] <-- "many" of our "one to many" rlsp
    # study_session.creator = <Student> <-- "one"
    # Put a conditional here to stop creator from joining.
    participating_sessions = get_user_study_sessions(student_obj)
    # NOTE(review): debug prints below — remove before production.
    print("*"*30)
    print(personal_obj)
    # print('*****************IN USER PROFILE ROUTE!******************')
    # print(student_sessions) #when you print in a view function it prints in the ~terminal~!
    # participants_for_study_sessions(participant_id)
    # Split all sessions into upcoming vs. completed by comparing the
    # "%Y-%m-%dT%H:%M" proposed_time string against the current time.
    study_sessions=get_study_sessions()
    study_sessions_to_show=[]
    completed_sessions=[]
    for study_session in study_sessions:
        study_session_time = datetime.strptime(study_session.proposed_time, "%Y-%m-%dT%H:%M")
        now= datetime.now()
        if study_session_time >= now:
            study_sessions_to_show.append(study_session)
        elif study_session_time <= now:
            completed_sessions.append(study_session)
    return render_template("profile2.html", student_obj=student_obj, personal_obj=personal_obj, created_sessions=created_sessions, participating_sessions=participating_sessions, study_sessions_to_show=study_sessions_to_show, completed_sessions=completed_sessions)
@app.route('/student')
# @login_required
def reroute_profile():
    """Redirect /student to the logged-in student's profile URL.

    The redirect only needs the username; the original also ran a
    redundant filter_by query and computed created/participating
    sessions that were never used (their render was commented out).
    """
    user_id = session['logged_in']
    student = Student.query.get(user_id)
    # NOTE: template image src paths must start with "/" or they break on
    # nested routes like the target of this redirect.
    return redirect(f"/student/{student.username}")
@app.route('/create_opportunity')
# @login_required
def render_create_opportunity():
    """Show the form for proposing a new study session."""
    return render_template("/create_opportunity.html")
@app.route('/create_opportunity', methods=['POST'])
#@login_required
def create_opportunity():
    """Create a study session from the submitted form, then go home."""
    form = request.form
    participant = form.get('participant')
    proposed_time = form.get('proposed_time')
    topic = form.get('topic')
    # An empty capacity field arrives as "" rather than None.
    capacity = form.get('capacity')
    prerequisites = form.get('prerequisites')
    creator = crud.get_participant(session['logged_in'])
    create_study_session(participant, proposed_time, topic, capacity, prerequisites, creator)
    return redirect("/")
# When a study_opp event is created
# the study_opp event information should be displayed in index.html,
# including participant icon/link to their profile, study topic, datetime, and max
# @app.route('/creator_attending<study_session_id>')
# #@login_required
# def creator_join(study_session_id):
# creator=session['logged_in']
# study_sessions=get_study_sessions()
# attend(study_session_id, creator)
# I wish I could get this to work! Creator should automatically join the study_session
# return redirect("/")
@app.route('/join_session/<int:study_session_id>')
# @login_required
def create_connection(study_session_id):
    """Register the logged-in student as an attendee of a study session.

    Removed a dead get_study_sessions() call whose result was never used
    (assumed to be a read-only query with no side effects — it is a plain
    fetch helper elsewhere in this file).
    """
    user_id = session['logged_in']
    attend(study_session_id, user_id)
    return redirect('/')
# @app.route('/res')
# # @login_required
# def geolocate():
# address = "683 Sutter St., San Francisco, CA 94102"
# params = {
# 'key': API_KEY_2,
# 'address': address
# }
# base_url ='https://maps.googleapis.com/maps/api/geocode/json?'
# response = requests.get(base_url, params=params)
# res = response.json()
# # print(response)
# # print(response.json().keys)
# if res['status'] == 'OK':
# geometry = res['results'][0]['geometry']
# geometry['location']['lat']
# lat = geometry['location']['lat']
# long = geometry['location']['lng']
# else:
# print("Pllzzzzz give me your addresss!!!! It'll be fiiiine.")
# return redirect("/hackbrighter_map")
@app.route('/hackbrighter_map')
# @login_required
def initMap():
    """Return the student map page."""
    # Name kept as-is: it doubles as the Flask endpoint name.
    return render_template("hackbrighter_map.html")
@app.route('/team_calendar')
# @login_required
def view_calendar():
    """Return the student calendar view of the application."""
    return render_template("team_calendar.html")
@app.route('/buddies')
# @login_required
def view_buddies():
    """Return the page of students the user has collaborated with.

    Bug fix: the original filtered on the literal string "username"
    (Student.query.filter_by(username="username")), which matches no
    student and made .study_sessions raise AttributeError on None.
    It now looks up the logged-in student, like the sibling routes.
    """
    user_id = session['logged_in']
    student_obj = Student.query.get(user_id)
    # Sessions this student created (the "many" side of creator->sessions).
    created_sessions = student_obj.study_sessions
    # Sessions this student joined as a participant.
    participating_sessions = get_user_study_sessions(student_obj)
    return render_template("buddies.html", student_obj=student_obj, created_sessions=created_sessions, participating_sessions=participating_sessions)
@app.route('/inbox')
# @login_required
def view_inbox():
    """Return the student's direct-message inbox."""
    return render_template("inbox.html")
@app.route('/projects')
# @login_required
def view_projects():
    """Return the project sharing page."""
    # Developer reminders — printed to the server terminal on each hit.
    print("Ask Alena to make a StHack Overflow page")
    print("Ask Sam if she'd like to contribute her locations code to help people find cool study spaces")
    print("Place to host ongoing project teams?")
    return render_template("projects.html")
@app.route('/about')
# @login_required
def view_about():
    """Return information about the Hackbrighter web application."""
    return render_template("about.html")
@app.route('/user_preferences')
#@login_required
def redirect_preferences():
    """Send the user to their personal preferences page.

    The redirect only needs the session user id; the original also
    fetched the Student row and its username without using either.
    """
    user_id = session['logged_in']
    return redirect(f"/user_preferences/{user_id}")
@app.route('/user_preferences', methods=['POST', 'GET'])
# @login_required
def view_preferences(user_id):
    """Show and update the logged-in user's personal info.

    NOTE(review): the <user_id> URL value is immediately overwritten by
    the session user, so only the logged-in user can be edited — same as
    the original behavior.

    Changes from the original: the bare `except:` is narrowed to
    Exception with a rollback (a failed commit otherwise leaves the
    SQLAlchemy session unusable); unused `form = Personal()` and
    `pronouns_to_update` bindings are removed, with get_or_404 kept so a
    missing Personal row still yields 404; the pointless `f` prefixes on
    placeholder-free strings are dropped (identical values).
    """
    user_id = session['logged_in']
    student_obj = Student.query.get(user_id)
    # get_or_404 preserves the original guard: no Personal row -> 404,
    # before any form data is applied.
    personal_obj = Personal.query.get_or_404(user_id)
    created_sessions = student_obj.study_sessions
    participating_sessions = get_user_study_sessions(student_obj)
    if request.method == "POST":
        personal_obj.user_id = user_id
        # Copy every editable field straight from the submitted form.
        for field in ("pronouns", "location", "goals", "past_roles",
                      "github", "linkedin", "spotify", "instagram"):
            setattr(personal_obj, field, request.form[field])
        try:
            db.session.commit()
            flash("Update Successful")
        except Exception:
            # Roll back so the DB session isn't left in a broken state.
            db.session.rollback()
            flash("Error! That didn't quite work! Try again.")
    # All three original branches rendered the same template with the
    # same context, so the render is consolidated here.
    return render_template("user_preferences.html", student_obj=student_obj, personal_obj=personal_obj, created_sessions=created_sessions, participating_sessions=participating_sessions)
#log out
#forgot password
#about the website
#full calendar view
#create a new study session
#inside a study session
#study buddy opportunty board-- homepage?
#dashboard
# Run only when executed directly (not when imported, e.g. by the seeder).
if __name__ == '__main__':
    # Bind the Flask app to the database before serving requests.
    connect_to_db(app, echo=False)
    # debug=True already implies the reloader and debugger; the explicit
    # flags just make that choice visible.
    app.run(debug=True, use_reloader=True, use_debugger=True)
|
{"/seed_database.py": ["/crud.py", "/model.py", "/server.py"], "/server.py": ["/model.py", "/crud.py"], "/crud.py": ["/model.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.