Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions tasks/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
from . import demo_apps
from . import format_code
from . import gc
from . import kernel
from . import k8s
from . import k9s
from . import kata
Expand Down Expand Up @@ -37,6 +38,7 @@
k9s,
kata,
kbs,
kernel,
knative,
kubeadm,
nydus,
Expand Down
120 changes: 120 additions & 0 deletions tasks/kernel.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,120 @@
from invoke import task
from os import makedirs
from os.path import exists, join
from tasks.util.env import KATA_CONFIG_DIR, KATA_IMG_DIR, KATA_RUNTIMES, SC2_RUNTIMES
from tasks.util.kata import KATA_SOURCE_DIR, copy_from_kata_workon_ctr
from tasks.util.toml import update_toml
from tasks.util.versions import GUEST_KERNEL_VERSION
from subprocess import run


def build_guest(debug=False, hot_replace=False):
    """
    Build the guest kernel using Kata's build-kernel.sh script.

    Note that, for the time being, there is no difference between the SC2
    and non-SC2 guest kernels. We still need to update them all, because our
    manual rootfs build requires a manual kernel build too (for some reason).
    """
    build_dir = "/tmp/sc2-guest-kernel-build-dir"

    # Always start from a clean scratch directory
    if exists(build_dir):
        run(f"sudo rm -rf {build_dir}", shell=True, check=True)

    makedirs(build_dir)
    makedirs(join(build_dir, "kernel"))
    makedirs(join(build_dir, "scripts"))

    # Files build-kernel.sh needs, relative to Kata's tools/packaging dir
    needed_files = [
        "kernel/build-kernel.sh",
        "kernel/configs/",
        "kernel/kata_config_version",
        "kernel/patches/",
        "scripts/apply_patches.sh",
        "scripts/lib.sh",
    ]

    # WARNING: for the time being it is OK to copy from the SC2 Kata source
    # dir because there is no difference between SC2 and non-SC2 guest
    # kernels, but this is something we should keep in mind.
    for rel_path in needed_files:
        copy_from_kata_workon_ctr(
            join(KATA_SOURCE_DIR, "tools/packaging", rel_path),
            join(build_dir, rel_path),
            sudo=False,
            debug=debug,
            hot_replace=hot_replace,
        )

    # The -u URL points at the upstream tarball mirror for this major version
    kernel_major = GUEST_KERNEL_VERSION.split(".")[0]
    base_cmd = (
        f"./build-kernel.sh -x -f -v {GUEST_KERNEL_VERSION} "
        f"-u 'https://cdn.kernel.org/pub/linux/kernel/v{kernel_major}.x/'"
    )

    # Install APT deps needed to build guest kernel
    res = run(
        "sudo apt install -y bison flex libelf-dev libssl-dev make",
        shell=True,
        capture_output=True,
    )
    assert res.returncode == 0, "Error installing deps: {}".format(
        res.stderr.decode("utf-8")
    )

    # Two phases: "setup" fetches/patches/configures, "build" compiles
    for step in ["setup", "build"]:
        res = run(
            f"{base_cmd} {step}",
            shell=True,
            capture_output=True,
            cwd=join(build_dir, "kernel"),
        )
        assert res.returncode == 0, "Error building guest kernel: {}\n{}".format(
            res.stdout.decode("utf-8"), res.stderr.decode("utf-8")
        )
        if debug:
            print(res.stdout.decode("utf-8"))

    # The build output directory name embeds the Kata config version, so
    # read it back from the copied kata_config_version file
    with open(join(build_dir, "kernel", "kata_config_version"), "r") as fh:
        kata_config_version = fh.read().strip()

    # Copy the built kernel into the desired path
    sc2_kernel_name = "vmlinuz-confidential-sc2.container"
    src_path = join(
        build_dir,
        "kernel",
        f"kata-linux-{GUEST_KERNEL_VERSION}-{kata_config_version}",
        "arch",
        "x86",
        "boot",
        "bzImage",
    )
    dst_path = join(KATA_IMG_DIR, sc2_kernel_name)
    run(f"sudo cp {src_path} {dst_path}", shell=True, check=True)

    # Point every runtime's config file at the newly installed kernel
    for runtime in KATA_RUNTIMES + SC2_RUNTIMES:
        conf_file_path = join(KATA_CONFIG_DIR, f"configuration-{runtime}.toml")
        updated_toml_str = f"""
        [hypervisor.qemu]
        kernel = "{dst_path}"
        """
        update_toml(conf_file_path, updated_toml_str)


@task
def hot_replace_guest(ctx, debug=False):
    """
    Re-build the guest kernel and hot-replace it in the running deployment.

    Thin invoke wrapper around build_guest with hot_replace enabled, so the
    freshly built kernel is picked up without a full re-deploy.
    """
    build_guest(debug=debug, hot_replace=True)
66 changes: 40 additions & 26 deletions tasks/sc2.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
from invoke import task
from os import makedirs
from os import environ, makedirs
from os.path import exists, join
from subprocess import run
from tasks.containerd import install as containerd_install
Expand All @@ -8,6 +8,7 @@
)
from tasks.k8s import install as k8s_tooling_install
from tasks.k9s import install as k9s_install
from tasks.kernel import build_guest as build_guest_kernel
from tasks.knative import install as knative_install
from tasks.kubeadm import create as k8s_create, destroy as k8s_destroy
from tasks.nydus_snapshotter import install as nydus_snapshotter_install
Expand Down Expand Up @@ -44,10 +45,35 @@
stop as stop_local_registry,
)
from tasks.util.toml import update_toml
from tasks.util.versions import COCO_VERSION, KATA_VERSION
from tasks.util.versions import COCO_VERSION, GUEST_KERNEL_VERSION, KATA_VERSION
from time import sleep


def start_vm_cache(debug=False):
    """
    Build and start the VM cache server in the background.

    The server lives in the vm-cache/ directory of the project; we build it
    with cargo and then launch it as root (`sudo -E` so the current
    environment is preserved for the child process).

    :param debug: when True, print progress messages and cargo's stdout
    """
    vm_cache_dir = join(PROJ_ROOT, "vm-cache")

    # Build the VM cache server
    if debug:
        print("Building VM cache wrapper...")
    result = run(
        "cargo build --release", cwd=vm_cache_dir, shell=True, capture_output=True
    )
    # FIX: previously this was `assert cond, print(...)`, which evaluates to
    # None and therefore raised an AssertionError with no message; attach
    # cargo's stderr to the error instead so build failures are diagnosable
    assert result.returncode == 0, "Error building VM cache: {}".format(
        result.stderr.decode("utf-8").strip()
    )
    if debug:
        print(result.stdout.decode("utf-8").strip())

    # Run the VM cache server in the background
    if debug:
        print("Running VM cache wrapper in background mode...")
    run(
        "sudo -E target/release/vm-cache background > /dev/null 2>&1",
        cwd=vm_cache_dir,
        env=environ,
        shell=True,
        check=True,
    )


def install_sc2_runtime(debug=False):
"""
This script installs SC2 as a different runtime class
Expand Down Expand Up @@ -163,30 +189,6 @@ def install_sc2_runtime(debug=False):
sc2=True,
)

# ---------- VM Cache ---------

vm_cache_dir = join(PROJ_ROOT, "vm-cache")

# Build the VM cache server
if debug:
print("Building VM cache wrapper...")
result = run(
"cargo build --release", cwd=vm_cache_dir, shell=True, capture_output=True
)
assert result.returncode == 0, print(result.stderr.decode("utf-8").strip())
if debug:
print(result.stdout.decode("utf-8").strip())

# Run the VM cache server in the background
if debug:
print("Running VM cache wrapper in background mode...")
run(
"sudo -E target/release/vm-cache background > /dev/null 2>&1",
cwd=vm_cache_dir,
shell=True,
check=True,
)


@task(default=True)
def deploy(ctx, debug=False, clean=False):
Expand Down Expand Up @@ -291,9 +293,21 @@ def deploy(ctx, debug=False, clean=False):
install_sc2_runtime(debug=debug)
print("Success!")

# Build and install the guest VM kernel (must be after installing SC2, so
# that we can patch all config files)
print_dotted_line(f"Build and install guest VM kernel (v{GUEST_KERNEL_VERSION})")
build_guest_kernel()
print("Success!")

# Once we are done with installing components, restart containerd
restart_containerd(debug=debug)

# Start the VM cache at the end so that we can pick up the latest config
# changes
print_dotted_line("Starting cVM cache...")
start_vm_cache(debug=debug)
print("Success!")

# Push demo apps to local registry for easy testing
push_demo_apps_to_local_registry(debug=debug)

Expand Down
4 changes: 3 additions & 1 deletion tasks/util/env.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from os.path import dirname, expanduser, realpath, join
from subprocess import run
from tasks.util.versions import KATA_VERSION
from tasks.util.versions import KATA_VERSION, PAUSE_IMAGE_VERSION

PROJ_ROOT = dirname(dirname(dirname(realpath(__file__))))

Expand All @@ -18,6 +18,8 @@
K8S_CONFIG_FILE = "/etc/kubernetes/admin.conf"
# This value is hardcoded in ./.config/kubeadm.conf
CRI_RUNTIME_SOCKET = "unix:///run/containerd/containerd.sock"
PAUSE_IMAGE_REPO = "docker://registry.k8s.io/pause"
PAUSE_IMAGE = f"{PAUSE_IMAGE_REPO}:{PAUSE_IMAGE_VERSION}"

# Containerd

Expand Down
Loading