From b950de75e7ae3806ce68935b6837e9dc8434389b Mon Sep 17 00:00:00 2001
From: Brett Mastbergen
Date: Tue, 3 Feb 2026 10:11:43 -0500
Subject: [PATCH] kt: Add content-release command for kernel release automation

Introduces a new 'content-release' command that automates the complete
kernel content release workflow through three stages: prepare, build,
and test. The command can run all stages sequentially (default) or
execute individual stages via --prepare, --build, and --test flags.

Prepare Stage:
- Validates git user.name and user.email configuration in dist-git repo
- Executes mkdistgitdiff.py to generate staging branch and release files
- Staging branch format: {automation_tmp}_<src branch>
- Checks out the newly created staging branch
- Extracts and displays the new release tag from script output

Build Stage:
- Verifies mock command availability and user membership in mock group
- Validates DEPOT_USER and DEPOT_TOKEN environment variables
- Creates temporary mock config with depot credentials injected
- Downloads kernel sources using getsrc.sh script
- Builds source RPM with mock in build_files directory
- Builds binary RPMs from the SRPM
- Lists all created RPM packages
- Cleans up temporary mock configuration

Test Stage:
- Spins up VM for the kernel workspace (creates it if needed)
- Installs built kernel RPMs excluding .src.rpm, kernel-rt*, kernel-debug*
- Reboots VM and verifies running kernel version matches installed version
- Executes kselftests using /usr/libexec/kselftests/run_kselftest.sh
- Counts and reports number of tests passed
- Outputs logs: install.log, selftest-<kernel version>.log

Additional Changes:
- Refactored VM implementation to extract reusable functions:
  * Vm.load_from_workspace(): Load config and VM from workspace name
  * Vm.setup_and_spinup(): Common VM setup and spinup logic
- Increased VM disk size to 30GB during creation to accommodate kernel
  packages and debug symbols
- Updated KT.md with comprehensive documentation for the new command

The content-release command streamlines the kernel release process by
automating previously manual steps, ensuring consistency and reducing
the potential for human error in the release workflow.

kt: Fix grubenv symlink issue preventing kernel boot

On lts-8.6, /boot/grub2/grubenv is a symlink to
/boot/efi/EFI/rocky/grubenv and grub can't follow the symlink when
reading the default boot entry. Replace the symlink with a real file
before using grubby to set the default kernel.

The symlink check must run with sudo permissions to access the grubenv
file. This ensures grub can read the saved_entry and boot the correct
kernel.
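
For reviewers, a minimal sketch of the default (all-stages) flow, mirroring what
kt/commands/content_release/command.py in this patch does when no step flag is
given. It assumes the kt package from this patch is importable and that the
named workspace exists with the usual prerequisites (dist-git checkout,
DEPOT_USER/DEPOT_TOKEN for build, libvirt for test); "lts-9.2" is only an
example workspace name:

```python
# Sketch of the all-stages dispatch, matching the click command added in this patch.
from kt.commands.content_release.impl import ContentRelease

workspace = "lts-9.2"  # example workspace name

# Without --prepare/--build/--test, the command runs every stage in order.
ContentRelease.prepare(kernel_workspace=workspace)  # git config check, mkdistgitdiff.py, staging branch checkout
ContentRelease.build(kernel_workspace=workspace)    # mock SRPM + binary RPM builds into build_files/
ContentRelease.test(kernel_workspace=workspace)     # VM spin-up, RPM install, reboot, kselftests
```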
--- bin/kt | 2 + kernel_install_dep.sh | 2 + kt/KT.md | 55 +++ kt/commands/content_release/command.py | 75 +++++ kt/commands/content_release/impl.py | 450 +++++++++++++++++++++++++ kt/commands/vm/command.py | 3 + kt/commands/vm/impl.py | 275 +-------------- kt/data/kernels.yaml | 19 +- kt/ktlib/command_runner.py | 12 +- kt/ktlib/kernel_workspace.py | 40 +++ kt/ktlib/kernels.py | 3 + kt/ktlib/local.py | 12 + kt/ktlib/mock.py | 241 +++++++++++++ kt/ktlib/vm.py | 373 ++++++++++++++++++++ 14 files changed, 1286 insertions(+), 276 deletions(-) create mode 100644 kt/commands/content_release/command.py create mode 100644 kt/commands/content_release/impl.py create mode 100644 kt/ktlib/local.py create mode 100644 kt/ktlib/mock.py create mode 100644 kt/ktlib/vm.py diff --git a/bin/kt b/bin/kt index 68c5b57..f7e492f 100755 --- a/bin/kt +++ b/bin/kt @@ -5,6 +5,7 @@ import logging import click from kt.commands.checkout.command import checkout +from kt.commands.content_release.command import content_release from kt.commands.git_push.command import git_push from kt.commands.list_kernels.command import list_kernels from kt.commands.setup.command import setup @@ -30,6 +31,7 @@ def main(): cli.add_command(checkout) cli.add_command(git_push) cli.add_command(vm) + cli.add_command(content_release) cli() diff --git a/kernel_install_dep.sh b/kernel_install_dep.sh index 372cd81..2f8c337 100755 --- a/kernel_install_dep.sh +++ b/kernel_install_dep.sh @@ -166,6 +166,7 @@ install_kselftest_deps_10() { iptables \ iputils \ ipvsadm \ + jq \ kernel-devel \ kernel-selftests-internal \ kernel-tools \ @@ -179,6 +180,7 @@ install_kselftest_deps_10() { llvm \ ncurses-devel \ net-tools \ + netsniff-ng \ nftables \ nmap-ncat \ numactl-devel \ diff --git a/kt/KT.md b/kt/KT.md index 100456a..498e13d 100644 --- a/kt/KT.md +++ b/kt/KT.md @@ -295,3 +295,58 @@ and then ``` /kernel-src-tree-tools/kernel-kselftest.sh ``` + +### kt content-release + +Manages the complete content release workflow for kernel packages. This command +automates the process of preparing, building, and testing kernel releases. 
+ +The command has three steps that can be run individually or all together: + +#### --prepare +Prepares the content release by: +- Validating git user.name and user.email are configured +- Running mkdistgitdiff.py to generate the staging branch and release files +- Checking out the staging branch {automation_tmp}_ +- Creating and displaying the new release tag + +#### --build +Builds kernel RPMs by: +- Verifying mock is installed and user is in mock group +- Checking DEPOT_USER and DEPOT_TOKEN environment variables are set +- Creating a temporary mock config with depot credentials +- Downloading sources using getsrc.sh +- Building SRPM with mock +- Building binary RPMs from the SRPM +- Listing all created RPMs + +Requirements: +- mock must be installed +- User must be in the mock group +- DEPOT_USER and DEPOT_TOKEN environment variables must be set + +#### --test +Tests the built kernel by: +- Spinning up a VM (creates if needed, boots if stopped) +- Installing the built kernel RPMs from build_files +- Rebooting the VM +- Running kselftests using /usr/libexec/kselftests/run_kselftest.sh +- Reporting number of tests passed + +Output logs: +- install.log: RPM installation output +- selftest-.log: Kselftest results + +#### Example: + +Run all steps: +``` +$ DEPOT_USER=user@example.com DEPOT_TOKEN=token kt content-release lts-9.2 +``` + +Run individual steps: +``` +$ kt content-release lts-9.2 --prepare +$ DEPOT_USER=user@example.com DEPOT_TOKEN=token kt content-release lts-9.2 --build +$ kt content-release lts-9.2 --test +``` diff --git a/kt/commands/content_release/command.py b/kt/commands/content_release/command.py new file mode 100644 index 0000000..204b045 --- /dev/null +++ b/kt/commands/content_release/command.py @@ -0,0 +1,75 @@ +import click + +from kt.commands.content_release.impl import ContentRelease +from kt.ktlib.shell_completion import ShellCompletion + +epilog = """ +Manages the complete content release workflow for kernel packages. + +This command automates the kernel content release process through three steps: + +--prepare: Validates git config, runs mkdistgitdiff.py to create staging branch, + and creates a new tagged release in the src_worktree. + +--build: Builds both source and binary RPMs using mock. Downloads sources via + getsrc.sh and builds kernel packages in build_files directory. + Requires DEPOT_USER and DEPOT_TOKEN environment variables. + +--test: Spins up a VM, installs the built kernel RPMs, reboots, and runs + kselftests using /usr/libexec/kselftests/run_kselftest.sh. + +When run without options, executes all three steps sequentially. 
+ +Examples: + +\b +$ DEPOT_USER=user@example.com DEPOT_TOKEN=token kt content-release lts-9.2 +\b +$ kt content-release lts-9.2 --prepare +\b +$ DEPOT_USER=user@example.com DEPOT_TOKEN=token kt content-release lts-9.2 --build +\b +$ kt content-release lts-9.2 --test +""" + + +@click.command(epilog=epilog) +@click.argument("kernel_workspace", required=True, shell_complete=ShellCompletion.show_kernel_workspaces) +@click.option( + "--prepare", + is_flag=True, + help="Run only the prepare step", +) +@click.option( + "--build", + is_flag=True, + help="Run only the build step", +) +@click.option( + "--test", + is_flag=True, + help="Run only the test step", +) +def content_release(kernel_workspace, prepare, build, test): + """Manage content release workflow (prepare, build, test).""" + + # Check if any specific step was requested + any_step_specified = prepare or build or test + + # Determine which steps to run + run_prepare_step = prepare or not any_step_specified + run_build_step = build or not any_step_specified + run_test_step = test or not any_step_specified + + if not any_step_specified: + click.echo(f"Running all content-release steps for {kernel_workspace}: prepare, build, test") + + # Run the selected steps + if run_prepare_step: + ContentRelease.prepare(kernel_workspace=kernel_workspace) + + if run_build_step: + ContentRelease.build(kernel_workspace=kernel_workspace) + + if run_test_step: + ContentRelease.test(kernel_workspace=kernel_workspace) diff --git a/kt/commands/content_release/impl.py b/kt/commands/content_release/impl.py new file mode 100644 index 0000000..699efac --- /dev/null +++ b/kt/commands/content_release/impl.py @@ -0,0 +1,450 @@ +import logging +import os +import re +import time + +from git import GitCommandError, Repo + +from kt.ktlib.config import Config +from kt.ktlib.kernel_workspace import KernelWorkspace +from kt.ktlib.local import LocalCommand +from kt.ktlib.mock import Mock +from kt.ktlib.ssh import SshCommand +from kt.ktlib.util import Constants +from kt.ktlib.vm import Vm + +# Source download configuration for kernels that don't work with getsrc.sh +SOURCE_DOWNLOAD_CONFIG = { + "lts-8.6": { + "base_url": "https://rocky-linux-sources-staging.a1.rockylinux.org", + "files": [ + ("cd67969ef0be82516b144066d3897b071f59f2a2", "kernel-abi-stablelists-4.18.0-372.tar.bz2"), + ("89ce72b86bacc9c2cd712784e9053d9c36f37c23", "kernel-kabi-dw-4.18.0-372.tar.bz2"), + ("c48b00ba5e77fcf4bc9e2dd5e58f1791ae71e8c8", "linux-4.18.0-372.32.1.el8_6.tar.xz"), + ], + }, + "fipslegacy-8.6": { + "base_url": "https://rocky-linux-sources-staging.a1.rockylinux.org", + "files": [ + ("feac61524ad00b8b03f2985f8ac330c7939ba425", "kernel-abi-stablelists-4.18.0-425.tar.bz2"), + ("f2fb49be6e6fe2782bc58e2914d8dcc7b2948764", "kernel-kabi-dw-4.18.0-425.tar.bz2"), + ("57cc7ba600df4d74be3a1b8c2324ea69b92699e4", "linux-4.18.0-425.13.1.el8_7.tar.xz"), + ], + }, + "cbr-7.9": { + "base_url": "https://git.centos.org/sources/kernel/c7", + "files": [ + ("ba5599148e52ecd126ebcf873672e26d3288323e", "kernel-abi-whitelists-1160.tar.bz2"), + ("5000b85c42ef87b6835dd8eef063e4623c2e0fa9", "kernel-kabi-dw-1160.tar.bz2"), + ("83cf85ab62fc9dca6d34175c60cc17cb917d7e0d", "linux-3.10.0-1160.119.1.el7.tar.xz"), + ], + }, +} + + +class ContentRelease: + """Manages the complete content release workflow for kernel packages.""" + + @classmethod + def prepare(cls, kernel_workspace: str): + """ + Prepare step for content release. + + Ensures git user.name and user.email are configured in the kernel-dist-git repo + or globally. 
+ + Args: + kernel_workspace: The name of the kernel workspace (e.g., 'lts-9.4') + """ + logging.info(f"Running prepare step for kernel workspace: {kernel_workspace}") + + # Load the kernel workspace to get the dist-git repo path + kernel_workspace_obj = KernelWorkspace.load_from_name(kernel_workspace) + config = Config.load() + + # Get the kernel-dist-git repo + dist_git_path = kernel_workspace_obj.dist_worktree.folder + + logging.info(f"Checking git config in {dist_git_path}") + + # Check user.name and user.email + user_name = kernel_workspace_obj.dist_worktree.check_git_config_value("user", "name") + user_email = kernel_workspace_obj.dist_worktree.check_git_config_value("user", "email") + + missing = [] + if not user_name: + missing.append("user.name") + if not user_email: + missing.append("user.email") + + if missing: + raise RuntimeError( + f"Git config {' and '.join(missing)} not set in {dist_git_path} or globally. " + f"Please configure them using:\n" + f" git config --global user.name 'Your Name'\n" + f" git config --global user.email 'your.email@example.com'\n" + f"Or set them locally in the repository:\n" + f" cd {dist_git_path}\n" + f" git config user.name 'Your Name'\n" + f" git config user.email 'your.email@example.com'" + ) + + logging.info(f"Git config validated: user.name='{user_name}', user.email='{user_email}'") + + # Run mkdistgitdiff.py + mkdistgitdiff_script = config.base_path / "kernel-tools" / "mkdistgitdiff.py" + if not mkdistgitdiff_script.exists(): + raise RuntimeError(f"mkdistgitdiff.py not found at {mkdistgitdiff_script}") + + logging.info(f"Running mkdistgitdiff.py from {dist_git_path}") + + # Get just the branch name from remote_branch (strip "origin/" prefix) + src_branch_name = str(kernel_workspace_obj.src_worktree.remote_branch).split("/")[-1] + staging_branch = f"{{automation_tmp}}_{src_branch_name}" + + cmd = [ + str(mkdistgitdiff_script), + "--srcgit", + str(kernel_workspace_obj.src_worktree.folder.absolute()), + "--srcgit-branch", + kernel_workspace_obj.src_worktree.local_branch, + "--distgit", + ".", + "--distgit-branch", + kernel_workspace_obj.dist_worktree.local_branch, + "--distgit-staging-branch", + staging_branch, + "--last-tag", + "--bump", + ] + + output_file = kernel_workspace_obj.folder.absolute() / "mkdistgitdiff.log" + logging.info(f"Output will be written to {output_file}") + + try: + LocalCommand.run_with_output(command=cmd, output_file=str(output_file), cwd=dist_git_path) + logging.info("mkdistgitdiff.py completed successfully") + except RuntimeError as e: + logging.error(f"mkdistgitdiff.py failed with exit code {e}") + logging.error(f"Check {output_file} for details") + raise RuntimeError(f"mkdistgitdiff.py failed. 
See {output_file} for details") + + # Checkout the new staging branch created by mkdistgitdiff + logging.info(f"Checking out staging branch: {staging_branch}") + + repo = Repo(dist_git_path) + try: + repo.git.checkout(staging_branch) + logging.info(f"Successfully checked out {staging_branch}") + except GitCommandError as e: + logging.error(f"Failed to checkout {staging_branch}: {e}") + raise RuntimeError(f"Failed to checkout staging branch {staging_branch}") + + # Verify and display the newly created tag from mkdistgitdiff output + try: + with open(output_file, "r") as f: + log_content = f.read() + # Look for the "Content Release" line which contains the new tag + # Pattern matches version-like strings: kernel-X.Y.Z-A.B+C.D.elN_M_ciq + tag_pattern = r"Content Release\s+(kernel-[\d\.]+-[\d\.]+\+[\d\.]+\.el\d+_\d+_ciq)" + match = re.search(tag_pattern, log_content) + if match: + tag_name = match.group(1) + logging.info(f"New tag created in src_worktree: {tag_name}") + else: + # Fallback to old method if pattern doesn't match + for line in log_content.split("\n"): + if "Content Release" in line: + parts = line.split("Content Release") + if len(parts) > 1: + tag_name = parts[1].strip() + logging.info(f"New tag created in src_worktree: {tag_name}") + break + else: + logging.warning("Could not find new tag in mkdistgitdiff output") + except OSError as e: + logging.warning(f"Could not read tag from mkdistgitdiff output: {e}") + + logging.info("Prepare step completed") + + @classmethod + def _download_sources(cls, kernel_workspace_obj: KernelWorkspace): + """ + Download kernel sources using appropriate method. + + Args: + kernel_workspace_obj: The kernel workspace object + """ + kernel_workspace = kernel_workspace_obj.folder.name + dist_git_path = kernel_workspace_obj.dist_worktree.folder + sources_dir = dist_git_path / "SOURCES" + + download_config = SOURCE_DOWNLOAD_CONFIG.get(kernel_workspace) + + if download_config: + # Direct download for special cases + logging.info(f"Downloading {kernel_workspace} source files directly...") + for hash_or_id, filename in download_config["files"]: + url = f"{download_config['base_url']}/{hash_or_id}" + try: + LocalCommand.run(command=["curl", url, "-o", str(sources_dir / filename)]) + except RuntimeError as e: + raise RuntimeError(f"Failed to download {filename}: {e}") + logging.info(f"{kernel_workspace} source files downloaded successfully") + else: + # Use getsrc.sh for all other kernels + logging.info("Downloading getsrc.sh script...") + try: + LocalCommand.run( + command=[ + "curl", + "-O", + "https://raw.githubusercontent.com/rocky-linux/rocky-tools/main/getsrc/getsrc.sh", + ], + cwd=dist_git_path, + ) + logging.info("getsrc.sh downloaded successfully") + except RuntimeError as e: + raise RuntimeError(f"Failed to download getsrc.sh: {e}") + + getsrc_script = dist_git_path / "getsrc.sh" + try: + LocalCommand.run(command=["chmod", "+x", str(getsrc_script)]) + logging.info("getsrc.sh made executable") + except RuntimeError as e: + raise RuntimeError(f"Failed to make getsrc.sh executable: {e}") + + try: + LocalCommand.run(command=[str(getsrc_script)], cwd=dist_git_path) + logging.info("getsrc.sh completed successfully") + except RuntimeError as e: + raise RuntimeError(f"getsrc.sh failed: {e}") + + @classmethod + def build(cls, kernel_workspace: str): + """ + Build step for content release. + + Verifies mock is available and user is in mock group, then builds the kernel RPMs. 
+ + Args: + kernel_workspace: The name of the kernel workspace (e.g., 'lts-9.4') + """ + logging.info(f"Running build step for kernel workspace: {kernel_workspace}") + + # Verify mock prerequisites + Mock.verify_prerequisites() + + # Load kernel workspace + kernel_workspace_obj = KernelWorkspace.load_from_name(kernel_workspace) + config = Config.load() + + # Prepare mock configuration + mock_config = Mock.prepare_config(kernel_workspace, kernel_workspace_obj, config) + + # Create build_files directory + build_files_dir = kernel_workspace_obj.folder / "build_files" + build_files_dir.mkdir(exist_ok=True) + if not build_files_dir.exists(): + raise RuntimeError(f"Failed to create build_files directory: {build_files_dir}") + logging.info(f"Build files directory: {build_files_dir}") + + # Get dist_worktree path (where we'll run mock from) + dist_git_path = kernel_workspace_obj.dist_worktree.folder + logging.info(f"Running mock from: {dist_git_path}") + + # Download kernel sources + cls._download_sources(kernel_workspace_obj) + + # Build SRPM using mock + mock_srpm_log = kernel_workspace_obj.folder / "mock_buildsrpm.log" + Mock.build_srpm(mock_config, build_files_dir, dist_git_path, mock_srpm_log) + + # Find the SRPM that was just built + srpm_files = list(build_files_dir.glob("*.src.rpm")) + if not srpm_files: + raise RuntimeError(f"No SRPM found in {build_files_dir}") + srpm_file = srpm_files[0] + logging.info(f"Found SRPM: {srpm_file.name}") + + # Build binary RPMs from the SRPM + mock_build_log = kernel_workspace_obj.folder / "mock_build.log" + Mock.build_rpms(mock_config, srpm_file, build_files_dir, dist_git_path, mock_build_log) + + # List all RPMs created + rpm_files = sorted(build_files_dir.glob("*.rpm")) + if rpm_files: + logging.info(f"Created {len(rpm_files)} RPM(s):") + for rpm in rpm_files: + logging.info(f" {rpm.name}") + else: + logging.warning("No RPM files found in build directory") + + # Clean up temporary mock config if needed + mock_config.cleanup() + + logging.info("Build step completed") + + @classmethod + def test(cls, kernel_workspace: str): + """ + Test step for content release. + + Spins up VM, installs built RPMs, reboots, and runs kselftests. 
+ + Args: + kernel_workspace: The name of the kernel workspace (e.g., 'lts-9.4') + """ + if not kernel_workspace: + logging.error("kernel_workspace is required for the test command") + raise ValueError("kernel_workspace argument is required for test command") + + logging.info(f"Running test step for kernel workspace: {kernel_workspace}") + + # Load kernel workspace + kernel_workspace_obj = KernelWorkspace.load_from_name(kernel_workspace) + + # Setup and spin up the VM (reuses common code from vm command) + vm_instance = Vm.setup_and_spinup(kernel_workspace_name=kernel_workspace) + + # Wait for dependencies to be installed if VM was just created + logging.info("Waiting for VM dependencies to be installed...") + time.sleep(Constants.VM_DEPS_INSTALL_WAIT_SECONDS) + + # Install the built RPMs + build_files_dir = kernel_workspace_obj.folder / "build_files" + logging.info(f"Installing RPMs from {build_files_dir}...") + + # Special case for fipslegacy-8.6: enable depot to prevent issues with secure boot shim + if kernel_workspace == "fipslegacy-8.6": + logging.info("Enabling depot for fipslegacy-8.6...") + depot_user = os.environ.get("DEPOT_USER") + depot_token = os.environ.get("DEPOT_TOKEN") + + if not depot_user or not depot_token: + raise RuntimeError( + "DEPOT_USER and DEPOT_TOKEN environment variables must be set for fipslegacy-8.6 testing" + ) + + try: + # Install depot client + SshCommand.run( + domain=vm_instance.domain, + command=[ + 'sudo dnf install -y "https://depot.ciq.com/public/files/depot-client/depot/depot.x86_64.rpm"' + ], + ) + logging.info("Depot client installed") + + # Register depot with credentials + SshCommand.run( + domain=vm_instance.domain, command=[f"sudo depot register -u {depot_user} -t {depot_token}"] + ) + logging.info("Depot registered") + + # Enable fips-legacy-8 + SshCommand.run(domain=vm_instance.domain, command=["sudo depot enable fips-legacy-8"]) + logging.info("fips-legacy-8 enabled via depot") + except RuntimeError as e: + logging.error(f"Failed to enable depot for fipslegacy-8.6: {e}") + raise RuntimeError(f"Failed to enable depot for fipslegacy-8.6: {e}") + + # Build the list of RPMs to install (exclude src, rt, and debug RPMs) + all_rpms = list(build_files_dir.glob("*.rpm")) + install_rpms = [ + rpm + for rpm in all_rpms + if not rpm.name.endswith(".src.rpm") + and not rpm.name.startswith("kernel-rt") + and not rpm.name.startswith("kernel-debug-") + ] + + if not install_rpms: + raise RuntimeError(f"No installable RPMs found in {build_files_dir}") + + rpm_paths = " ".join(str(rpm.absolute()) for rpm in install_rpms) + # Remove libtraceevent first to avoid file conflicts with perf package + install_cmd = ( + f"sudo dnf remove -y libtraceevent || true && sudo dnf install --skip-broken --allowerasing {rpm_paths} -y" + ) + # install_cmd = f"sudo dnf clean all && sudo dnf install --skip-broken --allowerasing {rpm_paths} -y" + + install_log = kernel_workspace_obj.folder.absolute() / "install.log" + logging.info(f"Installing {len(install_rpms)} RPM(s)") + logging.info(f"RPM install output will be written to {install_log}") + + try: + SshCommand.run_with_output(output_file=install_log, domain=vm_instance.domain, command=[install_cmd]) + logging.info("RPMs installed successfully") + except RuntimeError as e: + logging.error(f"RPM installation failed: {e}") + logging.error(f"Check {install_log} for details") + raise RuntimeError(f"RPM installation failed. 
See {install_log} for details") + + # Determine expected kernel version from the built RPMs + kernel_rpms = list(build_files_dir.glob("kernel-[0-9]*.x86_64.rpm")) + expected_version = None + if kernel_rpms: + # Extract version from RPM filename: kernel-5.14.0-284.30.1+23.1.el9_2_ciq.x86_64.rpm + rpm_name = kernel_rpms[0].name + # Use regex to extract version (more robust than string replacement) + # Pattern: kernel-VERSION.ARCH.rpm where VERSION includes everything up to .x86_64 + version_match = re.match(r"kernel-(.*?)\.rpm$", rpm_name) + if version_match: + expected_version = version_match.group(1) + logging.info(f"Expected kernel version: {expected_version}") + else: + # Fallback to old string replacement method + expected_version = rpm_name.replace("kernel-", "").replace(".rpm", "") + logging.info(f"Expected kernel version (fallback): {expected_version}") + else: + logging.warning("Could not determine expected kernel version from RPMs") + + # Ensure /boot/grub2/grubenv is a real file, not a symlink + # grub can't follow symlinks, so if grubenv is a symlink we need to make it a real file + logging.info("Ensuring /boot/grub2/grubenv is a real file...") + grubenv_fix_cmd = ( + "sudo bash -c 'if [ -L /boot/grub2/grubenv ]; then " + "cp --remove-destination $(readlink -f /boot/grub2/grubenv) /boot/grub2/grubenv; " + "fi'" + ) + try: + SshCommand.run(domain=vm_instance.domain, command=[grubenv_fix_cmd]) + logging.info("grubenv symlink fixed if needed") + except RuntimeError as e: + logging.error(f"Failed to fix grubenv symlink: {e}") + raise RuntimeError(f"Failed to fix grubenv symlink: {e}") + + # Set the newly installed kernel as the default boot kernel + if expected_version: + logging.info(f"Setting default boot kernel to {expected_version}") + kernel_path = f"/boot/vmlinuz-{expected_version}" + set_default_cmd = f"sudo grubby --set-default={kernel_path}" + try: + SshCommand.run(domain=vm_instance.domain, command=[set_default_cmd]) + logging.info("Default boot kernel set successfully") + except RuntimeError as e: + logging.error(f"Failed to set default boot kernel: {e}") + raise RuntimeError(f"Failed to set default boot kernel: {e}") + + # Reboot the VM + logging.info("Rebooting VM...") + vm_instance.reboot() + + # Get the running kernel version from the VM + kernel_version = vm_instance.running_kernel_version().strip() + logging.info(f"Running kernel version: {kernel_version}") + + # Verify the kernel version matches what we installed + if expected_version and kernel_version != expected_version: + raise RuntimeError(f"Kernel version mismatch! 
Expected: {expected_version}, Running: {kernel_version}") + logging.info("Verified VM is running the newly installed kernel") + + # Run kselftests using the installed kselftests + kselftest_log = kernel_workspace_obj.folder.absolute() / f"selftest-{kernel_version}.log" + vm_instance.kselftests_internal(kselftest_log) + + # Count passed tests + vm_instance.count_kselftest_passed(kselftest_log) + + logging.info("Test step completed successfully") diff --git a/kt/commands/vm/command.py b/kt/commands/vm/command.py index ba966fd..c3b16f8 100644 --- a/kt/commands/vm/command.py +++ b/kt/commands/vm/command.py @@ -59,4 +59,7 @@ @click.option("--test", is_flag=True, help="Build the kernel and run kselftests") @click.argument("kernel_workspace", required=False, shell_complete=ShellCompletion.show_kernel_workspaces) def vm(kernel_workspace, console, destroy, override, list_all, test): + if not list_all and not kernel_workspace: + raise click.UsageError("kernel_workspace is required unless --list-all is specified") + main(name=kernel_workspace, console=console, destroy=destroy, override=override, list_all=list_all, test=test) diff --git a/kt/commands/vm/impl.py b/kt/commands/vm/impl.py index e48038f..6a62e9b 100644 --- a/kt/commands/vm/impl.py +++ b/kt/commands/vm/impl.py @@ -1,266 +1,10 @@ -from __future__ import annotations - import logging -import os import time -from dataclasses import dataclass - -import oyaml as yaml -import wget -from git import Repo -from pathlib3x import Path from kt.ktlib.config import Config -from kt.ktlib.kernel_workspace import KernelWorkspace -from kt.ktlib.ssh import SshCommand from kt.ktlib.util import Constants -from kt.ktlib.virt import VirtHelper, VmCommand - -# TODO move this to a separate repo -CLOUD_INIT_BASE_PATH = Path(__file__).parent.parent.parent.joinpath("data/cloud_init.yaml") - - -@dataclass -class Vm: - """ - Class that represents a virtual machine. 
- - Attributes: - name: name of the vm - - qcow2_source_path: qcow2 path to the vm image used as source - vm_major_version: major vm version (9 for Rocky 9) - qcow2_path: the qcow2 path of the vm image copied from qcow2_source_path - cloud_init_path: cloud_init.yaml config, adapted from data/cloud_init.yaml - """ - - qcow2_source_path: Path - vm_major_version: str - qcow2_path: Path - cloud_init_path: Path - name: str - kernel_workspace: KernelWorkspace - - @classmethod - def load(cls, config: Config, kernel_workspace: KernelWorkspace): - kernel_workspace_str = kernel_workspace.folder.name - kernel_name = cls._extract_kernel_name(kernel_workspace_str) - vm_major_version = cls._extract_major(kernel_name) - - # Image source paths construction - qcow2_source_path = config.images_source_dir / Path(cls._qcow2_name(vm_major_version=vm_major_version)) - - # Actual current image paths construction - work_dir = config.images_dir / Path(kernel_workspace_str) - qcow2_path = work_dir / Path(f"{kernel_workspace_str}.qcow2") - cloud_init_path = work_dir / Path(Constants.CLOUD_INIT) - - return cls( - qcow2_source_path=qcow2_source_path, - vm_major_version=vm_major_version, - qcow2_path=qcow2_path, - cloud_init_path=cloud_init_path, - name=kernel_workspace_str, - kernel_workspace=kernel_workspace, - ) - - @classmethod - def _extract_kernel_name(cls, kernel_workspace): - # _ --> where kernel does not contain any '_' - return kernel_workspace.split("_")[0] - - @classmethod - def _extract_major(cls, full_version): - # lts-9.4 --> return 9 - return full_version.split("-")[-1].split(".")[0] - - @classmethod - def _qcow2_name(cls, vm_major_version: str): - return f"{Constants.DEFAULT_VM_BASE}-{vm_major_version}-{Constants.QCOW2_TRAIL}" - - def _get_vm_url(self): - return f"{Constants.BASE_URL}/{self.vm_major_version}/images/x86_64/{self.qcow2_source_path.name}" - - def _download_source_image(self): - if self.qcow2_source_path.exists(): - logging.info(f"Image {self.qcow2_source_path} already exists, nothing to do") - return - - # Make sure the folder exists - self.qcow2_source_path.parent.mkdir(parents=True, exist_ok=True) - - logging.info("Downloading image") - wget.download(self._get_vm_url(), out=str(self.qcow2_source_path.parent)) - - def _setup_cloud_init(self, config: Config): - data = None - with open(CLOUD_INIT_BASE_PATH) as f: - data = yaml.safe_load(f) - - # replace placeholders with user data - data["users"][0]["name"] = os.environ["USER"] - - # password remains the default for now - data["chpasswd"]["list"][0] = f"{os.environ['USER']}:test" - - # ssh key - with open(config.ssh_key) as f: - ssh_key_content = f.read().strip() - data["users"][0]["ssh_authorized_keys"][0] = ssh_key_content - - data["mounts"][0][1] = str(config.base_path.absolute()) - data["mounts"][1][0] = str(config.base_path.absolute()) - - # Go to the working directory of the kernel - working_dir = config.kernels_dir / Path(self.name) - data["write_files"][0]["content"] = f"cd {str(working_dir)}" - - # Because $HOME is the same as the host, during boot, cloud-init - # sees the home dir already exists and root remains the owner - # change it to $USER - data["runcmd"][0][1] = f"{os.environ['USER']}:{os.environ['USER']}" - data["runcmd"][0][2] = os.environ["HOME"] - - # Install packages needed later - data["runcmd"].append([str(config.base_path / Path("kernel-src-tree-tools") / Path("kernel_install_dep.sh"))]) - # Write this to image cloud_init - with open(self.cloud_init_path, "w") as f: - f.write("#cloud-config\n") - yaml.dump(data, f) 
- - def _create_image(self, config: Config): - # Make sure the dir exists - self.qcow2_path.parent.mkdir(parents=True, exist_ok=True) - - self._setup_cloud_init(config=config) - # Copy qcow2 image to work dir - self.qcow2_source_path.copy(self.qcow2_path) - - self._virt_install(config=config) - time.sleep(Constants.VM_STARTUP_WAIT_SECONDS) - - def _virt_install(self, config: Config): - return VmCommand.install( - name=self.name, - qcow2_path=self.qcow2_path, - vm_major_version=self.vm_major_version, - cloud_init_path=self.cloud_init_path, - common_dir=config.base_path, - ) - - def setup(self, config: Config): - self._download_source_image() - - def spin_up(self, config: Config) -> VmInstance: - if not VirtHelper.exists(vm_name=self.name): - logging.info(f"VM {self.name} does not exist, creating from scratch...") - - self._create_image(config=config) - return VmInstance(name=self.name, kernel_workspace=self.kernel_workspace) - - logging.info(f"Vm {self.name} already exists") - - if VirtHelper.is_running(vm_name=self.name): - logging.info(f"Vm {self.name} is running, nothing to do") - return VmInstance(name=self.name, kernel_workspace=self.kernel_workspace) - - logging.info(f"Vm {self.name} is not running, starting it") - VmCommand.start(vm_name=self.name) - time.sleep(Constants.VM_STARTUP_WAIT_SECONDS) - - return VmInstance(name=self.name, kernel_workspace=self.kernel_workspace) - - def destroy(self): - if VirtHelper.is_running(vm_name=self.name): - VmCommand.destroy(vm_name=self.name) - - if VirtHelper.exists(vm_name=self.name): - VmCommand.undefine(vm_name=self.name) - - # remove its folder that contains the qcow2 image and cloud-init config - self.qcow2_path.parent.rmtree(ignore_errors=True) - - -class VmInstance: - name: str - ssh_domain: str - kernel_workspace: KernelWorkspace - - def __init__(self, name: str, kernel_workspace: KernelWorkspace): - self.name = name - ip_addr = VirtHelper.ip_addr(vm_name=self.name) - username = os.environ["USER"] - self.domain = f"{username}@{ip_addr}" - self.kernel_workspace = kernel_workspace - - def reboot(self): - logging.debug("Rebooting vm") - - command = ["sudo", "reboot"] - try: - SshCommand.run(domain=self.domain, command=command) - except RuntimeError as e: - if "closed by remote host" in str(e): - pass - - time.sleep(Constants.VM_REBOOT_WAIT_SECONDS) - VmCommand.start(vm_name=self.name) - time.sleep(Constants.VM_STARTUP_WAIT_SECONDS) - - def current_head_sha_long(self): - repo = Repo(self.kernel_workspace.src_worktree.folder) - return repo.head.commit.hexsha - - def current_head_sha_short(self): - return self.current_head_sha_long()[:7] - - def kselftests(self, config): - logging.debug("Running kselftests") - script = str(config.base_path / Path("kernel-src-tree-tools") / Path("kernel_kselftest.sh")) - output_file = self.kernel_workspace.folder.absolute() / Path(f"kselftest-{self.current_head_sha_short()}.log") - ssh_cmd = f"cd {self.kernel_workspace.src_worktree.folder.absolute()} && {script}" - - SshCommand.run_with_output(output_file=output_file, domain=self.domain, command=[ssh_cmd]) - - def expected_kernel_version(self): - kernel_version = SshCommand.running_kernel_version(domain=self.domain) - subversions = kernel_version.split("-") - if len(subversions) < 2: - return False - - # TODO some proper matching versioning here - install_hash = subversions[-1].split("+")[0] - - head_hash = self.current_head_sha_long() - if not head_hash.startswith(install_hash): - return False - - return True - - def build_kernel(self, config): - 
logging.debug("Building kernel") - build_script = str(config.base_path / Path("kernel-src-tree-tools") / Path("kernel_build.sh")) - output_file = self.kernel_workspace.folder.absolute() / Path( - f"kernel-build-{self.current_head_sha_short()}.log" - ) - ssh_cmd = f"cd {self.kernel_workspace.src_worktree.folder.absolute()} && {build_script} -n" - - SshCommand.run_with_output(output_file=output_file, domain=self.domain, command=[ssh_cmd]) - - def test(self, config): - if self.expected_kernel_version(): - logging.info("Expected running kernel version, no need to build the kernel") - else: - self.build_kernel(config=config) - self.reboot() - - if not self.expected_kernel_version(): - raise RuntimeError("Kernel version is not what we expect") - - self.kselftests(config=config) - - def console(self): - VmCommand.console(vm_name=self.name) +from kt.ktlib.virt import VmCommand +from kt.ktlib.vm import Vm def main(name: str, console: bool, destroy: bool, override: bool, list_all: bool, test: bool = False): @@ -268,20 +12,17 @@ def main(name: str, console: bool, destroy: bool, override: bool, list_all: bool VmCommand.list_all() return - config = Config.load() - kernel_workpath = config.kernels_dir / name - kernel_workspace = KernelWorkspace.load_from_filepath(folder=kernel_workpath) + # If neither test nor console is requested, we just spin up the VM and exit. + # This is useful for starting a VM that you'll interact with manually (e.g., via SSH). + # If this behavior is not desired, consider requiring at least one action flag. - vm = Vm.load(config=config, kernel_workspace=kernel_workspace) if destroy: + vm = Vm.load_from_workspace(name) vm.destroy() return - if override: - vm.destroy() - - vm.setup(config=config) - vm_instance = vm.spin_up(config=config) + vm_instance = Vm.setup_and_spinup(kernel_workspace_name=name, override=override) + config = Config.load() if test: # Wait for the dependencies to be installed diff --git a/kt/data/kernels.yaml b/kt/data/kernels.yaml index 32d82b4..ca17eb5 100644 --- a/kt/data/kernels.yaml +++ b/kt/data/kernels.yaml @@ -8,39 +8,46 @@ kernels: src_tree_branch: ciqcbr7_9 dist_git_root: dist-git-tree-cbr dist_git_branch: cbr79-7 + mock_config: centos-cbr79 lts-8.6: src_tree_root: kernel-src-tree src_tree_branch: ciqlts8_6 dist_git_root: dist-git-tree-lts dist_git_branch: lts86-8 + mock_config: rocky-lts86 lts-9.2: src_tree_root: kernel-src-tree src_tree_branch: ciqlts9_2 dist_git_root: dist-git-tree-lts dist_git_branch: lts92-9 + mock_config: rocky-lts92 lts-9.4: src_tree_root: kernel-src-tree src_tree_branch: ciqlts9_4 dist_git_root: dist-git-tree-lts dist_git_branch: lts94-9 + mock_config: rocky-lts94 + + lts-9.6: + src_tree_root: kernel-src-tree + src_tree_branch: ciqlts9_6 + dist_git_root: dist-git-tree-lts + dist_git_branch: lts96-9 + mock_config: rocky-lts96 fipslegacy-8.6: src_tree_root: kernel-src-tree src_tree_branch: fips-legacy-8-compliant/4.18.0-425.13.1 dist_git_root: dist-git-tree-fips dist_git_branch: fips-compliant8 + mock_config: rocky-lts86-fips fips-9.2: src_tree_root: kernel-src-tree src_tree_branch: fips-9-compliant/5.14.0-284.30.1 dist_git_root: dist-git-tree-fips dist_git_branch: el92-fips-compliant-9 - - lts-9.6: - src_tree_root: kernel-src-tree - src_tree_branch: ciqlts9_6 - dist_git_root: dist-git-tree-lts - dist_git_branch: lts96-9 + mock_config: rocky-fips92 diff --git a/kt/ktlib/command_runner.py b/kt/ktlib/command_runner.py index 730a6f3..2a684b2 100644 --- a/kt/ktlib/command_runner.py +++ b/kt/ktlib/command_runner.py @@ -16,7 +16,7 @@ 
def _build_command(cls, **kwargs) -> list[str]: raise NotImplementedError @classmethod - def run(cls, **kwargs) -> str: + def run(cls, cwd=None, **kwargs) -> str: full_command = cls._build_command(**kwargs) logging.info(f"Running command {full_command}") @@ -25,6 +25,7 @@ def run(cls, **kwargs) -> str: text=True, capture_output=True, check=False, + cwd=cwd, ) if result.returncode != 0: raise RuntimeError(result.stderr) @@ -32,13 +33,18 @@ def run(cls, **kwargs) -> str: return result.stdout @classmethod - def run_with_output(cls, output_file: str, **kwargs): + def run_with_output(cls, output_file: str, cwd=None, **kwargs): full_command = cls._build_command(**kwargs) logging.info(f"Running command {full_command}") # Run the command and stream output process = subprocess.Popen( - full_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, bufsize=1 + full_command, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + bufsize=1, + cwd=cwd, ) # Read and display output line by line diff --git a/kt/ktlib/kernel_workspace.py b/kt/ktlib/kernel_workspace.py index 3257712..9ea720e 100644 --- a/kt/ktlib/kernel_workspace.py +++ b/kt/ktlib/kernel_workspace.py @@ -100,6 +100,30 @@ def push(self, force: bool = False): repo.git.push(*args) + def check_git_config_value(self, section, option): + """ + Get a git config value from repo or global config. + + Args: + section: Config section (e.g., 'user') + option: Config option (e.g., 'name') + + Returns: + The config value, or None if not found + """ + repo = Repo(self.folder) + try: + # Try repo-specific config first + return repo.config_reader().get_value(section, option) + except Exception: + pass + + try: + # Fall back to global config + return repo.config_reader("global").get_value(section, option) + except Exception: + return None + @dataclass class KernelWorkspace: @@ -125,6 +149,22 @@ def load_from_filepath(cls, folder: Path): src_worktree=src_worktree, ) + @classmethod + def load_from_name(cls, kernel_workspace_name: str): + """ + Load a kernel workspace by name. + + Args: + kernel_workspace_name: The name of the kernel workspace (e.g., 'lts-9.4') + + Returns: + KernelWorkspace + """ + config = Config.load() + kernel_workpath = config.kernels_dir / kernel_workspace_name + workspace = cls.load_from_filepath(folder=kernel_workpath) + return workspace + @classmethod def load(cls, name: str, config: Config, kernel_info: KernelInfo, extra: str): if extra: diff --git a/kt/ktlib/kernels.py b/kt/ktlib/kernels.py index a881d44..b29f077 100644 --- a/kt/ktlib/kernels.py +++ b/kt/ktlib/kernels.py @@ -21,6 +21,7 @@ class KernelInfo: src_tree_branch: the corresponding branch in the source tree dist_git_root: rocky staging rpm repo dist_git_branch: corresponding branch in the rocky staging rpm repo + mock_config: mock configuration name for building RPMs The src_tree_root and dist_git_root contain absolute paths to the local clone of these repos and their corresponding remote url. @@ -34,6 +35,8 @@ class KernelInfo: dist_git_root: RepoInfo dist_git_branch: str + mock_config: str + @dataclass class KernelsInfo: diff --git a/kt/ktlib/local.py b/kt/ktlib/local.py new file mode 100644 index 0000000..3d62e04 --- /dev/null +++ b/kt/ktlib/local.py @@ -0,0 +1,12 @@ +from kt.ktlib.command_runner import CommandRunner + + +class LocalCommand(CommandRunner): + """ + Command runner for local commands. 
+ """ + + @classmethod + def _build_command(cls, command: list[str]) -> list[str]: + """Return the command as-is for local execution.""" + return command diff --git a/kt/ktlib/mock.py b/kt/ktlib/mock.py new file mode 100644 index 0000000..f05bb1c --- /dev/null +++ b/kt/ktlib/mock.py @@ -0,0 +1,241 @@ +import logging +import os +from dataclasses import dataclass +from pathlib3x import Path + +from kt.ktlib.config import Config +from kt.ktlib.kernel_workspace import KernelWorkspace +from kt.ktlib.kernels import KernelsInfo +from kt.ktlib.local import LocalCommand + + +@dataclass +class MockConfig: + """ + Represents a mock configuration for building RPMs. + + Attributes: + config_path: Path to the mock config file to use + is_temporary: Whether this config is temporary and needs cleanup + kernel_workspace: The kernel workspace name + """ + + config_path: Path + is_temporary: bool + kernel_workspace: str + + def cleanup(self): + """Remove temporary mock config if it was created.""" + if self.is_temporary and self.config_path.exists(): + try: + self.config_path.unlink() + logging.info("Removed temporary mock config") + except OSError as e: + logging.warning(f"Failed to remove temporary mock config: {e}") + + +class Mock: + """Mock build system operations.""" + + @classmethod + def verify_prerequisites(cls): + """ + Verify mock is installed and user is in mock group. + + Raises: + RuntimeError: If mock is not installed or user is not in mock group + """ + # Verify mock command is available + try: + mock_path = LocalCommand.run(command=["which", "mock"]).strip() + logging.info(f"mock command found at: {mock_path}") + except RuntimeError: + raise RuntimeError("mock command not found. Please install mock:\n sudo dnf install mock") + + # Verify current user is in mock group + try: + groups_output = LocalCommand.run(command=["groups"]) + groups = groups_output.strip().split() + if "mock" not in groups: + raise RuntimeError( + "Current user is not in the mock group. Please add yourself to the mock group:\n" + " sudo usermod -a -G mock $USER\n" + "Then log out and log back in for the group change to take effect." + ) + logging.info("User is in mock group") + except RuntimeError as e: + raise RuntimeError(f"Failed to check user groups: {e}") + + @classmethod + def verify_depot_credentials(cls): + """ + Verify DEPOT_USER and DEPOT_TOKEN environment variables are set. + + Returns: + Tuple of (depot_user, depot_token) + + Raises: + RuntimeError: If either environment variable is not set + """ + depot_user = os.environ.get("DEPOT_USER") + depot_token = os.environ.get("DEPOT_TOKEN") + + if not depot_user: + raise RuntimeError( + "DEPOT_USER environment variable is not set. Please set it:\n export DEPOT_USER=your_username" + ) + if not depot_token: + raise RuntimeError( + "DEPOT_TOKEN environment variable is not set. Please set it:\n export DEPOT_TOKEN=your_token" + ) + logging.info("DEPOT_USER and DEPOT_TOKEN environment variables are set") + + return depot_user, depot_token + + @classmethod + def prepare_config(cls, kernel_workspace: str, kernel_workspace_obj: KernelWorkspace, config: Config) -> MockConfig: + """ + Prepare mock configuration for building. + + For CBR kernels, uses the config directly. + For depot-based kernels, creates a temporary config with credentials replaced. 
+ + Args: + kernel_workspace: The name of the kernel workspace (e.g., 'lts-9.4') + kernel_workspace_obj: The kernel workspace object + config: The configuration object + + Returns: + MockConfig object with the path to the config to use + + Raises: + RuntimeError: If config not found or kernel workspace is unknown + """ + # Get the mock config name from kernels.yaml + kernels_info = KernelsInfo.from_yaml(config) + if kernel_workspace not in kernels_info.kernels: + raise RuntimeError( + f"Unknown kernel workspace: {kernel_workspace}\n" + f"Supported workspaces: {', '.join(sorted(kernels_info.kernels.keys()))}" + ) + + kernel_info = kernels_info.kernels[kernel_workspace] + mock_config_base = kernel_info.mock_config + + # CBR is a special case - no depot config, no credential replacement needed + is_cbr = kernel_workspace.startswith("cbr-") + + if is_cbr: + mock_config_name = f"{mock_config_base}-x86_64.cfg" + else: + mock_config_name = f"{mock_config_base}-depot-x86_64.cfg" + + # Find the mock config file + mock_configs_dir = config.base_path / "mock-configs" + mock_config_source = mock_configs_dir / mock_config_name + + if not mock_config_source.exists(): + raise RuntimeError( + f"Mock config not found: {mock_config_source}\nExpected to find {mock_config_name} in {mock_configs_dir}" + ) + + logging.info(f"Found mock config: {mock_config_source}") + + # For CBR, use the config directly. For others, create temp config with credentials + if is_cbr: + logging.info("Using CBR mock config directly (no depot credentials needed)") + return MockConfig(config_path=mock_config_source, is_temporary=False, kernel_workspace=kernel_workspace) + else: + # Verify depot credentials are set + depot_user, depot_token = cls.verify_depot_credentials() + + # Create a temporary mock config with DEPOT credentials replaced + temp_mock_config = kernel_workspace_obj.folder / f"temp_{mock_config_name}" + + logging.info(f"Creating temporary mock config: {temp_mock_config}") + + with open(mock_config_source, "r") as src: + config_content = src.read() + + # Replace DEPOT_USER and DEPOT_TOKEN + config_content = config_content.replace("DEPOT_USER", depot_user) + config_content = config_content.replace("DEPOT_TOKEN", depot_token) + + with open(temp_mock_config, "w") as dest: + dest.write(config_content) + + logging.info("Temporary mock config created with credentials") + return MockConfig(config_path=temp_mock_config, is_temporary=True, kernel_workspace=kernel_workspace) + + @classmethod + def build_srpm(cls, mock_config: MockConfig, build_files_dir: Path, dist_git_path: Path, output_log: Path): + """ + Build SRPM using mock. 
+ + Args: + mock_config: The mock configuration to use + build_files_dir: Directory where build results will be placed + dist_git_path: Path to dist-git repository (working directory) + output_log: Path to write build output + + Raises: + RuntimeError: If SRPM build fails + """ + logging.info("Building SRPM with mock...") + logging.info(f"Mock SRPM build output will be written to {output_log}") + + mock_cmd = [ + "mock", + "-v", + "-r", + str(mock_config.config_path), + f"--resultdir={build_files_dir}", + "--buildsrpm", + "--spec=SPECS/kernel.spec", + "--sources=SOURCES", + ] + + try: + LocalCommand.run_with_output(command=mock_cmd, output_file=str(output_log), cwd=dist_git_path) + logging.info("SRPM build completed successfully") + except RuntimeError as e: + logging.error(f"Mock SRPM build failed with exit code {e}") + logging.error(f"Check {output_log} for details") + raise RuntimeError(f"Mock SRPM build failed. See {output_log} for details") + + @classmethod + def build_rpms( + cls, mock_config: MockConfig, srpm_file: Path, build_files_dir: Path, dist_git_path: Path, output_log: Path + ): + """ + Build binary RPMs from SRPM using mock. + + Args: + mock_config: The mock configuration to use + srpm_file: Path to the source RPM file + build_files_dir: Directory where build results will be placed + dist_git_path: Path to dist-git repository (working directory) + output_log: Path to write build output + + Raises: + RuntimeError: If RPM build fails + """ + logging.info("Building binary RPMs with mock...") + logging.info(f"Mock binary RPM build output will be written to {output_log}") + + mock_build_cmd = [ + "mock", + "-v", + "-r", + str(mock_config.config_path), + f"--resultdir={build_files_dir}", + str(srpm_file), + ] + + try: + LocalCommand.run_with_output(command=mock_build_cmd, output_file=str(output_log), cwd=dist_git_path) + logging.info("Binary RPM build completed successfully") + except RuntimeError as e: + logging.error(f"Mock binary RPM build failed with exit code {e}") + logging.error(f"Check {output_log} for details") + raise RuntimeError(f"Mock binary RPM build failed. See {output_log} for details") diff --git a/kt/ktlib/vm.py b/kt/ktlib/vm.py new file mode 100644 index 0000000..f526ccb --- /dev/null +++ b/kt/ktlib/vm.py @@ -0,0 +1,373 @@ +from __future__ import annotations + +import logging +import os +import time +from dataclasses import dataclass + +import oyaml as yaml +import wget +from git import Repo +from pathlib3x import Path + +from kt.ktlib.config import Config +from kt.ktlib.kernel_workspace import KernelWorkspace +from kt.ktlib.local import LocalCommand +from kt.ktlib.ssh import SshCommand +from kt.ktlib.util import Constants +from kt.ktlib.virt import VirtHelper, VmCommand + +# TODO move this to a separate repo +CLOUD_INIT_BASE_PATH = Path(__file__).parent.parent.joinpath("data/cloud_init.yaml") + + +@dataclass +class Vm: + """ + Class that represents a virtual machine. 
+ + Attributes: + name: name of the vm + + qcow2_source_path: qcow2 path to the vm image used as source + vm_major_version: major vm version (9 for Rocky 9) + qcow2_path: the qcow2 path of the vm image copied from qcow2_source_path + cloud_init_path: cloud_init.yaml config, adapted from data/cloud_init.yaml + """ + + qcow2_source_path: Path + vm_major_version: str + qcow2_path: Path + cloud_init_path: Path + name: str + kernel_workspace: KernelWorkspace + + @classmethod + def load(cls, config: Config, kernel_workspace: KernelWorkspace): + kernel_workspace_str = kernel_workspace.folder.name + kernel_name = cls._extract_kernel_name(kernel_workspace_str) + vm_major_version = cls._extract_major(kernel_name) + + # Image source paths construction + qcow2_source_path = config.images_source_dir / Path(cls._qcow2_name(vm_major_version=vm_major_version)) + + # Actual current image paths construction + work_dir = config.images_dir / Path(kernel_workspace_str) + qcow2_path = work_dir / Path(f"{kernel_workspace_str}.qcow2") + cloud_init_path = work_dir / Path(Constants.CLOUD_INIT) + + return cls( + qcow2_source_path=qcow2_source_path, + vm_major_version=vm_major_version, + qcow2_path=qcow2_path, + cloud_init_path=cloud_init_path, + name=kernel_workspace_str, + kernel_workspace=kernel_workspace, + ) + + @classmethod + def _extract_kernel_name(cls, kernel_workspace): + # _ --> where kernel does not contain any '_' + return kernel_workspace.split("_")[0] + + @classmethod + def _extract_major(cls, full_version): + # lts-9.4 --> return 9 + return full_version.split("-")[-1].split(".")[0] + + @classmethod + def _qcow2_name(cls, vm_major_version: str): + return f"{Constants.DEFAULT_VM_BASE}-{vm_major_version}-{Constants.QCOW2_TRAIL}" + + @classmethod + def load_from_workspace(cls, kernel_workspace_name: str): + """ + Load VM from a kernel workspace name. + + Args: + kernel_workspace_name: The name of the kernel workspace + + Returns: + Vm: The VM object + """ + config = Config.load() + kernel_workpath = config.kernels_dir / kernel_workspace_name + kernel_workspace = KernelWorkspace.load_from_filepath(folder=kernel_workpath) + return cls.load(config=config, kernel_workspace=kernel_workspace) + + @classmethod + def setup_and_spinup(cls, kernel_workspace_name: str, override: bool = False): + """ + Setup and spin up a VM from a kernel workspace name. 
+ + Args: + kernel_workspace_name: The name of the kernel workspace + override: If True, destroy and recreate the VM + + Returns: + VmInstance: The running VM instance + """ + vm = cls.load_from_workspace(kernel_workspace_name) + config = Config.load() + + if override: + vm.destroy() + + vm.setup(config=config) + vm_instance = vm.spin_up(config=config) + + return vm_instance + + def _get_vm_url(self): + return f"{Constants.BASE_URL}/{self.vm_major_version}/images/x86_64/{self.qcow2_source_path.name}" + + def _download_source_image(self): + if self.qcow2_source_path.exists(): + logging.info(f"Image {self.qcow2_source_path} already exists, nothing to do") + return + + # Make sure the folder exists + self.qcow2_source_path.parent.mkdir(parents=True, exist_ok=True) + + logging.info("Downloading image") + wget.download(self._get_vm_url(), out=str(self.qcow2_source_path.parent)) + + def _setup_cloud_init(self, config: Config): + data = None + with open(CLOUD_INIT_BASE_PATH) as f: + data = yaml.safe_load(f) + + # replace placeholders with user data + data["users"][0]["name"] = os.environ["USER"] + + # password remains the default for now + data["chpasswd"]["list"][0] = f"{os.environ['USER']}:test" + + # ssh key + with open(config.ssh_key) as f: + ssh_key_content = f.read().strip() + data["users"][0]["ssh_authorized_keys"][0] = ssh_key_content + + data["mounts"][0][1] = str(config.base_path.absolute()) + data["mounts"][1][0] = str(config.base_path.absolute()) + + # Go to the working directory of the kernel + working_dir = config.kernels_dir / Path(self.name) + data["write_files"][0]["content"] = f"cd {str(working_dir)}" + + # Because $HOME is the same as the host, during boot, cloud-init + # sees the home dir already exists and root remains the owner + # change it to $USER + data["runcmd"][0][1] = f"{os.environ['USER']}:{os.environ['USER']}" + data["runcmd"][0][2] = os.environ["HOME"] + + # Install packages needed later + data["runcmd"].append([str(config.base_path / Path("kernel-src-tree-tools") / Path("kernel_install_dep.sh"))]) + # Write this to image cloud_init + with open(self.cloud_init_path, "w") as f: + f.write("#cloud-config\n") + yaml.dump(data, f) + + def _create_image(self, config: Config): + # Make sure the dir exists + self.qcow2_path.parent.mkdir(parents=True, exist_ok=True) + + self._setup_cloud_init(config=config) + # Copy qcow2 image to work dir + self.qcow2_source_path.copy(self.qcow2_path) + + # Resize the disk to 30GB + self._resize_disk() + + self._virt_install(config=config) + time.sleep(Constants.VM_STARTUP_WAIT_SECONDS) + + def _virt_install(self, config: Config): + return VmCommand.install( + name=self.name, + qcow2_path=self.qcow2_path, + vm_major_version=self.vm_major_version, + cloud_init_path=self.cloud_init_path, + common_dir=config.base_path, + ) + + def _resize_disk(self): + """Resize the qcow2 disk image to 30GB.""" + logging.info(f"Resizing disk {self.qcow2_path} to 30G") + try: + LocalCommand.run(command=["qemu-img", "resize", str(self.qcow2_path), "30G"]) + except RuntimeError as e: + raise RuntimeError(f"Failed to resize disk image: {e}") + + def setup(self, config: Config): + self._download_source_image() + + def spin_up(self, config: Config) -> VmInstance: + if not VirtHelper.exists(vm_name=self.name): + logging.info(f"VM {self.name} does not exist, creating from scratch...") + + self._create_image(config=config) + return VmInstance(name=self.name, kernel_workspace=self.kernel_workspace) + + logging.info(f"Vm {self.name} already exists") + + if 
VirtHelper.is_running(vm_name=self.name): + logging.info(f"Vm {self.name} is running, nothing to do") + return VmInstance(name=self.name, kernel_workspace=self.kernel_workspace) + + logging.info(f"Vm {self.name} is not running, starting it") + VmCommand.start(vm_name=self.name) + time.sleep(Constants.VM_STARTUP_WAIT_SECONDS) + + return VmInstance(name=self.name, kernel_workspace=self.kernel_workspace) + + def destroy(self): + if VirtHelper.is_running(vm_name=self.name): + VmCommand.destroy(vm_name=self.name) + + if VirtHelper.exists(vm_name=self.name): + VmCommand.undefine(vm_name=self.name) + + # remove its folder that contains the qcow2 image and cloud-init config + self.qcow2_path.parent.rmtree(ignore_errors=True) + + +class VmInstance: + name: str + ssh_domain: str + kernel_workspace: KernelWorkspace + + def __init__(self, name: str, kernel_workspace: KernelWorkspace): + self.name = name + ip_addr = VirtHelper.ip_addr(vm_name=self.name) + username = os.environ["USER"] + self.domain = f"{username}@{ip_addr}" + self.kernel_workspace = kernel_workspace + + def reboot(self): + logging.debug("Rebooting vm") + + command = ["sudo", "reboot"] + try: + SshCommand.run(domain=self.domain, command=command) + except RuntimeError as e: + if "closed by remote host" in str(e): + pass + + time.sleep(Constants.VM_REBOOT_WAIT_SECONDS) + VmCommand.start(vm_name=self.name) + time.sleep(Constants.VM_STARTUP_WAIT_SECONDS) + + def current_head_sha_long(self): + repo = Repo(self.kernel_workspace.src_worktree.folder) + return repo.head.commit.hexsha + + def current_head_sha_short(self): + return self.current_head_sha_long()[:7] + + def kselftests(self, config): + """Run kselftests from source tree.""" + logging.debug("Running kselftests") + script = str(config.base_path / Path("kernel-src-tree-tools") / Path("kernel_kselftest.sh")) + output_file = self.kernel_workspace.folder.absolute() / Path(f"kselftest-{self.current_head_sha_short()}.log") + ssh_cmd = f"cd {self.kernel_workspace.src_worktree.folder.absolute()} && {script}" + + SshCommand.run_with_output(output_file=output_file, domain=self.domain, command=[ssh_cmd]) + + def kselftests_internal(self, output_file: Path): + """ + Run installed kselftests from /usr/libexec/kselftests (for official releases). + + Args: + output_file: Path to write kselftest output + + Raises: + RuntimeError: If kselftests fail + """ + logging.info("Running kernel selftests...") + logging.info(f"Kselftest output will be written to {output_file}") + + kselftest_cmd = "sudo /usr/libexec/kselftests/run_kselftest.sh" + + try: + SshCommand.run_with_output(output_file=output_file, domain=self.domain, command=[kselftest_cmd]) + logging.info("Kselftests completed successfully") + except RuntimeError as e: + logging.error(f"Kselftests failed: {e}") + logging.error(f"Check {output_file} for details") + raise RuntimeError(f"Kselftests failed. See {output_file} for details") + + @staticmethod + def count_kselftest_passed(log_file: Path): + """ + Count the number of passed tests in a kselftest log file. 
+ + Args: + log_file: Path to the kselftest log file + + Returns: + int or None: Number of passed tests, or None if counting failed + """ + try: + with open(log_file, "r") as f: + passed_tests = sum(1 for line in f if line.startswith("ok")) + logging.info(f"Kselftests passed: {passed_tests} tests") + return passed_tests + except OSError as e: + logging.warning(f"Could not count passed tests: {e}") + return None + + def running_kernel_version(self): + """ + Get the running kernel version from the VM. + + Returns: + str: The kernel version string (e.g., "5.14.0-284.30.1+23.1.el9_2_ciq.x86_64") + """ + return SshCommand.running_kernel_version(domain=self.domain) + + def expected_kernel_version(self): + """ + Check if running kernel matches the current HEAD sha (for unofficial builds). + + Returns: + bool: True if running kernel was built from current HEAD + """ + kernel_version = self.running_kernel_version() + subversions = kernel_version.split("-") + if len(subversions) < 2: + return False + + # TODO some proper matching versioning here + install_hash = subversions[-1].split("+")[0] + + head_hash = self.current_head_sha_long() + if not head_hash.startswith(install_hash): + return False + + return True + + def build_kernel(self, config): + logging.debug("Building kernel") + build_script = str(config.base_path / Path("kernel-src-tree-tools") / Path("kernel_build.sh")) + output_file = self.kernel_workspace.folder.absolute() / Path( + f"kernel-build-{self.current_head_sha_short()}.log" + ) + ssh_cmd = f"cd {self.kernel_workspace.src_worktree.folder.absolute()} && {build_script} -n" + + SshCommand.run_with_output(output_file=output_file, domain=self.domain, command=[ssh_cmd]) + + def test(self, config): + if self.expected_kernel_version(): + logging.info("Expected running kernel version, no need to build the kernel") + else: + self.build_kernel(config=config) + self.reboot() + + if not self.expected_kernel_version(): + raise RuntimeError("Kernel version is not what we expect") + + self.kselftests(config=config) + + def console(self): + VmCommand.console(vm_name=self.name)