new commit

Manuel 2025-04-27 16:44:09 +00:00
parent 3eee4490f5
commit bb408cba9c
3 changed files with 237 additions and 159 deletions

View File

@@ -0,0 +1,159 @@
# builder/core/bootstrap.py
import os
import logging
import platform
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, BASE_DIR)
try:
# logger_config comes from the project configs; the 'paths' dict is passed in by
# callers instead of being imported here (no direct import of builder.core.bootstrap.paths).
from builder.configs import logger_config
# Assuming run_command is in builder.core.command_runner.py
from builder.core.command_runner import run_command
except ImportError as e:
# Log the error before exiting
logging.error(f"Error importing necessary modules: {e}. Ensure your environment is set up correctly.")
sys.exit(1) # Exit if essential modules can't be imported
# Setup logger specifically for this module
logger = logger_config.setup_logger('bootstrap')
def filter_repositories_by_architecture(repositories_data, target_architecture):
"""
Filters the list of repositories to include only those supporting the target architecture.
Args:
repositories_data (list): A list of repository dictionaries,
each potentially having an 'architectures' key.
target_architecture (str): The target architecture (e.g., 'aarch64', 'i686', 'x86_64').
Returns:
list: A list of '-R <uri>' strings for the relevant repositories.
"""
filtered_repo_args = []
for repo in repositories_data:
# If 'architectures' key exists and target_architecture is in the list,
# or if 'architectures' key does not exist (assume it applies to all architectures).
# Also ensure 'uri' key exists.
if 'uri' in repo and ('architectures' not in repo or target_architecture in repo['architectures']):
filtered_repo_args.extend(["-R", repo['uri']])
logger.debug(f"Including repository {repo['uri']} for architecture {target_architecture}")
else:
if 'uri' not in repo:
logger.warning(f"Repository entry missing 'uri' key: {repo}. Skipping.")
else:
logger.debug(f"Excluding repository {repo['uri']} for architecture {target_architecture}")
return filtered_repo_args
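# Illustrative example only (hypothetical repository entries, not taken from the real config):
#   repos = [
#       {"name": "main",   "uri": "https://example.org/current", "architectures": ["x86_64"]},
#       {"name": "noarch", "uri": "https://example.org/extras"},
#   ]
#   filter_repositories_by_architecture(repos, "x86_64")
#   # -> ["-R", "https://example.org/current", "-R", "https://example.org/extras"]
#   filter_repositories_by_architecture(repos, "aarch64")
#   # -> ["-R", "https://example.org/extras"]  (an entry without 'architectures' applies to all)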
def run_bootstrap_for_environment(env_name, paths, target_architecture, host_architecture, repositories_data, all_bootstrap_packages):
"""
Executes the xbps-install bootstrap command for a specific environment (rootfs, pep-host, pep-target).
Handles architecture-specific repository filtering and execution in a chroot
for cross-architecture builds.
Args:
env_name (str): The name of the environment to bootstrap ('rootfs', 'pep-host', 'pep-target').
paths (dict): Dictionary of build paths.
target_architecture (str): The target architecture for the bootstrap.
host_architecture (str): The architecture of the host system.
repositories_data (list): List of repository dictionaries from YAML config.
Expected structure: [{'name': '...', 'uri': '...', 'architectures': [...]}, ...]
all_bootstrap_packages (dict): Dictionary containing package lists for
different environments and architectures.
Expected structure: {'env_name': {'arch': ['pkg1', ...]}}
Raises:
subprocess.CalledProcessError: If the xbps-install command fails.
ValueError: If an unknown environment name is provided.
"""
logger.info(f"=> Executing bootstrap command for {env_name.upper()} ({target_architecture})...")
# Determine the target directory based on environment name
target_directory_path = None
if env_name == "rootfs":
target_directory_path = paths.get("ROOTFS")
elif env_name == "pep-host":
target_directory_path = paths.get("PEPHOSTDIR")
elif env_name == "pep-target":
target_directory_path = paths.get("PEPTARGETDIR")
else:
logger.error(f"Unknown bootstrap environment: {env_name}")
raise ValueError(f"Unknown bootstrap environment: {env_name}")
if not target_directory_path:
logger.error(f"Path for environment '{env_name}' not found in paths dictionary.")
raise KeyError(f"Path for environment '{env_name}' not found.")
# Get the list of packages for this environment and target architecture
# Assumes all_bootstrap_packages has structure {'env_name': {'arch': [...]}}
packages_to_install = all_bootstrap_packages.get(env_name, {}).get(target_architecture, [])
if not packages_to_install:
logger.warning(f"No packages defined for {env_name} on architecture {target_architecture}. Skipping bootstrap.")
return
# Filter repository URLs for the target architecture
filtered_repo_args = filter_repositories_by_architecture(repositories_data, target_architecture)
if not filtered_repo_args:
logger.error(f"No repositories found supporting target architecture: {target_architecture}. Cannot bootstrap {env_name}.")
raise ValueError(f"No repositories found for architecture {target_architecture}")
# Get the XBPS cache directory for the target architecture (on the host)
# Assumes paths['XBPS_CACHEDIR_<ARCH_UPPER>'] is defined or we construct it
# Construct based on target arch as it's the target's packages being cached
host_cachedir = os.path.join(os.getcwd(), "xbps_package_cache", target_architecture)
# Ensure the cache directory exists
os.makedirs(host_cachedir, exist_ok=True)
logger.debug(f"Using XBPS cache directory (on host): {host_cachedir}")
# Construct the base xbps-install command arguments
xbps_command_args = [
"-S", "-y", # Sync and auto-yes
] + filtered_repo_args + [
"-r", target_directory_path, # Install to the target rootfs/directory (path on host)
"-c", host_cachedir,
] + packages_to_install
# The install is executed inside a chroot for ALL architectures, for consistency and to
# handle INSTALL scripts correctly, leveraging QEMU for foreign architectures.
logger.info(f"-> Running xbps-install for {env_name} in chroot...")
# Note: 'chroot' is resolved via the host's PATH; '/usr/bin/chroot' could be used instead
# if an absolute path is preferred.
command_list = [
"sudo", "chroot", target_directory_path, # Execute command inside this directory (path on host)
"/usr/bin/xbps-install", # The xbps-install binary *inside* the chroot (path inside chroot)
# Pass the rest of the xbps_command_args as arguments to xbps-install inside the chroot
# Arguments like -r and -c need paths that are correct from the *host's* perspective,
# which run_command handles by passing them literally.
# The chroot environment needs /proc, /sys and /dev mounted for xbps and package
# INSTALL scripts. The main script's mount_essential_filesystems_in_chroot is expected
# to handle this for rootfs before this function is called; pep-host and pep-target
# either need equivalent mounts set up beforehand or must not depend on those
# filesystems for these specific xbps-install steps.
] + xbps_command_args
# Execute the command
# The run_command function should handle subprocess execution and error checking
logger.info(f"Executing command: {' '.join(command_list)}")
# Assuming run_command handles potential CalledProcessError and raises it
run_command(command_list)
logger.info(f"=> Void Linux {env_name.upper()} bootstrap COMPLETE.")

View File

@@ -50,35 +50,3 @@ def load_repositories_from_yaml(yaml_file_path):
except Exception as e:
return {}
def generate_bootstrap_commands(paths, architecture, repositories, bootstrap_packages):
"""Generates xbps-install bootstrap commands for different environments (rootfs, pep-host, pep-target)."""
bootstrap_commands = []
chroot_paths = [paths['ROOTFS'], paths['PEPHOSTDIR'], paths['PEPTARGETDIR']]
targets = ["base-system", "void-host-sys", "void-target-sys"]
cache_dir = os.path.join(paths["ISO_CACHE_DIR"], architecture)
for i, chroot_path in enumerate(chroot_paths):
repo_args = ""
for repo_dict in repositories:
repo_url = repo_dict['uri']
repo_args += f"-R {repo_url} "
command_list = ['xbps-install', '-S', '-y']
repo_urls = repo_args.strip().split(" ")
command_list.extend(repo_urls)
command_list.extend(['-r', chroot_path])
command_list.extend(['-c', cache_dir])
command_list.extend(bootstrap_packages[targets[i]])
bootstrap_commands.append(command_list)
return bootstrap_commands
if __name__ == "__main__":
yaml_repos_file = paths['REPOS_YAML_FILE']
bootstrap_commands = generate_bootstrap_commands(yaml_repos_file)
print("xbps-install commands generated for bootstrap:\n")
for cmd in bootstrap_commands:
print(cmd)
print("\n")

View File

@@ -24,7 +24,6 @@ BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, BASE_DIR)
try:
from builder.core.bootstrap.fusato_structure import create_fusato_structure
from builder.core.bootstrap.yaml_repo_loader import generate_bootstrap_commands
from builder.core.command_runner import run_command
from builder.core.bootstrap.paths import paths
from builder.core.bootstrap import copy_system_files
@@ -32,7 +31,7 @@ try:
from builder.core.bootstrap import yaml_repo_loader
from builder.core.xbps_commands import xbps_commands
from builder.core.initramfs_builder import create_initramfs, copy_kernel_image
from core.iso_generator import iso_generator_main
from builder.core.iso_generator import iso_generator_main
from builder.core.rootfs_cleanup import cleanup_rootfs, post_install_cleanup
from builder.core import bootloaders
from builder.core.squashfs import create_squashfs_image
@@ -47,6 +46,7 @@ try:
from builder.core.bootstrap.cache import get_or_download_package, sync_repositories
from builder.core.final_cleanup import remove_fusato_directory
from builder.core.move_iso import move_and_cleanup_iso
from builder.core.bootstrap import bootstrap
except ImportError as e:
print(f"Error importing necessary modules: {e}. Please ensure all dependencies are installed.")
sys.exit(1)
@@ -75,17 +75,11 @@ def iso_builder_main(
boot_type
):
"""Main function to execute the entire ISO build process."""
create_fusato_structure()
build_rootfs = True
build_peptarget = True
build_pephost = True
rootfs_path = paths["ROOTFS"]
boot_path = paths["BOOT_DIR"]
pep_target_path = paths["PEPTARGETDIR"]
pephost_path = paths["PEPHOSTDIR"]
logger.info(f"=> Starting ISO build for: Architecture={architecture}, Desktop={desktop}, Kernel={kernel_type}")
logger.info("=> Initiating the ISO building process...")
@@ -99,15 +93,15 @@ def iso_builder_main(
os.environ["XBPS_CACHEDIR"] = xbps_cachedir_env
os.environ["XBPS_HOST_CACHEDIR"] = xbps_host_cachedir_env
os.environ["XBPS_ARCH"] = architecture
logger.info(f"=> Cache directory for the target architecture: {os.environ.get('XBPS_CACHEDIR')}")
logger.info(f"=> Cache directory for the host architecture: {os.environ.get('XBPS_HOST_CACHEDIR')}")
logger.info(f"=> XBPS_CACHEDIR (Target): {os.environ.get('XBPS_CACHEDIR')}")
logger.info(f"=> XBPS_HOST_CACHEDIR (Host): {os.environ.get('XBPS_HOST_CACHEDIR')}")
logger.info(f"=> XBPS_ARCH (Target): {os.environ.get('XBPS_ARCH')}")
logger.info("=> Copying Void keys to ROOTFS...")
logger.info("=> Copying Void keys to ROOTFS, PEP-HOST, and PEP-TARGET...")
copy_system_files.copy_void_keys_python(paths['ROOTFS'])
copy_system_files.copy_void_keys_python(paths['PEPHOSTDIR'])
copy_system_files.copy_void_keys_python(paths['PEPTARGETDIR'])
logger.info("=> Void keys copied to ROOTFS, PEPHOSTDIR, and PEPTARGETDIR.")
logger.info("=> Void keys copied.")
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
version_file_path = os.path.join(base_dir, "version")
@@ -122,76 +116,44 @@ def iso_builder_main(
logger.error(f"=> Error reading version file: {e}. Using default version.")
version = "0.0.0"
iso_filename = f"{iso_name_base}-{architecture}-{kernel_type}-{version}.iso"
iso_build_dir = paths["ISO_OUTPUT_DIR"]
iso_path = os.path.join(iso_build_dir, iso_filename)
logger.info(f"=> ISO filename will be: {iso_path}")
paths['GRUB_EFI_DIR'] = os.path.join(paths['ISO_BASE_DIR'], "grub_efi")
paths['ISOLINUX_DIR'] = os.path.join(paths['ISO_BASE_DIR'], "isolinux")
sync_repositories(architecture, repos_yaml_file)
iso_filename_info = f"{iso_name_base}-{architecture}-{kernel_type}-{version}.iso"
logger.info(f"=> ISO filename will be: {os.path.join(paths['ISO_OUTPUT_DIR'], iso_filename_info)}")
repo_data = yaml_repo_loader.load_repositories_from_yaml(repos_yaml_file)
repositories = repo_data['repositories']
bootstrap_packages = repo_data['bootstrap_packages']
repositories = repo_data.get('repositories', [])
all_bootstrap_packages = repo_data.get('bootstrap_packages', {})
if build_rootfs:
rootfs_path = paths["ROOTFS"]
logger.info("=> Executing bootstrap command for ROOTFS...")
bootstrap_commands = generate_bootstrap_commands(paths, architecture, repositories, bootstrap_packages)
for category, packages in bootstrap_packages.items():
if isinstance(packages, list):
for package in packages:
get_or_download_package(package, architecture, repos_yaml_file)
elif isinstance(packages, str):
get_or_download_package(packages, architecture, repos_yaml_file)
for command_list in bootstrap_commands:
target_path = command_list[-2]
command_str = ' '.join(command_list)
logger.info(f"=> Executing command [TARGETED TO: {target_path}]: {command_str}")
run_command(command_list)
bootstrap.run_bootstrap_for_environment(
"rootfs",
paths,
architecture,
host_arch,
repositories,
all_bootstrap_packages
)
logger.info("=> Void Linux ROOTFS bootstrap COMPLETE.")
if build_pephost:
pephost_path = paths["PEPHOSTDIR"]
logger.info("=> Executing bootstrap command for PEP-HOST...")
bootstrap_commands = generate_bootstrap_commands(paths, architecture, repositories, bootstrap_packages)
for category, packages in bootstrap_packages.items():
if isinstance(packages, list):
for package in packages:
get_or_download_package(package, architecture, repos_yaml_file)
elif isinstance(packages, str):
get_or_download_package(packages, architecture, repos_yaml_file)
for command_list in bootstrap_commands:
target_path = command_list[-2]
command_str = ' '.join(command_list)
logger.info(f"=> Executing command [TARGETED TO: {target_path}]: {command_str}")
run_command(command_list)
bootstrap.run_bootstrap_for_environment(
"pep-host",
paths,
architecture,
host_arch,
repositories,
all_bootstrap_packages
)
logger.info("=> Void Linux PEP-HOST bootstrap COMPLETE.")
if build_peptarget:
peptarget_path = paths["PEPTARGETDIR"]
logger.info("=> Executing bootstrap command for PEP-TARGET...")
bootstrap_commands = generate_bootstrap_commands(paths, architecture, repositories, bootstrap_packages)
for category, packages in bootstrap_packages.items():
if isinstance(packages, list):
for package in packages:
get_or_download_package(package, architecture, repos_yaml_file)
elif isinstance(packages, str):
get_or_download_package(packages, architecture, repos_yaml_file)
for command_list in bootstrap_commands:
target_path = command_list[-2]
command_str = ' '.join(command_list)
logger.info(f"=> Executing command [TARGETED TO: {target_path}]: {command_str}")
run_command(command_list)
bootstrap.run_bootstrap_for_environment(
"pep-target",
paths,
architecture,
host_arch,
repositories,
all_bootstrap_packages
)
logger.info("=> Void Linux PEP-TARGET bootstrap COMPLETE.")
logger.info("=> Void Linux system bootstrap (ROOTFS, PEP-HOST, PEP-TARGET) COMPLETE.")
@@ -209,45 +171,26 @@ def iso_builder_main(
iso_build_config['kernel_type'] = kernel_type
common_packages = load_yaml_config('builder/configs/packages/common_packages.yaml', 'common_packages.yaml')
if common_packages:
for category, packages in common_packages.items():
if isinstance(packages, list):
for package in packages:
get_or_download_package(package, architecture, repos_yaml_file)
elif isinstance(packages, str):
get_or_download_package(packages, architecture, repos_yaml_file)
install_common_packages_rootfs_yaml(paths["ROOTFS"], paths["COMMON_PACKAGES_YAML"], architecture)
install_common_packages_rootfs_yaml(paths["ROOTFS"], paths["COMMON_PACKAGES_YAML"], architecture, host_arch, repositories)
desktop_config = load_yaml_config(ficheiro_pacotes_desktop_yaml, desktop_selecionado + ".yaml")
if desktop_config and 'desktop_environment' in desktop_config:
desktop_packages = desktop_config['desktop_environment'].get('desktop_packages', [])
login_manager_packages = desktop_config['desktop_environment'].get('login_manager_packages', [])
default_packages = desktop_config['desktop_environment'].get('default_packages', [])
for package in desktop_packages:
get_or_download_package(package, architecture, repos_yaml_file)
for package in login_manager_packages:
get_or_download_package(package, architecture, repos_yaml_file)
for package in default_packages:
get_or_download_package(package, architecture, repos_yaml_file)
install_desktop_environment(
arch=architecture,
desktop_environment_name=desktop,
desktops_config=desktops_config,
target_env='rootfs'
target_env='rootfs',
host_arch=host_arch,
repositories_data=repositories
)
kernel_config = kernels_config.get('kernels', {}).get(kernel_type)
if kernel_config and 'package_name' in kernel_config:
kernel_package = kernel_config['package_name'].split()[0]
get_or_download_package(kernel_package, architecture, repos_yaml_file)
install_kernel(
arch=architecture,
kernel_type=kernel_type,
kernels_config=kernels_config,
target_env='rootfs'
target_env='rootfs',
host_arch=host_arch,
repositories_data=repositories
)
logger.info("=> Updating bootloader configuration (xbps-reconfigure -f linux) in ROOTFS...")
@@ -263,18 +206,17 @@ def iso_builder_main(
logger.warning("Failed to obtain kernel package name for reconfiguring. Using 'linux' as default.")
kernel_package_name_for_reconfigure = 'linux'
reconfigure_command = [
"chroot", paths['ROOTFS'],
xbps_commands["XBPS_RECONFIGURE_CMD"],
reconfigure_command_list = [
"sudo", "chroot", paths['ROOTFS'],
"/usr/bin/xbps-reconfigure",
'-f', kernel_package_name_for_reconfigure
]
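# Note: on Void, 'xbps-reconfigure -f <kernel package>' forcibly re-runs that package's
# INSTALL/configure hooks inside the chroot, which typically regenerates the initramfs
# (dracut) and related boot files for the installed kernel.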
reconfigure_command_str = " ".join(reconfigure_command)
reconfigure_command_str = " ".join(reconfigure_command_list)
logger.info(f"=> Executing XBPS_RECONFIGURE_CMD for bootloader [TARGETED TO: {paths['ROOTFS']}]: {reconfigure_command_str}")
try:
run_command(reconfigure_command)
run_command(reconfigure_command_list)
logger.info("=> Bootloader configuration updated successfully.")
except Exception as e:
logger.error(f"Error updating bootloader configuration: {e}")
@@ -284,12 +226,12 @@ def iso_builder_main(
reconfigure_system_in_chroot(
paths['ROOTFS'],
architecture,
host_arch,
repositories,
xbps_cachedir_env
)
logger.info("=> System reconfiguration in chroot COMPLETE.")
copy_system_files.copy_dracut_files(rootfs_path)
copy_system_files.copy_dracut_files(paths['ROOTFS'])
desktop_config_name = f"{desktop}.yaml"
desktop_config_path = os.path.join("builder/configs/desktops", desktop_config_name)
@@ -297,10 +239,8 @@ def iso_builder_main(
if desktop_config and 'desktop_environment' in desktop_config:
customizations_path = desktop_config['desktop_environment'].get('customizations_path')
rootfs_path = paths['ROOTFS']
if customizations_path:
copy_custom_files(customizations_path, rootfs_path, desktop_config_path)
copy_custom_files(customizations_path, paths['ROOTFS'], desktop_config_path)
else:
logger.warning(f"Customizations path not defined for desktop: {desktop}")
else:
@@ -311,12 +251,13 @@ def iso_builder_main(
logger.info("=> System customization completed.")
logger.info("=> Starting initramfs creation...")
initrd_path, kernel_version_dict = create_initramfs(rootfs_path, boot_path, xbps_commands, iso_build_config)
initrd_path, kernel_version_dict = create_initramfs(paths['ROOTFS'], paths['BOOT_DIR'], xbps_commands, iso_build_config)
if boot_type == "bios" or boot_type == "hybrid":
logger.info("=> Copying kernel image to BOOT directory...")
copy_kernel_image(
rootfs_path=rootfs_path,
boot_path=boot_path,
rootfs_path=paths['ROOTFS'],
boot_path=paths['BOOT_DIR'],
iso_build_config=iso_build_config,
)
kernel_version = None
@@ -331,7 +272,6 @@ def iso_builder_main(
logger.error("=> Failed to obtain kernel version during initramfs creation. ISO build may fail.")
return None
if kernel_version:
iso_build_config['kernel'] = kernel_version
logger.info(f"=> Kernel version extracted for bootloaders: {kernel_version}")
@@ -350,6 +290,9 @@ def iso_builder_main(
boot_title=f"Peppermint OS Void ({desktop.upper()} - kernel {kernel_type})",
boot_cmdline=iso_build_config['iso_config']['iso'].get('boot_cmdline', ''),
Volume_ID=Volume_ID,
paths=paths,
host_arch=host_arch,
repositories_data=repositories,
)
if isolinux_dir:
logger.info(f"=> Isolinux bootloader generated at: {isolinux_dir}")
@@ -360,7 +303,7 @@ def iso_builder_main(
grub_efi_dir = bootloaders.create_grub_efi_boot(
architecture=architecture,
pep_target_path=pep_target_path,
pep_target_path=paths["PEPTARGETDIR"],
boot_dir=paths["BOOT_DIR"],
grub_cfg_template_dir=paths["GRUB_EFI_CFG_TEMPLATE_DIR"],
grub_modules=iso_build_config.get('grub_modules_efi', []),
@@ -375,7 +318,9 @@ def iso_builder_main(
boot_title=boot_title,
keymap=iso_build_config.get('keymap', 'us'),
locale=iso_build_config.get('locale', 'en_US.UTF-8'),
config=iso_build_config
config=iso_build_config,
host_arch=host_arch,
repositories_data=repositories,
)
if grub_efi_dir:
logger.info(f"=> GRUB EFI bootloader generated at: {grub_efi_dir}")
@@ -409,15 +354,13 @@ def iso_builder_main(
return None
logger.info("=> Starting ISO image generation...")
iso_path = None
iso_path = iso_generator_main(
iso_final_path = iso_generator_main(
arch=architecture,
desktop=desktop,
kernel_type=kernel_type,
iso_build_dir=iso_build_dir,
iso_build_dir=paths['ISO_OUTPUT_DIR'],
iso_name_base=iso_name_base,
iso_name=iso_filename,
iso_name=iso_filename_info,
boot_type=boot_type,
Volume_ID=Volume_ID,
efi_boot_dir_name=efi_boot_dir_name,
@@ -430,17 +373,25 @@ def iso_builder_main(
iso_volume_application_id=iso_build_config.get('iso_volume_application_id', 'Peppermint OS'),
iso_system_id=iso_build_config.get('iso_system_id', 'Peppermint OS'),
iso_build_config=iso_build_config,
iso_profile=iso_profile
iso_profile=iso_profile,
paths=paths,
host_arch=host_arch,
repositories_data=repositories,
)
logger.debug(f"=> iso_generator_main returned: {iso_path}")
logger.debug(f"=> iso_generator_main returned: {iso_final_path}")
if not iso_final_path or not os.path.exists(iso_final_path):
logger.error("=> ISO image generation failed.")
return None
logger.info("=> Final cleanup tasks started.")
remove_fusato_directory()
logger.info("=> Final cleanup tasks finished.")
move_and_cleanup_iso(architecture, desktop, kernel_type, iso_name_base)
logger.info("=> ISO moved and old files removed from the server.")
final_iso_location = move_and_cleanup_iso(architecture, desktop, kernel_type, iso_name_base, iso_final_path)
return final_iso_location
return iso_filename
def main():
"""Main script to build Peppermint Void Linux ISOs."""