Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 3 additions & 8 deletions configuration/builders/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
from buildbot.process.buildrequest import BuildRequest
from buildbot.process.factory import BuildFactory
from buildbot.process.workerforbuilder import AbstractWorkerForBuilder
from configuration.builders.callables import canStartBuild, nextBuild
from configuration.builders.infra.runtime import BuildSequence
from configuration.steps.processors import (
processor_docker_cleanup,
Expand Down Expand Up @@ -101,11 +102,7 @@ def get_factory(self) -> BuildFactory:
def get_config(
self,
workers: Iterable[WorkerBase],
can_start_build: Callable[
[Builder, AbstractWorkerForBuilder, BuildRequest], bool
],
jobs: int,
next_build: Callable[[Builder, Iterable[BuildRequest]], BuildRequest],
tags: list[str] = [],
properties: dict[str, str] = None,
) -> util.BuilderConfig:
Expand All @@ -118,7 +115,6 @@ def get_config(
can_start_build (Callable): A callable that determines if a build can
start on a worker.
jobs (int): The number of CPU's to allocate for commands that support parallel execution.
next_build (Callable): A callable that determines the next build request.
tags (list[str], optional): A list of tags associated with the builder.
Defaults to an empty list.
properties (dict[str, str], optional): Additional properties for the builder.
Expand All @@ -127,7 +123,6 @@ def get_config(
- jobs is a measure of how many CPU's are used for the build, for commands that support parallel execution (e.g. make, mtr).
- provide a value greater or equal to 1
- jobs should never exceed the number of CPU's available on the worker, given by the worker total_jobs property.
- canStartBuild will determine at runtime if the builder can start on any of the workers assigned to it, based on what other jobs were claimed (currently running) by other builders.
Returns:
util.BuilderConfig: A BuilderConfig object containing the builder's configuration.
"""
Expand All @@ -145,8 +140,8 @@ def get_config(
name=self.name,
workernames=[worker.name for worker in workers],
tags=tags,
nextBuild=next_build,
canStartBuild=can_start_build,
nextBuild=nextBuild,
canStartBuild=canStartBuild,
factory=self.get_factory(),
properties=properties,
)
Expand Down
5 changes: 0 additions & 5 deletions configuration/builders/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@
from typing import Union

from configuration.builders.base import GenericBuilder
from configuration.builders.callables import canStartBuild, nextBuild
from configuration.builders.infra.runtime import DockerConfig
from configuration.builders.sequences.release import deb_autobake, rpm_autobake

Expand Down Expand Up @@ -72,8 +71,6 @@ def deb_release_builder(
],
).get_config(
workers=worker_pool,
next_build=nextBuild,
can_start_build=canStartBuild,
tags=["release_packages", "autobake", "deb"],
jobs=DEFAULT_BUILDER_JOBS,
properties={
Expand Down Expand Up @@ -127,8 +124,6 @@ def rpm_release_builder(
],
).get_config(
workers=worker_pool,
next_build=nextBuild,
can_start_build=canStartBuild,
tags=["release_packages", "autobake", "rpm"],
jobs=DEFAULT_BUILDER_JOBS,
properties={
Expand Down
10 changes: 8 additions & 2 deletions configuration/steps/commands/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,8 +62,14 @@ def as_cmd_arg(self) -> list[str]:


class BashCommand(Command):
def __init__(self, cmd: str, name: str = "Run command", user: str = "buildbot"):
super().__init__(name=name, workdir=PurePath("."), user=user)
def __init__(
    self,
    cmd: str,
    name: str = "Run command",
    user: str = "buildbot",
    workdir: PurePath = PurePath("."),
):
    """Create a shell command step.

    Args:
        cmd (str): The shell command line to execute.
        name (str, optional): Display name of the step. Defaults to "Run command".
        user (str, optional): User the command runs as. Defaults to "buildbot".
        workdir (PurePath, optional): Working directory for the command.
            Defaults to the current directory. (PurePath is immutable, so a
            default instance is safe to share across calls.)
    """
    super().__init__(name=name, workdir=workdir, user=user)
    self.cmd = cmd

def as_cmd_arg(self) -> list[str]:
Expand Down
8 changes: 7 additions & 1 deletion configuration/steps/commands/packages.py
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,13 @@ def as_cmd_arg(self) -> list[str]:
util.Interpolate(
f"""
mkdir -p {self.destination} &&
cp -r {package_list} {self.destination}
for package in {package_list}; do
if [ ! -e "$package" ]; then
echo "Warning: package '$package' does not exist and will be skipped."
continue
fi
cp -r $package {self.destination}
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Don't we want verbose output here, cp -vr? Just a question, maybe a bad idea.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

There's not much interest in a verbose output for this step

done
"""
),
]
Expand Down
26 changes: 25 additions & 1 deletion configuration/workers/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@


class WorkerBase:
ALLOWED_OS_TYPES = ["debian", "redhat", "macos", "windows", "freebsd", "aix"]
ALLOWED_ARCHS = ["amd64", "aarch64", "ppc64le", "s390x"]
"""
Base class for worker instances in the build system.
This class provides a structure for worker instances, including their name and properties.
Expand All @@ -10,9 +12,31 @@ class WorkerBase:
properties (dict[str, Union[str, int, bool]]): A dictionary of properties associated with the worker.
"""

def __init__(self, name: str, properties: dict[str, Union[str, int, bool]]):
def __init__(
    self,
    name: str,
    properties: dict[str, Union[str, int, bool]],
    os_type: str,
    arch: str,
):
    """Initialize a worker and validate its OS type and architecture.

    Args:
        name (str): Unique worker name.
        properties (dict[str, Union[str, int, bool]]): Buildbot properties
            associated with the worker.
        os_type (str): Operating system family; must be one of ALLOWED_OS_TYPES.
        arch (str): CPU architecture; must be one of ALLOWED_ARCHS.

    Raises:
        ValueError: If os_type or arch is not an allowed value.
    """
    self.name = name
    self.os_type = os_type
    self.properties = properties
    self.arch = arch
    # Fail fast at construction time on invalid worker metadata.
    self._raise_for_invalid_os_type()
    self._raise_for_invalid_arch()

def __str__(self):
    """Return the worker's name, used for display and logging."""
    return self.name

def _raise_for_invalid_os_type(self):
    """Raise ValueError when this worker's OS type is not an allowed value."""
    if self.os_type in WorkerBase.ALLOWED_OS_TYPES:
        return
    raise ValueError(
        f"Invalid OS type: {self.os_type} for {self.name}. Allowed: {WorkerBase.ALLOWED_OS_TYPES}"
    )

def _raise_for_invalid_arch(self):
    """Raise ValueError when this worker's architecture is not an allowed value."""
    if self.arch in WorkerBase.ALLOWED_ARCHS:
        return
    raise ValueError(
        f"Invalid arch: {self.arch} for {self.name}. Allowed: {WorkerBase.ALLOWED_ARCHS}"
    )
84 changes: 49 additions & 35 deletions configuration/workers/worker.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,52 +6,58 @@

class WorkerPool:
"""
A class to manage a pool of workers categorized by architecture.

Manages a pool of workers categorized by architecture.
This class allows adding workers and retrieving them based on criteria such as architecture, names, and OS type.
Attributes:
workers (defaultdict): A dictionary where keys are architecture types (str)
and values are lists of worker names (str) associated with that architecture.
instances (list): A list of worker instances.

workers (defaultdict): A dictionary mapping architectures to lists of WorkerBase instances.
Methods:
__init__():
Initializes the WorkerPool with empty workers and instances.

add(arch: str, worker):
Adds a worker to the pool under the specified architecture.
Args:
arch (str): The architecture type to associate the worker with.
worker (object): The worker object, which must have `name` and `instance` attributes.

get_workers_for_arch(arch: str, filter_fn: callable = None) -> list:
Retrieves a list of worker names for the specified architecture, optionally filtered
by a provided function.
Args:
arch (str): The architecture type to retrieve workers for.
filter_fn (callable, optional): A function to filter the workers. Defaults to None.
Returns:
list: A list of worker names matching the specified architecture and filter criteria.
Raises:
ValueError: If no workers are found for the specified architecture.
add(worker: WorkerBase): Adds a worker
get_instances() -> list: Retrieves all worker instances in the pool.
get_workers_for_arch(arch: str, names: list = None, os_type: str = None) -> list: Retrieves workers for a specific architecture, optionally filtering by names and OS type.
"""

def __init__(self):
self.workers = defaultdict(list)

def add(self, arch, worker):
self.workers[arch].append((worker))
def add(self, worker: WorkerBase):
    """Register a worker in the bucket matching its architecture."""
    bucket = self.workers[worker.arch]
    bucket.append(worker)

def get_instances(self):
    """Return the buildbot instance of every worker across all architectures."""
    instances = []
    for bucket in self.workers.values():
        for registered_worker in bucket:
            instances.append(registered_worker.instance)
    return instances

def get_workers_for_arch(self, arch: str, filter_fn: str = lambda _: True) -> list:
workers_for_arch = [worker for worker in self.workers.get(arch, [])]
workers = list(filter(lambda w: filter_fn(w.name), workers_for_arch))
if not workers:
raise ValueError(f"No workers found for architecture: {arch}")
return workers
def get_workers_for_arch(
    self, arch: str, names: list = None, os_type: str = None
) -> list:
    """Retrieve workers for an architecture, optionally filtered by name and OS type.

    Args:
        arch (str): Target architecture; must be in WorkerBase.ALLOWED_ARCHS.
        names (list, optional): If given, keep only workers whose name is listed.
        os_type (str, optional): If given, keep only workers of this OS family;
            must be in WorkerBase.ALLOWED_OS_TYPES.

    Returns:
        list: The matching WorkerBase instances.

    Raises:
        ValueError: If arch or os_type is invalid, or no worker matches the filters.
    """
    WorkerPool._raise_for_invalid_os_type(os_type=os_type)
    WorkerPool._raise_for_invalid_arch(arch=arch)

    # Copy the bucket so filtering never mutates the pool's internal state.
    # NOTE: the original annotated this as a bare `WorkerBase`, but it holds
    # a list of workers.
    workers_for_arch: list[WorkerBase] = list(self.workers.get(arch, []))
    if names:
        workers_for_arch = [w for w in workers_for_arch if w.name in names]
    if os_type:
        workers_for_arch = [w for w in workers_for_arch if w.os_type == os_type]

    if not workers_for_arch:
        raise ValueError(
            f"No workers found for: arch={arch}, names={names}, os_type={os_type}"
        )
    return workers_for_arch

@staticmethod
def _raise_for_invalid_os_type(os_type: str):
    """Reject an os_type filter that is not an allowed OS family (None/empty is accepted)."""
    if not os_type or os_type in WorkerBase.ALLOWED_OS_TYPES:
        return
    raise ValueError(
        f"Invalid OS type: {os_type} requested. Allowed: {WorkerBase.ALLOWED_OS_TYPES}"
    )

@staticmethod
def _raise_for_invalid_arch(arch: str):
    """Reject an arch value that is not an allowed architecture."""
    if arch in WorkerBase.ALLOWED_ARCHS:
        return
    raise ValueError(
        f"Invalid arch: {arch} requested. Allowed: {WorkerBase.ALLOWED_ARCHS}"
    )


class NonLatent(WorkerBase):
Expand All @@ -75,15 +81,23 @@ class NonLatent(WorkerBase):
"""

def __init__(
self, name: str, config: dict[str, dict], total_jobs: int, max_builds=999
self,
name: str,
config: dict[str, dict],
os_type: str,
arch: str,
total_jobs: int,
max_builds=999,
):
self.instance = None
self.requested_jobs = 0
self.builders = {}
self.config = config
self.max_builds = max_builds
self.total_jobs = total_jobs
super().__init__(name, properties={"total_jobs": total_jobs})
super().__init__(
name, properties={"total_jobs": total_jobs}, os_type=os_type, arch=arch
)
self.__define()

def __define(self):
Expand Down
60 changes: 39 additions & 21 deletions master-migration/master.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,9 @@

import os

import yaml

from configuration.builders.base import GenericBuilder
from configuration.builders.callables import canStartBuild, nextBuild
from configuration.builders.common import (
deb_release_builder,
docker_config,
Expand Down Expand Up @@ -33,15 +34,40 @@ with open(os.path.join(base_dir, "master-private.cfg"), "r") as file:

c = BuildmasterConfig = base_master_config(config)

## ------------------------------------------------------------------- ##
## WORKER POOL ##
## ------------------------------------------------------------------- ##

with open(os.path.join(cfg_dir, "workers.yaml"), "r") as f:
workers_data = yaml.safe_load(f)

WORKER_POOL = worker.WorkerPool()
WORKER_POOL.add(
arch="amd64", worker=worker.NonLatent(name="hz-bbw8", config=config, total_jobs=110)
)
WORKER_POOL.add(
arch="amd64", worker=worker.NonLatent(name="hz-bbw9", config=config, total_jobs=110)
)
for w in workers_data:
worker_args = dict(
name=w["name"],
arch=w["arch"],
config=config,
total_jobs=w["total_jobs"],
os_type=w["os_type"],
)
# Optional, canStartBuild already handles build allocation based on total_jobs
# but some workers may want to limit the number of builds even further
if "max_builds" in w:
worker_args["max_builds"] = w["max_builds"]

WORKER_POOL.add(
worker=worker.NonLatent(**worker_args),
)
c["workers"] = WORKER_POOL.get_instances()

DEFAULT_AMD64_WORKER_POOL = WORKER_POOL.get_workers_for_arch(
arch="amd64", names=["hz-bbw8", "hz-bbw9"]
) # General purpose AMD64 workers

DEFAULT_AMD64_RHEL_WORKER_POOL = WORKER_POOL.get_workers_for_arch(
arch="amd64", names=["hz-bbw8", "hz-bbw9"], os_type="redhat"
) # Required by RHEL release builds (SRPM)

## ------------------------------------------------------------------- ##
## RELEASE BUILDERS ##
## ------------------------------------------------------------------- ##
Expand All @@ -50,15 +76,15 @@ c["builders"] = [
rpm_release_builder(
name="amd64-rhel-9-rpm-autobake-migration",
image="rhel9",
worker_pool=WORKER_POOL.get_workers_for_arch(arch="amd64"),
worker_pool=DEFAULT_AMD64_RHEL_WORKER_POOL,
arch="amd64",
has_compat=False,
rpm_type="rhel9",
),
deb_release_builder(
name="amd64-debian-12-deb-autobake-migration",
image="debian12",
worker_pool=WORKER_POOL.get_workers_for_arch(arch="amd64"),
worker_pool=DEFAULT_AMD64_WORKER_POOL,
),
]

Expand All @@ -81,9 +107,7 @@ c["builders"].extend(
f_seq(jobs=compile_only_jobs, config=docker_config(image="debian13"))
],
).get_config(
workers=WORKER_POOL.get_workers_for_arch(arch="amd64"),
next_build=nextBuild,
can_start_build=canStartBuild,
workers=DEFAULT_AMD64_WORKER_POOL,
tags=["compile-only", "protected"],
jobs=compile_only_jobs,
)
Expand All @@ -106,9 +130,7 @@ c["builders"].append(
)
],
).get_config(
workers=WORKER_POOL.get_workers_for_arch(arch="amd64"),
next_build=nextBuild,
can_start_build=canStartBuild,
workers=DEFAULT_AMD64_WORKER_POOL,
tags=[
"debug",
],
Expand Down Expand Up @@ -137,9 +159,7 @@ def ubasan_builder(name: str, debug: bool) -> GenericBuilder:
)
],
).get_config(
workers=WORKER_POOL.get_workers_for_arch(arch="amd64"),
next_build=nextBuild,
can_start_build=canStartBuild,
workers=DEFAULT_AMD64_WORKER_POOL,
tags=list(tags_ubasan),
jobs=jobs,
)
Expand All @@ -166,9 +186,7 @@ def msan_builder(name: str, debug: bool) -> GenericBuilder:
)
],
).get_config(
workers=WORKER_POOL.get_workers_for_arch(arch="amd64"),
next_build=nextBuild,
can_start_build=canStartBuild,
workers=DEFAULT_AMD64_WORKER_POOL,
tags=list(tags_msan),
jobs=jobs,
)
Expand Down
Loading