12 changes: 11 additions & 1 deletion src/BenchMatcha/config.py
@@ -40,11 +40,20 @@


class Config:
"""default configuration."""
"""default configuration.

Attributes:
color (str): plot marker color.
line_color (str): plot line color.
font (str): plot font family style.
x_axis (int): Maximum number of line ticks on x-axis.

"""

color: str = plotting.Prism[3]
line_color: str = plotting.Prism[4]
font: str = "Space Grotesk Light, Courier New, monospace"
x_axis: int = 13


class ConfigUpdater:
@@ -92,6 +101,7 @@ def update_config_from_pyproject(path: str) -> None:
color="#FFF"
line_color="#333"
font="Courier"
x_axis=5

"""
cu = ConfigUpdater(path)
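For context, `Config` is consumed as a class-level singleton, so `update_config_from_pyproject` only needs to assign onto the class itself. A minimal sketch of that pattern, assuming the settings live under a `[tool.benchmatcha]` table (the exact table name is not shown in this diff):

```python
import tomllib  # stdlib in Python 3.11+


class Config:
    """Default configuration (mirrors src/BenchMatcha/config.py)."""

    color: str = "#FFF"
    line_color: str = "#333"
    font: str = "Courier"
    x_axis: int = 13


def update_from_pyproject(path: str) -> None:
    """Overwrite Config class attributes from a pyproject.toml table."""
    with open(path, "rb") as f:
        data = tomllib.load(f)
    # Assumed table name; the real key used by BenchMatcha may differ.
    for key, value in data.get("tool", {}).get("benchmatcha", {}).items():
        if hasattr(Config, key):
            setattr(Config, key, value)
```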
4 changes: 2 additions & 2 deletions src/BenchMatcha/plotting.py
@@ -72,8 +72,8 @@ def construct_log2_axis(x: np.ndarray) -> tuple[list[int], list[str]]:
minimum = int(x.min())
maximum = power_of_2(int(x.max())) + 1
current = power_of_2(minimum)
if 1 < current >= minimum:
current //= 2
if current >= minimum:
current = max(1, current // 2)
power = int(np.log2(current))

while current < maximum:
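The rewritten guard simplifies the condition and makes the clamp explicit: `current` steps back one power of two whenever it is at or above the minimum, and `max(1, ...)` keeps it from ever reaching 0, where `np.log2` would return `-inf`. A self-contained sketch of the fixed loop, with a `power_of_2` helper whose round-up behavior is assumed and a label format invented for illustration:

```python
import numpy as np


def power_of_2(n: int) -> int:
    """Smallest power of two >= n (assumed behavior of the real helper)."""
    return 1 if n <= 1 else 2 ** int(np.ceil(np.log2(n)))


def construct_log2_axis(x: np.ndarray) -> tuple[list[int], list[str]]:
    """Build tick values and labels at powers of two spanning x."""
    minimum = int(x.min())
    maximum = power_of_2(int(x.max())) + 1
    current = power_of_2(minimum)
    if current >= minimum:
        current = max(1, current // 2)  # step back one power, never below 1
    power = int(np.log2(current))
    vals: list[int] = []
    labels: list[str] = []
    while current < maximum:
        vals.append(current)
        labels.append(f"2^{power}")
        current *= 2
        power += 1
    return vals, labels


print(construct_log2_axis(np.array([1, 1024])))  # ticks 2^0 through 2^10
```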
169 changes: 144 additions & 25 deletions src/BenchMatcha/runner.py
@@ -29,6 +29,7 @@

"""Primary Benchmark Runner."""

import argparse
import logging
import os
import sys
@@ -40,6 +41,8 @@
from wurlitzer import pipes # type: ignore[import-untyped]

from . import plotting

# from .complexity import analyze_complexity
from .config import Config, update_config_from_pyproject
from .errors import ParsingError
from .handlers import HandleText
@@ -53,6 +56,7 @@
def manage_registration(path: str) -> None:
"""Manage import, depending on whether path is a directory or file."""
abspath: str = os.path.abspath(path)
log.debug("Loading path: %s", abspath)
if not os.path.exists(abspath):
raise FileNotFoundError("Invalid filepath")

@@ -65,8 +69,10 @@ def manage_registration(path: str) -> None:
else:
log.warning(
"Unsupported path provided. While the path does exist, it is neither a"
" file nor a directory."
" python file nor a directory: %s",
abspath,
)
raise TypeError(f"Unsupported path type: {abspath}")


def plot_benchmark_array(benchmark: BenchmarkArray) -> go.Figure:
@@ -99,7 +105,7 @@ def plot_benchmark_array(benchmark: BenchmarkArray) -> go.Figure:
)

vals, labels = plotting.construct_log2_axis(benchmark.size)
if (p := len(vals) // 13) > 0:
if (p := len(vals) // Config.x_axis) > 0:
vals = vals[:: p + 1]
labels = labels[:: p + 1]
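The thinning step now keys off `Config.x_axis` instead of a hard-coded 13. With `n` candidate ticks, `p = n // Config.x_axis` and the stride `p + 1` keeps roughly `n // (p + 1)` of them. A quick worked check of that arithmetic (note that at exactly `n == x_axis` the integer division already yields `p == 1`, so thinning kicks in at equality):

```python
x_axis = 13  # Config.x_axis default

for n in (10, 13, 26, 40):
    p = n // x_axis
    kept = len(range(n)[:: p + 1])
    print(n, "->", kept)
# 10 -> 10, 13 -> 7, 26 -> 9, 40 -> 10
```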

@@ -128,8 +134,8 @@ def plot_benchmark_array(benchmark: BenchmarkArray) -> go.Figure:
return fig


# TODO: Consider defining CLI Exit Status in an Enum
def _run() -> BenchmarkContext:
# TODO: Improve logic here
if "--benchmark_format=json" not in sys.argv:
sys.argv.append("--benchmark_format=json")

@@ -146,8 +152,13 @@ def _run() -> BenchmarkContext:
...

text: str = stdout.read()
error: str = stderr.read()
stdout.close(), stderr.close() # pylint: disable=W0106

# Pass stderr from google_benchmark
if len(error):
log.error(error)

handler = HandleText(text)
try:
obj: dict = handler.handle()
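google_benchmark emits its output from C++ code, so a plain Python-level redirect would not see it; `wurlitzer.pipes` captures at the file-descriptor level, which is why the runner reads both streams afterwards and now forwards stderr instead of discarding it. A minimal runnable sketch of that capture-and-forward pattern, with a stub standing in for the real benchmark run:

```python
import logging

from wurlitzer import pipes  # third-party: pip install wurlitzer

logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)


def run_benchmarks() -> None:
    """Hypothetical stand-in for the google_benchmark invocation."""
    print("fake benchmark output")


with pipes() as (stdout, stderr):
    run_benchmarks()

text = stdout.read()
error = stderr.read()
stdout.close()
stderr.close()

if error:  # forward captured stderr instead of swallowing it
    log.error(error)
print("captured:", text.strip())
```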
@@ -182,39 +193,147 @@ def save(context: BenchmarkContext, cache_dir: str) -> None:
f.write(serialized)


def run(path: str, cache_dir: str) -> None:
def run(cache_dir: str) -> None:
"""BenchMatcha Runner."""
manage_registration(path)
context: BenchmarkContext = _run()

# TODO: remove arguments specific to BenchMatch to prevent failure on google
# benchmark interface.
# TODO: Capture re-analyzed complexity information. Determine where to store, or
# how to present this information in a manner that is useful.
# for bench in context.benchmarks:
# analyze_complexity(bench.size, bench.real_time)

context: BenchmarkContext = _run()
save(context, cache_dir)


# TODO: Handle a list of separated filepaths.
# def run_paths(paths: list[str]) -> None:
# """Run benchmarks against a list of paths."""
# for path in paths:
# manage_registration(path)
def get_args() -> argparse.Namespace:
"""Get BenchMatcha command line arguments and reset to support google_benchmark."""
args = argparse.ArgumentParser("benchmatcha", conflict_handler="error")
args.add_argument(
"-v",
"--verbose",
action="store_true",
help="Set Logging Level to DEBUG.",
required=False,
)
args.add_argument(
"-c",
"--color",
default=None,
help="Scatterplot marker color.",
required=False,
)
args.add_argument(
"-l",
"--line-color",
default=None,
help="Scatterplot complexity fit line color.",
required=False,
)
args.add_argument(
"-x",
"--x-axis",
default=None,
help="Maximum Number of units displayed on x-axis.",
required=False,
type=int,
)

cwd: str = os.getcwd()
args.add_argument(
"--config",
default=os.path.join(cwd, "pyproject.toml"),
help="Path location of pyproject.toml configuration file. "
"Defaults to Current Working Directory.",
)
args.add_argument(
"--cache",
default=os.path.join(cwd, ".benchmatcha"),
help="Path location of cache directory. Defaults to Current Working Directory.",
)
args.add_argument(
"--path",
action="extend",
nargs="+",
help="Valid file or directory path to benchmarks.",
)

# context: BenchmarkContext = _run()
# Capture anything that doesn't fit (to be fed downstream to google_benchmark cli)
args.add_argument("others", nargs=argparse.REMAINDER)

# TODO: Plotting over time (pulling from database)
# sub = args.add_subparsers()
# plot = sub.add_parser("plot")
# plot.add_argument(
# "--min-date",
# default=None,
# help="Filter data after minimum date (inclusive).",
# )
# plot.add_argument(
# "--max-date",
# default=None,
# help="Filter data before date (inclusive).",
# )
# plot.add_argument("--host", default=None, help="Filter data by specific host.")
# plot.add_argument("--os", default=None, help="Filter data by specific OS type.")
# plot.add_argument(
# "--function",
# default=None,
# help="Filter data to present a specific function name.",
# )
known, unknown = args.parse_known_args()

# NOTE: Only validate `benchmark_format` argument from google_benchmark cli, since
# we require json format to correctly work downstream. All other argument
# validations should be handled by google_benchmark cli parsing directly.
problems: list[str] = []
for k in filter(
lambda x: isinstance(x, str) and "--benchmark_format=" in x,
unknown,
):
if "json" not in k:
log.warning("Benchmark Format must be json: `%s`", k)
problems.append(k)
for p in problems:
unknown.remove(p)

# Prune / Reset for google_benchmark
sys.argv = [sys.argv[0], *unknown, *known.others]

return known
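
The interesting move here is the split-and-hand-off: `parse_known_args` separates BenchMatcha's own flags from everything it does not recognize, and rebuilding `sys.argv` passes the remainder (plus the `REMAINDER` positionals) through to google_benchmark's own flag parser untouched. A stripped-down sketch of that hand-off, using a hypothetical `--mine` flag in place of the real options:

```python
import argparse
import sys

parser = argparse.ArgumentParser("benchmatcha")
parser.add_argument("--mine", action="store_true")  # hypothetical flag
parser.add_argument("others", nargs=argparse.REMAINDER)

known, unknown = parser.parse_known_args(["--mine", "--benchmark_repetitions=3"])

# Prune / reset argv so the downstream parser never sees our flags.
sys.argv = [sys.argv[0], *unknown, *known.others]
print(sys.argv[1:])  # ['--benchmark_repetitions=3']
```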


def main() -> None:
"""Primary CLI Entry Point."""
# TODO: support specification of config file path from CLI, to overwrite default
# TODO: Support command line args to overwrite default config.
cwd: str = os.getcwd()
p = os.path.join(cwd, "pyproject.toml")
if os.path.exists(p):
update_config_from_pyproject(p)
args: argparse.Namespace = get_args()

if args.verbose:
logging.basicConfig(level=logging.DEBUG)
log.setLevel(logging.DEBUG)

# Create cache if does not exist
cache = os.path.join(cwd, ".benchmatcha")
if not os.path.exists(cache):
if os.path.exists(args.config):
log.debug("Updating default configuration from file: %s", args.config)
update_config_from_pyproject(args.config)

# NOTE: Configuration Args should overwrite values set in config file
if args.color is not None:
log.debug("Overriding color from arg: %s", args.color)
Config.color = args.color

if args.line_color is not None:
log.debug("Overriding line_color from arg: %s", args.line_color)
Config.line_color = args.line_color

if args.x_axis is not None:
log.debug("Overriding x_axis from arg: %s", args.x_axis)
Config.x_axis = args.x_axis

# Create cache directory if it does not exist
if not os.path.exists(cache := args.cache):
log.debug("Creating cache directory at: %s", cache)
os.mkdir(cache)

# TODO: Determine if a list of paths have been provided instead, and handle
run(sys.argv.pop(), cache)
# Natively handle multiple provided paths
for path in args.path:
manage_registration(path)

run(cache)
4 changes: 4 additions & 0 deletions src/BenchMatcha/structure.py
@@ -92,6 +92,7 @@ def from_json(cls, record: dict[str, Any]) -> Self:
)

def to_json(self) -> dict:
"""Convert to json dictionary object."""
return self.__dict__.copy()


@@ -167,6 +168,7 @@ def from_json(cls, record: dict[str, Any]) -> Self:
)

def to_json(self) -> dict:
"""Convert to json dictionary object."""
return self.__dict__.copy()


@@ -194,6 +196,7 @@ class BenchmarkArray:
complexity: ComplexityInfo

def to_json(self) -> dict:
"""Convert to json dictionary object."""
d = self.__dict__.copy()
d["complexity"] = self.complexity.to_json()

@@ -338,6 +341,7 @@ def from_json(cls, record: dict[str, Any]) -> Self:
)

def to_json(self) -> dict:
"""Convert to json dictionary object."""
data = self.__dict__.copy()
data["caches"] = [i.to_json() for i in self.caches]
data["benchmarks"] = [j.to_json() for j in self.benchmarks]
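The `to_json` methods documented here all follow the same serialization convention: `to_json` copies `__dict__` (recursing into nested objects where needed) and the matching `from_json` classmethod rebuilds the instance. A minimal sketch of that round trip on a hypothetical dataclass:

```python
from dataclasses import dataclass
from typing import Any, Self  # Self requires Python 3.11+


@dataclass
class Point:
    x: int
    y: int

    def to_json(self) -> dict:
        """Convert to json dictionary object."""
        return self.__dict__.copy()

    @classmethod
    def from_json(cls, record: dict[str, Any]) -> Self:
        """Rebuild an instance from its json dictionary."""
        return cls(**record)


p = Point(1, 2)
assert Point.from_json(p.to_json()) == p  # lossless round trip
```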
2 changes: 2 additions & 0 deletions src/BenchMatcha/utils.py
@@ -72,10 +72,12 @@ class BigO(enum.StrEnum):

@classmethod
def get(cls, value: str) -> str:
"""Get value from key string."""
# e.g. "o1" -> "(1)"
return cls[value].value

@classmethod
def back(cls, value: str) -> str:
"""Get key string from value."""
# e.g. "(1)" -> "o1"
return cls(value).name
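
The new docstrings and examples make the intent explicit: `get` and `back` form a two-way mapping between member names and display values, leaning on `StrEnum`'s lookup-by-name and lookup-by-value. A self-contained sketch, with member names and values assumed for illustration:

```python
import enum  # StrEnum requires Python 3.11+


class BigO(enum.StrEnum):
    """Illustrative members; the real enum's names/values may differ."""

    o1 = "(1)"
    oN = "(N)"

    @classmethod
    def get(cls, value: str) -> str:
        """Get value from key string."""
        return cls[value].value  # e.g. "o1" -> "(1)"

    @classmethod
    def back(cls, value: str) -> str:
        """Get key string from value."""
        return cls(value).name  # e.g. "(1)" -> "o1"


assert BigO.get("o1") == "(1)"
assert BigO.back("(1)") == "o1"
```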
15 changes: 11 additions & 4 deletions tests/integration/conftest.py
@@ -43,14 +43,21 @@
# not introduced until V7.10.3. See the following for details:
# https://github.com/nedbat/coveragepy/issues/1499
@pytest.fixture
def benchmark() -> Iterator[Callable[[list[str]], tuple[int, str, str, str]]]:
def benchmark() -> Iterator[
Callable[[list[str], Callable[[str], None] | None], tuple[int, str, str, str]]
]:
"""Benchmark entry point subprocess."""
with tempfile.TemporaryDirectory(dir=HERE) as cursor:

with tempfile.TemporaryDirectory(dir=os.getcwd()) as cursor:
def inner(
args: list[str],
setup: Callable[[str], None] | None = None,
) -> tuple[int, str, str, str]:
if setup is not None and callable(setup):
setup(cursor)

def inner(args: list[str]) -> tuple[int, str, str, str]:
response: subprocess.CompletedProcess[bytes] = subprocess.run(
["benchmatcha", *args],
["benchmatcha", "--benchmark_dry_run", *args],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=False,
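With the new signature, a test can hand the fixture an optional `setup` callable that receives the temporary directory before the subprocess runs, e.g. to stage a benchmark file or configuration in place. A hypothetical usage sketch (the staged file contents and the `[tool.benchmatcha]` table name are assumptions, not taken from this diff):

```python
import os


def test_with_staged_config(benchmark) -> None:
    """Hypothetical test exercising the extended fixture signature."""

    def setup(tmpdir: str) -> None:
        # Stage a config file inside the fixture's temporary directory.
        with open(os.path.join(tmpdir, "pyproject.toml"), "w") as f:
            f.write("[tool.benchmatcha]\nx_axis = 5\n")

    code, stdout, stderr, _ = benchmark(["--config", "pyproject.toml"], setup)
    assert code == 0
```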