Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
782 changes: 2 additions & 780 deletions src/guidellm/__main__.py

Large diffs are not rendered by default.

46 changes: 46 additions & 0 deletions src/guidellm/cli/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
"""
GuideLLM command-line interface entry point.

Primary CLI application providing benchmark execution, dataset preprocessing, and
mock server functionality for language model evaluation. Organizes commands into
three main groups: benchmark operations for performance testing, preprocessing
utilities for data transformation, and mock server capabilities for development
and testing. Supports multiple backends, output formats, and flexible configuration
through CLI options and environment variables.

Example:
::
# Run a benchmark against a model
guidellm benchmark run --target http://localhost:8000 --data dataset.json \\
--profile sweep

# Preprocess a dataset
guidellm preprocess dataset input.json output.json --processor gpt2

# Start a mock server for testing
guidellm mock-server --host 0.0.0.0 --port 8080
"""

from __future__ import annotations

import click

from .benchmark import benchmark
from .config import config
from .mock_server import mock_server
from .preprocess import preprocess

__all__ = ["cli"]


@click.group()
@click.version_option(package_name="guidellm", message="guidellm version: %(version)s")
def cli():
    """GuideLLM CLI for benchmarking, preprocessing, and testing language models."""


# Attach every top-level command and group to the root CLI entry point.
for _command in (config, mock_server, benchmark, preprocess):
    cli.add_command(_command)
26 changes: 26 additions & 0 deletions src/guidellm/cli/benchmark/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
"""Benchmark command group."""

from __future__ import annotations

import click

from guidellm.utils.default_group import DefaultGroupHandler

from .from_file import from_file
from .run import run

__all__ = ["benchmark"]


@click.group(
    cls=DefaultGroupHandler,
    default="run",
    help="Run a benchmark or load a previously saved benchmark report.",
)
def benchmark():
    """Benchmark commands for performance testing generative models."""


# Wire the subcommands into the group; "run" is invoked when none is named.
for _subcommand in (run, from_file):
    benchmark.add_command(_subcommand)
45 changes: 45 additions & 0 deletions src/guidellm/cli/benchmark/from_file.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
"""Benchmark from-file command."""

from __future__ import annotations

import asyncio
from pathlib import Path

import click

from guidellm.benchmark import reimport_benchmarks_report

__all__ = ["from_file"]


@click.command(
    "from-file",
    help=(
        "Load a saved benchmark report and optionally re-export to other formats. "
        "PATH: Path to the saved benchmark report file (default: ./benchmarks.json)."
    ),
)
@click.argument(
    "path",
    type=click.Path(file_okay=True, dir_okay=False, exists=True),
    # NOTE: Path.cwd() is captured once at import time, not per invocation.
    default=Path.cwd() / "benchmarks.json",
)
@click.option(
    "--output-path",
    type=click.Path(),
    default=Path.cwd(),
    help=(
        "Directory or file path to save re-exported benchmark results. "
        "If a directory, all output formats will be saved there. "
        "If a file, the matching format will be saved to that file."
    ),
)
@click.option(
    "--output-formats",
    multiple=True,
    type=str,
    default=("console", "json"),
    help="Output formats for benchmark results (e.g., console, json, html, csv).",
)
def from_file(path, output_path, output_formats):
    """Load a previously saved benchmark report and re-export it.

    :param path: Saved benchmark report file to load (must exist; validated
        by click before this function runs).
    :param output_path: Directory or file to write the re-exported results to.
    :param output_formats: One or more output format names to produce.
    """
    # reimport_benchmarks_report is a coroutine; drive it to completion here.
    asyncio.run(reimport_benchmarks_report(path, output_path, output_formats))
Loading
Loading