Speed up benchmark cpu tests with _BORG_BENCHMARK_CPU_TEST env var

Add _BORG_BENCHMARK_CPU_TEST environment variable (following the existing
pattern of _BORG_BENCHMARK_CRUD_TEST) that reduces:
- timeit iterations from 100 to 1 (10 to 1 for compression)
- KDF iterations from 5 to 1
- random data buffer from 10MB to 100KB

Set this env var in test_benchmark_cpu and test_benchmark_cpu_json so
they complete quickly in CI while still exercising the full code path.

Fixes #9414

Signed-off-by: edvatar <88481784+toroleapinc@users.noreply.github.com>
This commit is contained in:
edvatar 2026-02-27 11:17:46 -05:00
parent eb82eeba05
commit bb2ca29cba
2 changed files with 18 additions and 10 deletions

View file

@@ -167,7 +167,13 @@ class BenchmarkMixIn:
result = {} if args.json else None
random_10M = os.urandom(10 * 1000 * 1000)
is_test = "_BORG_BENCHMARK_CPU_TEST" in os.environ
# Use minimal iterations and data size in test mode to keep CI fast.
number_default = 1 if is_test else 100
number_compression = 1 if is_test else 10
data_size = 100 * 1000 if is_test else 10 * 1000 * 1000
random_10M = os.urandom(data_size)
key_256 = os.urandom(32)
key_128 = os.urandom(16)
key_96 = os.urandom(12)
@@ -202,7 +208,7 @@ class BenchmarkMixIn:
),
("fixed,1048576", "ch = get_chunker('fixed', 1048576, sparse=False)", "chunkit(ch)", locals()),
]:
dt = timeit(func, setup, number=100, globals=vars)
dt = timeit(func, setup, number=number_default, globals=vars)
if args.json:
algo, _, algo_params = spec.partition(",")
result["chunkers"].append({"algo": algo, "algo_params": algo_params, "size": size, "time": dt})
@@ -218,7 +224,7 @@ class BenchmarkMixIn:
size = 1000000000
tests = [("xxh64", lambda: xxh64(random_10M)), ("crc32 (zlib)", lambda: crc32(random_10M))]
for spec, func in tests:
dt = timeit(func, number=100)
dt = timeit(func, number=number_default)
if args.json:
result["checksums"].append({"algo": spec, "size": size, "time": dt})
else:
@@ -235,7 +241,7 @@ class BenchmarkMixIn:
("hmac-sha256", lambda: hmac_sha256(key_256, random_10M)),
("blake2b-256", lambda: blake2b_256(key_256, random_10M)),
]:
dt = timeit(func, number=100)
dt = timeit(func, number=number_default)
if args.json:
result["hashes"].append({"algo": spec, "size": size, "time": dt})
else:
@@ -275,7 +281,7 @@ class BenchmarkMixIn:
),
]
for spec, func in tests:
dt = timeit(func, number=100)
dt = timeit(func, number=number_default)
if args.json:
result["encryption"].append({"algo": spec, "size": size, "time": dt})
else:
@@ -285,7 +291,7 @@ class BenchmarkMixIn:
print("KDFs (slow is GOOD, use argon2!) ===============================")
else:
result["kdf"] = []
count = 5
count = 1 if is_test else 5
for spec, func in [
("pbkdf2", lambda: FlexiKey.pbkdf2("mypassphrase", b"salt" * 8, PBKDF2_ITERATIONS, 32)),
("argon2", lambda: FlexiKey.argon2("mypassphrase", 64, b"S" * ARGON2_SALT_BYTES, **ARGON2_ARGS)),
@@ -319,7 +325,7 @@ class BenchmarkMixIn:
]:
compressor = CompressionSpec(spec).compressor
size = 100000000
dt = timeit(lambda: compressor.compress({}, random_10M), number=10)
dt = timeit(lambda: compressor.compress({}, random_10M), number=number_compression)
if args.json:
algo, _, algo_params = spec.partition(",")
result["compression"].append({"algo": algo, "algo_params": algo_params, "size": size, "time": dt})
@@ -334,7 +340,7 @@ class BenchmarkMixIn:
items = [item.as_dict()] * 1000
size = "100k Items"
spec = "msgpack"
dt = timeit(lambda: msgpack.packb(items), number=100)
dt = timeit(lambda: msgpack.packb(items), number=number_default)
if args.json:
result["msgpack"].append({"algo": spec, "count": 100000, "time": dt})
else:

View file

@@ -45,7 +45,8 @@ def test_benchmark_crud_json_lines(archiver, monkeypatch):
assert entry["io"] > 0
def test_benchmark_cpu(archiver):
def test_benchmark_cpu(archiver, monkeypatch):
monkeypatch.setenv("_BORG_BENCHMARK_CPU_TEST", "YES")
output = cmd(archiver, "benchmark", "cpu")
# verify all section headers appear in the plain-text output
assert "Chunkers" in output
@@ -57,7 +58,8 @@ def test_benchmark_cpu(archiver):
assert "msgpack" in output
def test_benchmark_cpu_json(archiver):
def test_benchmark_cpu_json(archiver, monkeypatch):
monkeypatch.setenv("_BORG_BENCHMARK_CPU_TEST", "YES")
output = cmd(archiver, "benchmark", "cpu", "--json")
result = json.loads(output)
assert isinstance(result, dict)