Integration Examples

The following examples demonstrate how to integrate BenchmarkFcns with popular optimization and AI libraries.
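
All of the examples rely on the same calling convention: each benchmark function takes an M-by-N matrix (M points, each of dimension N) and returns the M function values in a single call. A minimal sketch of a direct call, using the ackley function from the SciPy example below:

import numpy as np
from benchmarkfcns import ackley

# Three 4-dimensional points, evaluated in one vectorized call.
points = np.zeros((3, 4))   # M = 3 points, N = 4 dimensions
values = ackley(points)     # M = 3 function values
print(values)               # Ackley's global minimum is 0 at the origin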

SciPy

Using vectorized evaluation with SciPy’s Differential Evolution.

from scipy.optimize import differential_evolution
from benchmarkfcns import ackley
import time


# With vectorized=True, SciPy passes candidate solutions as an (N, S) array:
# one column per solution vector. BenchmarkFcns expects an (M, N) matrix with
# one point per row, so the array is transposed before evaluation. The whole
# population is then evaluated in a single call to the C++ backend, enabling
# OpenMP parallelization.
def objective_vectorized(x):
    # The final polishing step may pass a single 1-D vector; promote it to (1, N).
    x = x.T if x.ndim > 1 else x.reshape(1, -1)
    return ackley(x)


if __name__ == "__main__":
    dimensions = 20
    bounds = [(-32, 32)] * dimensions

    print(
        f"Starting VECTORIZED SciPy Differential Evolution on {dimensions}D Ackley..."
    )

    start = time.time()
    # Setting vectorized=True is the key to high performance here
    result = differential_evolution(
        objective_vectorized, bounds, vectorized=True, popsize=1000
    )
    end = time.time()

    print("\nOptimization Results:")
    print(f"Global Minimum Value: {result.fun}")
    print(f"Time Taken: {end - start:.4f} seconds")
    print(f"Success: {result.success}")

Nevergrad

Integration with Meta's Nevergrad library, using the NGOpt meta-optimizer.

import nevergrad as ng
from benchmarkfcns import rastrigin


# benchmarkfcns expects a 2D array (M-by-N).
def objective(x):
    # With an ng.p.Array parametrization, Nevergrad passes the candidate as a
    # 1-D NumPy array of length `dimensions`; reshape it to a single-row matrix.
    return rastrigin(x.reshape(1, -1))[0]


if __name__ == "__main__":
    dimensions = 5

    # Define the parametrization (search space)
    parametrization = ng.p.Array(shape=(dimensions,)).set_bounds(
        lower=-5.12, upper=5.12
    )

    # Choose an optimizer (NGOpt is a powerful meta-optimizer)
    optimizer = ng.optimizers.registry["NGOpt"](
        parametrization=parametrization, budget=1000
    )

    print(f"Starting Nevergrad NGOpt on {dimensions}D Rastrigin function...")

    recommendation = optimizer.minimize(objective)

    print("\nOptimization Results:")
    print(f"Global Minimum Value: {recommendation.loss}")
    print(f"Best Coordinates: {recommendation.value}")

Optuna

Testing Bayesian optimization performance with Optuna (the default sampler is TPE).

import optuna
from benchmarkfcns import rosenbrock
import numpy as np


def objective(trial, dims=5):
    # Define the search space for each dimension
    x = np.array([trial.suggest_float(f"x{i}", -5, 5) for i in range(dims)])

    # Evaluate using the high-performance C++ backend
    return rosenbrock(x.reshape(1, -1))[0]


if __name__ == "__main__":
    dims = 5
    print(f"Starting Optuna Study on {dims}D Rosenbrock function...")

    # Create a study to minimize the function
    study = optuna.create_study(direction="minimize")
    study.optimize(lambda trial: objective(trial, dims=dims), n_trials=500)

    print("\nOptimization Results:")
    print(f"Best Value: {study.best_value}")
    print(f"Best Params: {study.best_params}")

Pymoo

Multi-objective optimization using NSGA-II.

from pymoo.optimize import minimize
from pymoo.core.problem import Problem
from pymoo.algorithms.moo.nsga2 import NSGA2
from benchmarkfcns import multiobjective


# Using 'Problem' instead of 'ElementwiseProblem' enables vectorized evaluation.
# The algorithm passes the entire population as a matrix.
class ZDT1ProblemVectorized(Problem):
    def __init__(self, n_var=30):
        super().__init__(n_var=n_var, n_obj=2, xl=0.0, xu=1.0)

    def _evaluate(self, x, out, *args, **kwargs):
        # x is (M, N). multiobjective.zdt1 accepts this and returns (M, 2).
        # The C++ core handles the parallel evaluation of all M points.
        out["F"] = multiobjective.zdt1(x)


if __name__ == "__main__":
    problem = ZDT1ProblemVectorized()
    algorithm = NSGA2(pop_size=100)

    print("Starting VECTORIZED Pymoo NSGA-II on ZDT1...")

    res = minimize(problem, algorithm, ("n_gen", 100), seed=1, verbose=False)

    print("\nOptimization Results:")
    print(f"Number of Pareto-optimal solutions: {len(res.F)}")
    print(f"First 5 objective scores:\n{res.F[:5]}")

CMA-ES

Continuous black-box optimization with CMA-ES (the pycma package).

import cma
from benchmarkfcns import elliptic
import numpy as np


# benchmarkfcns expects a 2D array (M-by-N).
def objective(x):
    return elliptic(x.reshape(1, -1))[0]


if __name__ == "__main__":
    dimensions = 10
    x0 = np.random.uniform(-5, 5, dimensions)  # Initial guess
    sigma0 = 0.5  # Initial step size

    print(f"Starting CMA-ES on {dimensions}D Elliptic function...")

    # Run the optimizer; cma.fmin returns a result tuple where index 0 is the
    # best solution found and index 1 is its objective value.
    res = cma.fmin(
        objective, x0, sigma0, options={"bounds": [-100, 100], "maxiter": 1000}
    )

    print("\nOptimization Results:")
    print(f"Global Minimum Value: {res[1]}")
    print(f"Best Coordinates: {res[0]}")