Continue an Optimization#

examples/1_basics/5_continue.py
from __future__ import annotations

from ConfigSpace import Configuration, ConfigurationSpace, Float

from smac import Callback
from smac import HyperparameterOptimizationFacade as HPOFacade
from smac import Scenario
from smac.main.smbo import SMBO
from smac.runhistory import TrialInfo, TrialValue

__copyright__ = "Copyright 2025, Leibniz University Hanover, Institute of AI"
__license__ = "3-clause BSD"


class StopCallback(Callback):
    def __init__(self, stop_after: int):
        self._stop_after = stop_after

    def on_tell_end(self, smbo: SMBO, info: TrialInfo, value: TrialValue) -> bool | None:
        """Called after the stats are updated and the trial is added to the runhistory. Optionally, returns false
        to gracefully stop the optimization.
        """
        if smbo.runhistory.finished == self._stop_after:
            return False

        return None


class QuadraticFunction:
    @property
    def configspace(self) -> ConfigurationSpace:
        cs = ConfigurationSpace(seed=0)
        x = Float("x", (-5, 5), default=-5)
        cs.add([x])

        return cs

    def train(self, config: Configuration, seed: int = 0) -> float:
        """Returns the y value of a quadratic function with a minimum at x=0."""
        x = config["x"]
        return x * x


if __name__ == "__main__":
    model = QuadraticFunction()

    # Scenario object specifying the optimization "environment"
    scenario = Scenario(model.configspace, deterministic=True, n_trials=50)
    stop_after = 10

    # Now we use SMAC to find the best hyperparameters
    smac = HPOFacade(
        scenario,
        model.train,  # We pass the target function here
        callbacks=[StopCallback(stop_after=stop_after)],
        overwrite=True,  # Overwrites any previous results that are inconsistent with the meta-data
    )

    incumbent = smac.optimize()
    assert smac.runhistory.finished == stop_after

    # Now, we want to continue the optimization
    # Make sure we don't overwrite the last run
    smac2 = HPOFacade(
        scenario,
        model.train,
        overwrite=False,
    )

    # Check whether we get the same incumbent
    assert smac.intensifier.get_incumbent() == smac2.intensifier.get_incumbent()
    assert smac2.runhistory.finished == stop_after

    # And now we finish the optimization
    incumbent2 = smac2.optimize()

    default_cost = smac.validate(model.configspace.get_default_configuration())
    print(f"Default cost: {default_cost}")

    incumbent_cost = smac.validate(incumbent)
    print(f"Incumbent cost of first run: {incumbent_cost}")

    incumbent_cost = smac2.validate(incumbent2)
    print(f"Incumbent cost of continued run: {incumbent_cost}")

Description#

SMAC can also continue from a previous run. To do so, it reads the old files (derived from the scenario's name, output directory, and seed) and sets up the corresponding components. In this example, we continue the optimization of a simple quadratic function.
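
To see which files such a run leaves behind, you can inspect its output directory before constructing a new facade. The following is a minimal sketch, assuming the default layout smac3_output/<scenario name>/<seed>/ and the file name runhistory.json; the scenario name used here is only a placeholder, since the example above lets SMAC derive the name automatically.

from pathlib import Path

# Placeholder name: the example above does not set a scenario name, so SMAC
# derives one automatically; replace "quadratic-continued" with the actual name.
# The layout <output_directory>/<name>/<seed>/ and the file "runhistory.json"
# are assumptions based on recent SMAC defaults.
run_directory = Path("smac3_output") / "quadratic-continued" / "0"

if (run_directory / "runhistory.json").exists():
    print(f"Found a previous run in {run_directory}; it can be continued with overwrite=False.")
else:
    print("No previous run found; SMAC starts from scratch.")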

First, after creating a scenario with 50 trials, we run SMAC with overwrite=True. This overwrites any previous run (in case this example was called before). We use a custom callback to artificially stop the first optimization after 10 trials.
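
The same on_tell_end hook can express other stopping criteria. Below is a minimal sketch of a callback that stops the optimization gracefully after a wall-clock budget instead of a fixed number of trials; the class name and the budget are illustrative, and only the Callback API shown in the example above is assumed.

from __future__ import annotations

import time

from smac import Callback
from smac.main.smbo import SMBO
from smac.runhistory import TrialInfo, TrialValue


class TimeBudgetCallback(Callback):
    """Stops the optimization gracefully once a wall-clock budget is exhausted."""

    def __init__(self, budget_in_seconds: float):
        self._budget = budget_in_seconds
        self._start = time.time()

    def on_tell_end(self, smbo: SMBO, info: TrialInfo, value: TrialValue) -> bool | None:
        # Returning False after a trial has been recorded stops SMAC gracefully,
        # just like StopCallback above.
        if time.time() - self._start > self._budget:
            return False

        return None


# Hypothetical usage, analogous to StopCallback in the example:
# smac = HPOFacade(scenario, model.train, callbacks=[TimeBudgetCallback(budget_in_seconds=30)])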

Second, we run the SMAC optimization again with the same scenario, but this time with overwrite=False. Since a previous run with the same meta data already exists, this run is continued until a total of 50 trials is reached.
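
Before calling optimize() on the second facade, the state restored from disk can be inspected. The sketch below relies on the attributes already used in the example (runhistory.finished and intensifier.get_incumbent()); runhistory.get_cost() is an additional assumption about the RunHistory API and may differ between versions.

# Sketch: inspect what was restored from the previous run before continuing.
restored_incumbent = smac2.intensifier.get_incumbent()
print(f"Restored {smac2.runhistory.finished} finished trials from the previous run.")
print(f"Restored incumbent: {restored_incumbent}")

# Assumption: RunHistory.get_cost() returns the aggregated cost of a configuration.
if restored_incumbent is not None:
    print(f"Cost of restored incumbent: {smac2.runhistory.get_cost(restored_incumbent)}")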
