RsBundle  Artifact [99b5b53ffb]

Artifact 99b5b53ffb9d87f2ad52b06618aa615f751aaff4:

  • File src/solver.rs — part of check-in [4728cdaec5] at 2019-07-25 13:42:45 on branch trunk — Fix indexing of minorants in solver. Each minorant is assigned a globally unique and invariant identifier. This identifier is always used to refer to that minorant. The former implementation violated this rule during the model update when the subgradient extension callback is called. This caused the wrong primal to be used for the subgradient extension, ultimately leading to wrong subgradients/minorants. (user: fifr size: 37350)

// Copyright (c) 2016, 2017, 2018, 2019 Frank Fischer <frank-fischer@shadow-soft.de>
//
// This program is free software: you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see  <http://www.gnu.org/licenses/>
//

//! The main bundle method solver.

use crate::{DVector, Real};
use crate::{Evaluation, FirstOrderProblem, HKWeighter, Update};

use crate::master::{BoxedMasterProblem, Error as MasterProblemError, MasterProblem, UnconstrainedMasterProblem};
use crate::master::{CplexMaster, MinimalMaster};

use log::{debug, info, warn};

use std::error::Error;
use std::f64::{INFINITY, NEG_INFINITY};
use std::fmt;
use std::mem::swap;
use std::result::Result;
use std::time::Instant;

/// A solver error.
///
/// `E` is the error type produced by the oracle (the
/// `FirstOrderProblem` implementation); it is wrapped by the
/// `Evaluation` and `Update` variants.
#[derive(Debug)]
pub enum SolverError<E> {
    /// An error occurred during oracle evaluation.
    Evaluation(E),
    /// An error occurred during oracle update.
    Update(E),
    /// An error has been raised by the master problem.
    Master(MasterProblemError),
    /// The oracle did not return a minorant.
    NoMinorant,
    /// The dimension of some data is wrong.
    Dimension,
    /// Some parameter has an invalid value.
    Parameter(ParameterError),
    /// The lower bound of a variable is larger than the upper bound.
    InvalidBounds { lower: Real, upper: Real },
    /// The value of a variable is outside its bounds.
    ViolatedBounds { lower: Real, upper: Real, value: Real },
    /// The variable index is out of bounds.
    InvalidVariable { index: usize, nvars: usize },
    /// Iteration limit has been reached.
    IterationLimit { limit: usize },
}

impl<E: fmt::Display> fmt::Display for SolverError<E> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        use self::SolverError::*;
        match self {
            Evaluation(err) => write!(fmt, "Oracle evaluation failed: {}", err),
            Update(err) => write!(fmt, "Oracle update failed: {}", err),
            Master(err) => write!(fmt, "Master problem failed: {}", err),
            NoMinorant => write!(fmt, "The oracle did not return a minorant"),
            Dimension => write!(fmt, "Dimension of lower bounds does not match number of variables"),
            Parameter(msg) => write!(fmt, "Parameter error: {}", msg),
            InvalidBounds { lower, upper } => write!(fmt, "Invalid bounds, lower:{}, upper:{}", lower, upper),
            ViolatedBounds { lower, upper, value } => write!(
                fmt,
                "Violated bounds, lower:{}, upper:{}, value:{}",
                lower, upper, value
            ),
            InvalidVariable { index, nvars } => {
                write!(fmt, "Variable index out of bounds, got:{} must be < {}", index, nvars)
            }
            IterationLimit { limit } => write!(fmt, "The iteration limit of {} has been reached.", limit),
        }
    }
}

impl<E: Error> Error for SolverError<E> {
    // NOTE(review): `Error::cause` is deprecated in favour of `source`,
    // but `source` requires the returned error to be `'static`; switching
    // would add an `E: 'static` bound to this impl, so it is kept as-is.
    fn cause(&self) -> Option<&Error> {
        match self {
            // The wrapped oracle/master errors are the underlying causes.
            SolverError::Evaluation(err) => Some(err),
            SolverError::Update(err) => Some(err),
            SolverError::Master(err) => Some(err),
            // The remaining variants carry no nested error.
            _ => None,
        }
    }
}

/// Allow `?` to convert a `ParameterError` into a `SolverError`.
impl<E> From<ParameterError> for SolverError<E> {
    fn from(e: ParameterError) -> Self {
        SolverError::Parameter(e)
    }
}

/// Allow `?` to convert a master-problem error into a `SolverError`.
impl<E> From<MasterProblemError> for SolverError<E> {
    fn from(e: MasterProblemError) -> Self {
        SolverError::Master(e)
    }
}

/**
 * The current state of the bundle method.
 *
 * Captures the current state of the bundle method during the run of
 * the algorithm. This state is passed to certain callbacks like
 * Terminator or Weighter so that they can compute their result
 * depending on the state.
 *
 * All references borrow directly from the `Solver`'s fields (see the
 * `current_state!` macro).
 */
pub struct BundleState<'a> {
    /// Current center of stability.
    pub cur_y: &'a DVector,

    /// Function value in current center.
    pub cur_val: Real,

    /// Current candidate, point of last evaluation.
    pub nxt_y: &'a DVector,

    /// Function value in candidate.
    pub nxt_val: Real,

    /// Model value in candidate.
    pub nxt_mod: Real,

    /// Cut value of new subgradient in current center.
    pub new_cutval: Real,

    /// The current aggregated subgradient norm.
    pub sgnorm: Real,

    /// The expected progress of the current model.
    /// Computed as `cur_val - nxt_mod` in `solve_model`.
    pub expected_progress: Real,

    /// Currently used weight of quadratic term.
    pub weight: Real,

    /**
     * The type of the current step.
     *
     * If the current step is Step::Term, the weighter should be reset.
     */
    pub step: Step,
}

// NOTE(review): empty inherent impl — defines no methods; candidate for removal.
impl<'a> BundleState<'a> {}

// Build a `BundleState` snapshot from the solver's current fields.
//
// NOTE(review): implemented as a macro rather than a method,
// presumably so the individual field borrows do not conflict with
// other borrows of `self` at the call sites — confirm.
macro_rules! current_state {
    ($slf:ident, $step:expr) => {
        BundleState {
            cur_y: &$slf.cur_y,
            cur_val: $slf.cur_val,
            nxt_y: &$slf.nxt_y,
            nxt_mod: $slf.nxt_mod,
            nxt_val: $slf.nxt_val,
            new_cutval: $slf.new_cutval,
            sgnorm: $slf.sgnorm,
            // The weight is read from the master problem, not cached.
            weight: $slf.master.weight(),
            step: $step,
            expected_progress: $slf.expected_progress,
        }
    };
}

/**
 * Termination predicate.
 *
 * Given the current state of the bundle method, this function returns
 * whether the solution process should be stopped.
 */
pub trait Terminator {
    /// Return true if the method should stop.
    ///
    /// `state` is a snapshot of the current bundle iteration and
    /// `params` are the solver's tuning parameters.
    fn terminate(&mut self, state: &BundleState, params: &SolverParams) -> bool;
}

/**
 * Terminates if expected progress is small enough.
 *
 * The test is relative: the expected progress is compared against
 * `termination_precision * (|cur_val| + 1)`.
 */
pub struct StandardTerminator {
    /// Relative termination tolerance; must be non-negative.
    pub termination_precision: Real,
}

impl Terminator for StandardTerminator {
    /// Stop once the expected progress falls below a relative tolerance.
    #[allow(unused_variables)]
    fn terminate(&mut self, state: &BundleState, params: &SolverParams) -> bool {
        let tol = self.termination_precision;
        assert!(tol >= 0.0);
        // Scale the tolerance by |f(center)| + 1 to make the test relative.
        let threshold = tol * (state.cur_val.abs() + 1.0);
        state.expected_progress <= threshold
    }
}

/**
 * Bundle weight controller.
 *
 * Given the current state of the bundle method, this function determines the
 * weight factor of the quadratic term for the next iteration.
 */
pub trait Weighter {
    /// Return the new weight of the quadratic term.
    ///
    /// Called after each step (see `descent_step`/`null_step`) with the
    /// freshly updated bundle state.
    fn weight(&mut self, state: &BundleState, params: &SolverParams) -> Real;
}

/// An invalid value for some parameter has been passed.
///
/// Wraps a human-readable message describing the violated constraint.
#[derive(Debug)]
pub struct ParameterError(String);

impl fmt::Display for ParameterError {
    /// Forward the stored message verbatim.
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        f.write_str(&self.0)
    }
}

impl Error for ParameterError {}

/// Parameters for tuning the solver.
#[derive(Clone, Debug)]
pub struct SolverParams {
    /// Maximal individual bundle size. Must be >= 2.
    pub max_bundle_size: usize,

    /**
     * Factor for doing a descent step.
     *
     * If the proportion of actual decrease to predicted decrease is
     * at least that high, a descent step will be done.
     *
     * Must be in (0,1).
     */
    pub acceptance_factor: Real,

    /**
     * Factor for doing a null step.
     *
     * Factor that guarantees a null step. This factor is used to
     * compute a bound for the function oracle, that guarantees a null
     * step. If the function is evaluated by some iterative method that ensures
     * an objective value that is at least as large as this bound, the
     * oracle can stop returning an appropriate $\varepsilon$-subgradient.
     *
     * Must be in (0, acceptance_factor] (equality is allowed by
     * `check` and is used by the default parameters).
     */
    pub nullstep_factor: Real,

    /// Minimal allowed bundle weight. Must be > 0 and < max_weight.
    pub min_weight: Real,

    /// Maximal allowed bundle weight. Must be > min_weight,
    pub max_weight: Real,

    /**
     * Maximal number of updates of box multipliers.
     *
     * This is the maximal number of iterations for updating the box
     * multipliers when solving the master problem with box
     * constraints. This is a technical parameter that should probably
     * never be changed. If you experience an unexpectedly high number
     * of inner iterations, consider removing/fixing the corresponding
     * variables.
     */
    pub max_updates: usize,
}

impl SolverParams {
    /// Verify that all parameters are valid.
    ///
    /// Returns a `ParameterError` describing the first violated
    /// constraint, or `Ok(())` if all parameters are consistent.
    fn check(&self) -> Result<(), ParameterError> {
        if self.max_bundle_size < 2 {
            Err(ParameterError(format!(
                "max_bundle_size must be >= 2 (got: {})",
                self.max_bundle_size
            )))
        } else if self.acceptance_factor <= 0.0 || self.acceptance_factor >= 1.0 {
            Err(ParameterError(format!(
                "acceptance_factor must be in (0,1) (got: {})",
                self.acceptance_factor
            )))
        } else if self.nullstep_factor <= 0.0 || self.nullstep_factor > self.acceptance_factor {
            // Note: equality with acceptance_factor is allowed (the
            // defaults use 0.1 for both).
            Err(ParameterError(format!(
                "nullstep_factor must be in (0,acceptance_factor] (got: {}, acceptance_factor:{})",
                self.nullstep_factor, self.acceptance_factor
            )))
        } else if self.min_weight <= 0.0 {
            // Message fixed: was the ungrammatical "must be in > 0".
            Err(ParameterError(format!(
                "min_weight must be > 0 (got: {})",
                self.min_weight
            )))
        } else if self.max_weight < self.min_weight {
            // Message fixed: was "must be in >= min_weight".
            Err(ParameterError(format!(
                "max_weight must be >= min_weight (got: {}, min_weight: {})",
                self.max_weight, self.min_weight
            )))
        } else if self.max_updates == 0 {
            // Message fixed: was "must be in > 0".
            Err(ParameterError(format!(
                "max_updates must be > 0 (got: {})",
                self.max_updates
            )))
        } else {
            Ok(())
        }
    }
}

impl Default for SolverParams {
    /// Reasonable defaults for most applications; these values satisfy
    /// `SolverParams::check`.
    fn default() -> SolverParams {
        SolverParams {
            max_bundle_size: 50,
            acceptance_factor: 0.1,
            nullstep_factor: 0.1,
            min_weight: 0.01,
            max_weight: 1000.0,
            max_updates: 50,
        }
    }
}

/// The step type that has been performed.
///
/// Passed to callbacks via `BundleState::step` and reported by
/// `Solver::step`.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum Step {
    /// A null step has been performed.
    Null,
    /// A descent step has been performed.
    Descent,
    /// No step but the algorithm has been terminated.
    Term,
}

/// Information about a minorant.
#[derive(Debug, Clone)]
struct MinorantInfo {
    /// The minorant's index in the master problem.
    ///
    /// This index is also used to look up the associated primal in
    /// `Solver::primals`, so it must stay globally unique and invariant.
    index: usize,
    /// Current multiplier.
    ///
    /// Refreshed from the master solution after each `solve_model` call.
    multiplier: Real,
}

/// Information about the last iteration.
///
/// NOTE(review): these variants are produced in `Solver::step`, which
/// is not fully visible here; the per-variant descriptions below are
/// inferred from the names and should be confirmed against that code.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum IterationInfo {
    /// The new minorant's cut value was higher than the old one — TODO confirm.
    NewMinorantTooHigh { new: Real, old: Real },
    /// A null step related to the upper bound — TODO confirm.
    UpperBoundNullStep,
    /// The returned cut was shallow — TODO confirm.
    ShallowCut,
}

/// State information for the update callback.
///
/// `Pr` is the oracle's primal type. Borrowed from the solver's
/// internal state while `FirstOrderProblem::update` runs.
pub struct UpdateState<'a, Pr: 'a> {
    /// Current model minorants.
    minorants: &'a [Vec<MinorantInfo>],
    /// The primals.
    primals: &'a Vec<Option<Pr>>,
    /// The last step type.
    pub step: Step,
    /// Iteration information.
    pub iteration_info: &'a [IterationInfo],
    /// The current candidate. If the step was a descent step, this is
    /// the new center.
    pub nxt_y: &'a DVector,
    /// The center. If the step was a descent step, this is the old
    /// center.
    pub cur_y: &'a DVector,
}

impl<'a, Pr: 'a> UpdateState<'a, Pr> {
    /// Return the aggregated primal information for a subproblem.
    ///
    /// Yields each active minorant's multiplier together with its
    /// associated primal. Panics if a minorant has no stored primal,
    /// which would be an internal invariant violation.
    pub fn aggregated_primals(&self, subproblem: usize) -> Vec<(Real, &Pr)> {
        self.minorants[subproblem]
            .iter()
            .map(|m| (m.multiplier, self.primals[m.index].as_ref().unwrap()))
            .collect()
    }

    /// Return the last primal for a given subproblem.
    ///
    /// This is the last primal generated by the oracle.
    pub fn last_primal(&self, fidx: usize) -> Option<&Pr> {
        self.minorants[fidx].last().and_then(|m| self.primals[m.index].as_ref())
    }
}

/**
 * Implementation of a bundle method.
 *
 * Owns the problem description, the master problem, and all iteration
 * state (center, candidate, bundle, primals).
 */
pub struct Solver<P: FirstOrderProblem> {
    /// The first order problem description.
    problem: P,

    /// The solver parameter.
    pub params: SolverParams,

    /// Termination predicate.
    pub terminator: Box<Terminator>,

    /// Weighter heuristic.
    pub weighter: Box<Weighter>,

    /// Lower and upper bounds of all variables.
    bounds: Vec<(Real, Real)>,

    /// Current center of stability.
    cur_y: DVector,

    /// Function value in current point.
    cur_val: Real,

    /// Model value in current point.
    cur_mod: Real,

    /// Vector of subproblem function values in current point.
    cur_vals: DVector,

    /// Vector of model values in current point.
    cur_mods: DVector,

    /**
     * Whether the data of the current center is valid.
     *
     * This variable is set to false if the problem data changes so
     * the function is re-evaluated at the center.
     */
    cur_valid: bool,

    /// Direction from current center to candidate.
    nxt_d: DVector,

    /// Current candidate point.
    nxt_y: DVector,

    /// (Upper bound on) function value in candidate.
    nxt_val: Real,

    /// Model value in candidate.
    nxt_mod: Real,

    /// DVector of subproblem function values in candidate.
    nxt_vals: DVector,

    /// Vector of model values in candidate point.
    nxt_mods: DVector,

    /// Cut value of new subgradient in current center.
    new_cutval: Real,

    /// Norm of current aggregated subgradient.
    sgnorm: Real,

    /// Expected progress.
    expected_progress: Real,

    /// Number of descent steps.
    cnt_descent: usize,

    /// Number of null steps.
    cnt_null: usize,

    /**
     * Time when the solution process started.
     *
     * This is actually the time of the last call to `Solver::init`.
     */
    start_time: Instant,

    /// The master problem.
    master: Box<MasterProblem<MinorantIndex = usize>>,

    /// The active minorant indices for each subproblem.
    minorants: Vec<Vec<MinorantInfo>>,

    /// The primals associated with each global minorant index.
    ///
    /// `None` entries mark indices whose primal has been consumed
    /// (see `compress_bundle`, which `take`s primals for aggregation).
    primals: Vec<Option<P::Primal>>,

    /// Accumulated information about the last iteration.
    iterinfos: Vec<IterationInfo>,
}

impl<P: FirstOrderProblem> Solver<P>
where
    P::Err: Into<Box<dyn Error>>,
{
    /**
     * Create a new solver for the given problem.
     *
     * Note that the solver owns the problem, so you cannot use the
     * same problem description elsewhere as long as it is assigned to
     * the solver. However, it is possible to get a reference to the
     * internally stored problem using `Solver::problem()`.
     *
     * Fails if the minimal master problem cannot be constructed.
     */
    pub fn new_params(problem: P, params: SolverParams) -> Result<Solver<P>, SolverError<P::Err>> {
        Ok(Solver {
            problem,
            params,
            // Default termination: relative precision of 1e-3.
            terminator: Box::new(StandardTerminator {
                termination_precision: 1e-3,
            }),
            weighter: Box::new(HKWeighter::new()),
            bounds: vec![],
            cur_y: dvec![],
            cur_val: 0.0,
            cur_mod: 0.0,
            cur_vals: dvec![],
            cur_mods: dvec![],
            cur_valid: false,
            nxt_d: dvec![],
            nxt_y: dvec![],
            nxt_val: 0.0,
            nxt_mod: 0.0,
            nxt_vals: dvec![],
            nxt_mods: dvec![],
            new_cutval: 0.0,
            sgnorm: 0.0,
            expected_progress: 0.0,
            cnt_descent: 0,
            cnt_null: 0,
            start_time: Instant::now(),
            // Placeholder master; `init_master` installs the real one
            // depending on the problem size and parameters.
            master: Box::new(BoxedMasterProblem::new(MinimalMaster::new()?)),
            minorants: vec![],
            primals: vec![],
            iterinfos: vec![],
        })
    }

    /// A new solver with default parameter.
    pub fn new(problem: P) -> Result<Solver<P>, SolverError<P::Err>> {
        Solver::new_params(problem, SolverParams::default())
    }

    /**
     * Set the first order problem description associated with this
     * solver.
     *
     * Note that the solver owns the problem, so you cannot use the
     * same problem description elsewhere as long as it is assigned to
     * the solver. However, it is possible to get a reference to the
     * internally stored problem using `Solver::problem()`.
     *
     * NOTE(review): this does not reset `cur_valid`; if the new
     * problem differs at the current center, the cached evaluation may
     * be stale — confirm whether callers are expected to call `init`.
     */
    pub fn set_problem(&mut self, problem: P) {
        self.problem = problem;
    }

    /// Returns a reference to the solver's current problem.
    ///
    /// The problem itself stays owned by the solver.
    pub fn problem(&self) -> &P {
        &self.problem
    }

    /// Initialize the solver.
    ///
    /// Validates the parameters, refreshes the variable bounds from
    /// the problem, clamps the center into those bounds, sizes the
    /// per-subproblem value vectors, and restarts the timer.
    pub fn init(&mut self) -> Result<(), SolverError<P::Err>> {
        self.params.check()?;
        // Resize the center if the number of variables changed; the old
        // evaluation is then invalid.
        if self.cur_y.len() != self.problem.num_variables() {
            self.cur_valid = false;
            self.cur_y.init0(self.problem.num_variables());
        }

        // Rebuild the bounds and clamp the center into them. A missing
        // bound vector means unbounded in that direction.
        let lb = self.problem.lower_bounds();
        let ub = self.problem.upper_bounds();
        self.bounds.clear();
        self.bounds.reserve(self.cur_y.len());
        for i in 0..self.cur_y.len() {
            let lb_i = lb.as_ref().map(|x| x[i]).unwrap_or(NEG_INFINITY);
            let ub_i = ub.as_ref().map(|x| x[i]).unwrap_or(INFINITY);
            if lb_i > ub_i {
                return Err(SolverError::InvalidBounds {
                    lower: lb_i,
                    upper: ub_i,
                });
            }
            // Moving the center invalidates the cached evaluation.
            if self.cur_y[i] < lb_i {
                self.cur_valid = false;
                self.cur_y[i] = lb_i;
            } else if self.cur_y[i] > ub_i {
                self.cur_valid = false;
                self.cur_y[i] = ub_i;
            }
            self.bounds.push((lb_i, ub_i));
        }

        // One function/model value slot per subproblem.
        let m = self.problem.num_subproblems();
        self.cur_vals.init0(m);
        self.cur_mods.init0(m);
        self.nxt_vals.init0(m);
        self.nxt_mods.init0(m);

        self.start_time = Instant::now();

        Ok(())
    }

    /// Solve the problem with at most 10_000 iterations.
    ///
    /// Use `solve_with_limit` for an explicit iteration limit.
    pub fn solve(&mut self) -> Result<(), SolverError<P::Err>> {
        const DEFAULT_ITER_LIMIT: usize = 10_000;
        self.solve_with_limit(DEFAULT_ITER_LIMIT)
    }

    /// Solve the problem with explicit iteration limit.
    ///
    /// Returns `SolverError::IterationLimit` if the termination
    /// criterion was not reached within `iter_limit` iterations.
    pub fn solve_with_limit(&mut self, iter_limit: usize) -> Result<(), SolverError<P::Err>> {
        // Set up the internal data structures before iterating.
        self.init()?;

        match self.solve_iter(iter_limit)? {
            true => Ok(()),
            false => Err(SolverError::IterationLimit { limit: iter_limit }),
        }
    }

    /// Solve the problem but stop after `niter` iterations.
    ///
    /// The function returns `Ok(true)` if the termination criterion
    /// has been satisfied. Otherwise it returns `Ok(false)` or an
    /// error code.
    ///
    /// If this function is called again, the solution process is
    /// continued from the previous point. Because of this one must
    /// call `init()` before the first call to this function.
    pub fn solve_iter(&mut self, niter: usize) -> Result<bool, SolverError<P::Err>> {
        for _ in 0..niter {
            let mut term = self.step()?;
            let changed = self.update_problem(term)?;
            // do not stop if the problem has been changed: a changed
            // problem may invalidate the termination decision, so the
            // step is demoted to a null step.
            if changed && term == Step::Term {
                term = Step::Null
            }
            self.show_info(term);
            if term == Step::Term {
                return Ok(true);
            }
        }
        Ok(false)
    }

    /// Called to update the problem.
    ///
    /// Calling this function typically triggers the problem to
    /// separate new constraints depending on the current solution.
    ///
    /// Returns `Ok(true)` if the problem's variables changed (added or
    /// moved), `Ok(false)` otherwise.
    fn update_problem(&mut self, term: Step) -> Result<bool, SolverError<P::Err>> {
        let updates = {
            let state = UpdateState {
                minorants: &self.minorants,
                primals: &self.primals,
                step: term,
                iteration_info: &self.iterinfos,
                // this is a dirty trick: when updating the center, we
                // simply swapped the `cur_*` fields with the `nxt_*`
                // fields
                cur_y: if term == Step::Descent {
                    &self.nxt_y
                } else {
                    &self.cur_y
                },
                nxt_y: if term == Step::Descent {
                    &self.cur_y
                } else {
                    &self.nxt_y
                },
            };
            self.problem.update(&state).map_err(SolverError::Update)?
        };

        // Each entry is (Some(index) for a moved variable / None for a
        // new one, lower bound shifted by value, upper bound shifted by
        // value, the variable's value).
        let mut newvars = Vec::with_capacity(updates.len());
        for u in updates {
            match u {
                Update::AddVariable { lower, upper } => {
                    if lower > upper {
                        return Err(SolverError::InvalidBounds { lower, upper });
                    }
                    // Start the new variable at 0 if feasible, otherwise
                    // at the nearest bound.
                    let value = if lower > 0.0 {
                        lower
                    } else if upper < 0.0 {
                        upper
                    } else {
                        0.0
                    };
                    self.bounds.push((lower, upper));
                    newvars.push((None, lower - value, upper - value, value));
                }
                Update::AddVariableValue { lower, upper, value } => {
                    if lower > upper {
                        return Err(SolverError::InvalidBounds { lower, upper });
                    }
                    if value < lower || value > upper {
                        return Err(SolverError::ViolatedBounds { lower, upper, value });
                    }
                    self.bounds.push((lower, upper));
                    newvars.push((None, lower - value, upper - value, value));
                }
                Update::MoveVariable { index, value } => {
                    if index >= self.bounds.len() {
                        return Err(SolverError::InvalidVariable {
                            index,
                            nvars: self.bounds.len(),
                        });
                    }
                    let (lower, upper) = self.bounds[index];
                    if value < lower || value > upper {
                        return Err(SolverError::ViolatedBounds { lower, upper, value });
                    }
                    newvars.push((Some(index), lower - value, upper - value, value));
                }
            }
        }

        if !newvars.is_empty() {
            let problem = &mut self.problem;
            let primals = &self.primals;
            // Extend the master problem; the callback extends existing
            // minorants to the new variables using the primal stored
            // under the minorant's global index (`minidx`).
            self.master.add_vars(
                &newvars.iter().map(|v| (v.0, v.1, v.2)).collect::<Vec<_>>(),
                &mut |_fidx, minidx, vars| {
                    problem
                        .extend_subgradient(primals[minidx].as_ref().unwrap(), vars)
                        .map(DVector)
                        .map_err(|e| e.into())
                },
            )?;
            // modify moved variables
            for (index, val) in newvars.iter().filter_map(|v| v.0.map(|i| (i, v.3))) {
                self.cur_y[index] = val;
                self.nxt_y[index] = val;
                self.nxt_d[index] = 0.0;
            }
            // add new variables
            self.cur_y.extend(newvars.iter().filter(|v| v.0.is_none()).map(|v| v.3));
            self.nxt_y.extend(newvars.iter().filter(|v| v.0.is_none()).map(|v| v.3));
            self.nxt_d.resize(self.nxt_y.len(), 0.0);
            Ok(true)
        } else {
            Ok(false)
        }
    }

    /// Return the current aggregated primal information for a subproblem.
    ///
    /// This function returns all currently used minorants $x_i$ along
    /// with their coefficients $\alpha_i$. The aggregated primal can
    /// be computed by combining the minorants $\bar{x} =
    /// \sum_{i=1}\^m \alpha_i x_i$.
    pub fn aggregated_primals(&self, subproblem: usize) -> Vec<(Real, &P::Primal)> {
        let mut pairs = Vec::new();
        for info in &self.minorants[subproblem] {
            // Every active minorant has an associated primal by invariant.
            let primal = self.primals[info.index].as_ref().unwrap();
            pairs.push((info.multiplier, primal));
        }
        pairs
    }

    /// Log one iteration summary line at info level.
    ///
    /// Columns: marker ("_endit" on termination), elapsed h:m:s.cs,
    /// descent count, total steps, master update count, "*" on a
    /// descent step, weight, expected progress, model value (function
    /// value), center value.
    fn show_info(&self, step: Step) {
        let time = self.start_time.elapsed();
        info!(
            "{} {:0>2}:{:0>2}:{:0>2}.{:0>2} {:4} {:4} {:4}{:1}  {:9.4} {:9.4} \
             {:12.6e}({:12.6e}) {:12.6e}",
            if step == Step::Term { "_endit" } else { "endit " },
            time.as_secs() / 3600,
            (time.as_secs() / 60) % 60,
            time.as_secs() % 60,
            // Centiseconds.
            time.subsec_nanos() / 10_000_000,
            self.cnt_descent,
            self.cnt_descent + self.cnt_null,
            self.master.cnt_updates(),
            if step == Step::Descent { "*" } else { " " },
            self.master.weight(),
            self.expected_progress,
            self.nxt_mod,
            self.nxt_val,
            self.cur_val
        );
    }

    /// Return the current center of stability.
    pub fn center(&self) -> &[Real] {
        &self.cur_y
    }

    /// Return the last candidate point.
    ///
    /// After a descent step the candidate becomes the new center.
    pub fn candidate(&self) -> &[Real] {
        &self.nxt_y
    }

    /**
     * Initializes the master problem.
     *
     * The oracle is evaluated once at the initial center and the
     * master problem is initialized with the returned subgradient
     * information.
     */
    fn init_master(&mut self) -> Result<(), SolverError<P::Err>> {
        let m = self.problem.num_subproblems();

        // Choose the master implementation: the minimal master only
        // supports a single subproblem with bundle size 2.
        self.master = if m == 1 && self.params.max_bundle_size == 2 {
            debug!("Use minimal master problem");
            Box::new(BoxedMasterProblem::new(MinimalMaster::new()?))
        } else {
            debug!("Use CPLEX master problem");
            Box::new(BoxedMasterProblem::new(CplexMaster::new()?))
        };

        let lb = self.problem.lower_bounds().map(DVector);
        let ub = self.problem.upper_bounds().map(DVector);

        // Bound vectors, if present, must match the variable count.
        if lb
            .as_ref()
            .map(|lb| lb.len() != self.problem.num_variables())
            .unwrap_or(false)
        {
            return Err(SolverError::Dimension);
        }
        if ub
            .as_ref()
            .map(|ub| ub.len() != self.problem.num_variables())
            .unwrap_or(false)
        {
            return Err(SolverError::Dimension);
        }

        self.master.set_num_subproblems(m)?;
        self.master.set_vars(self.problem.num_variables(), lb, ub)?;
        self.master.set_max_updates(self.params.max_updates)?;

        self.minorants = (0..m).map(|_| vec![]).collect();

        // Evaluate every subproblem at the center and seed the bundle
        // with one minorant each; the primal is stored under the
        // minorant's global index.
        self.cur_val = 0.0;
        for i in 0..m {
            let result = self
                .problem
                .evaluate(i, &self.cur_y, INFINITY, 0.0)
                .map_err(SolverError::Evaluation)?;
            self.cur_vals[i] = result.objective();
            self.cur_val += self.cur_vals[i];

            let mut minorants = result.into_iter();
            if let Some((minorant, primal)) = minorants.next() {
                // At the center the minorant's value is its constant.
                self.cur_mods[i] = minorant.constant;
                self.cur_mod += self.cur_mods[i];
                let minidx = self.master.add_minorant(i, minorant)?;
                self.minorants[i].push(MinorantInfo {
                    index: minidx,
                    multiplier: 0.0,
                });
                if minidx >= self.primals.len() {
                    self.primals.resize_with(minidx + 1, || None);
                }
                self.primals[minidx] = Some(primal);
            } else {
                return Err(SolverError::NoMinorant);
            }
        }

        self.cur_valid = true;

        // Solve the master problem once to compute the initial
        // subgradient.
        //
        // We could compute that subgradient directly by
        // adding up the initial minorants, but this would not include
        // the eta terms. However, this is a heuristic anyway because
        // we assume an initial weight of 1.0, which, in general, will
        // *not* be the initial weight for the first iteration.
        self.master.set_weight(1.0)?;
        self.master.solve(self.cur_val)?;
        self.sgnorm = self.master.get_dualoptnorm2().sqrt();

        // Compute the real initial weight.
        let state = current_state!(self, Step::Term);
        let new_weight = self.weighter.weight(&state, &self.params);
        self.master.set_weight(new_weight)?;

        debug!("Init master completed");

        Ok(())
    }

    /// Solve the model (i.e. master problem) to compute the next candidate.
    ///
    /// Updates the candidate, direction, model value, subgradient norm,
    /// expected progress, and the minorant multipliers.
    fn solve_model(&mut self) -> Result<(), SolverError<P::Err>> {
        self.master.solve(self.cur_val)?;
        // The master's primal optimum is the step direction.
        self.nxt_d = self.master.get_primopt();
        self.nxt_y.add(&self.cur_y, &self.nxt_d);
        self.nxt_mod = self.master.get_primoptval();
        self.sgnorm = self.master.get_dualoptnorm2().sqrt();
        self.expected_progress = self.cur_val - self.nxt_mod;

        // update multiplier from master solution
        for i in 0..self.problem.num_subproblems() {
            for m in &mut self.minorants[i] {
                m.multiplier = self.master.multiplier(m.index);
            }
        }

        debug!("Model result");
        debug!("  cur_val ={}", self.cur_val);
        debug!("  nxt_mod ={}", self.nxt_mod);
        debug!("  expected={}", self.expected_progress);
        Ok(())
    }

    /// Reduce size of bundle.
    ///
    /// If a subproblem's bundle reached `max_bundle_size`, the
    /// minorants with the smallest multipliers are replaced by a single
    /// aggregated minorant (leaving room for the new minorant of the
    /// next evaluation).
    fn compress_bundle(&mut self) -> Result<(), SolverError<P::Err>> {
        for i in 0..self.problem.num_subproblems() {
            let n = self.master.num_minorants(i);
            if n >= self.params.max_bundle_size {
                // aggregate minorants with smallest coefficients
                // (the negated scaled key sorts by descending multiplier,
                // so `split_off` takes the tail with the smallest ones)
                self.minorants[i].sort_by_key(|m| -((1e6 * m.multiplier) as isize));
                let aggr = self.minorants[i].split_off(self.params.max_bundle_size - 2);
                let aggr_sum = aggr.iter().map(|m| m.multiplier).sum();
                // `take` clears the primal slots of the aggregated minorants.
                let (aggr_mins, aggr_primals): (Vec<_>, Vec<_>) = aggr
                    .into_iter()
                    .map(|m| (m.index, self.primals[m.index].take().unwrap()))
                    .unzip();
                let (aggr_min, aggr_coeffs) = self.master.aggregate(i, &aggr_mins)?;
                // append aggregated minorant
                self.minorants[i].push(MinorantInfo {
                    index: aggr_min,
                    multiplier: aggr_sum,
                });
                // Store the aggregated primal under the new global index.
                self.primals[aggr_min] = Some(
                    self.problem
                        .aggregate_primals(aggr_coeffs.into_iter().zip(aggr_primals.into_iter()).collect()),
                );
            }
        }
        Ok(())
    }

    /// Perform a descent step.
    ///
    /// The candidate becomes the new center by swapping the `cur_*`
    /// and `nxt_*` fields (see the "dirty trick" note in
    /// `update_problem`, which relies on this swap).
    fn descent_step(&mut self) -> Result<(), SolverError<P::Err>> {
        // Weight must be computed before the swap so the state still
        // describes the old center vs. the candidate.
        let new_weight = self.weighter.weight(&current_state!(self, Step::Descent), &self.params);
        self.master.set_weight(new_weight)?;
        self.cnt_descent += 1;
        swap(&mut self.cur_y, &mut self.nxt_y);
        swap(&mut self.cur_val, &mut self.nxt_val);
        swap(&mut self.cur_mod, &mut self.nxt_mod);
        swap(&mut self.cur_vals, &mut self.nxt_vals);
        swap(&mut self.cur_mods, &mut self.nxt_mods);
        // Shift the master problem's center along the step direction.
        self.master.move_center(1.0, &self.nxt_d);
        debug!("Descent Step");
        debug!("  dir ={}", self.nxt_d);
        debug!("  newy={}", self.cur_y);
        Ok(())
    }

    /// Perform a null step.
    ///
    /// The stability center is kept; only the proximal weight is
    /// updated and the null-step counter is incremented.
    fn null_step(&mut self) -> Result<(), SolverError<P::Err>> {
        // Re-weight based on the rejected candidate.
        let weight = self.weighter.weight(&current_state!(self, Step::Null), &self.params);
        self.master.set_weight(weight)?;
        self.cnt_null += 1;
        debug!("Null Step");
        Ok(())
    }

    /// Perform one bundle iteration.
    ///
    /// Solves the master problem, evaluates every subproblem oracle at
    /// the candidate point, adds the returned minorants to the model
    /// and finally decides between a descent step and a null step.
    ///
    /// Returns the kind of step performed, or `Step::Term` if the
    /// terminator signals convergence after solving the model.
    ///
    /// # Errors
    ///
    /// Propagates master-problem errors, oracle evaluation errors
    /// (wrapped in `SolverError::Evaluation`) and
    /// `SolverError::NoMinorant` if an oracle returns no minorant.
    #[allow(clippy::collapsible_if)]
    pub fn step(&mut self) -> Result<Step, SolverError<P::Err>> {
        self.iterinfos.clear();

        if !self.cur_valid {
            // current point needs new evaluation
            self.init_master()?;
        }

        self.solve_model()?;
        if self
            .terminator
            .terminate(&current_state!(self, Step::Term), &self.params)
        {
            return Ok(Step::Term);
        }

        let m = self.problem.num_subproblems();
        let descent_bnd = self.get_descent_bound();
        // Early-stopping bounds for the oracle are only meaningful for
        // a single function; with several subproblems each oracle must
        // be evaluated exactly.
        let nullstep_bnd = if m == 1 { self.get_nullstep_bound() } else { INFINITY };
        let relprec = if m == 1 { self.get_relative_precision() } else { 0.0 };

        self.compress_bundle()?;

        let mut nxt_lb = 0.0;
        let mut nxt_ub = 0.0;
        self.new_cutval = 0.0;
        for fidx in 0..m {
            // Evaluate subproblem `fidx` at the candidate point.
            let result = self
                .problem
                .evaluate(fidx, &self.nxt_y, nullstep_bnd, relprec)
                .map_err(SolverError::Evaluation)?;
            let fun_ub = result.objective();

            // The oracle must return at least one minorant; only the
            // first one is added to the model here.
            let mut minorants = result.into_iter();
            let (mut nxt_minorant, nxt_primal) = minorants.next().ok_or(SolverError::NoMinorant)?;
            // The minorant's constant is its value at the candidate,
            // i.e. a lower bound on the function value there.
            let fun_lb = nxt_minorant.constant;

            nxt_lb += fun_lb;
            nxt_ub += fun_ub;
            self.nxt_vals[fidx] = fun_ub;

            // move center of minorant to cur_y
            nxt_minorant.move_center(-1.0, &self.nxt_d);
            self.new_cutval += nxt_minorant.constant;
            let minidx = self.master.add_minorant(fidx, nxt_minorant)?;
            self.minorants[fidx].push(MinorantInfo {
                index: minidx,
                multiplier: 0.0,
            });
            // `primals` is indexed by the globally unique minorant
            // identifier; grow it if this identifier is new.
            if minidx >= self.primals.len() {
                self.primals.resize_with(minidx + 1, || None);
            }
            self.primals[minidx] = Some(nxt_primal);
        }

        if self.new_cutval > self.cur_val + 1e-3 {
            warn!(
                "New minorant has higher value in center new:{} old:{}",
                self.new_cutval, self.cur_val
            );
            // Record the event *before* raising `cur_val`, so that
            // `old` really is the previous center value.  (Previously
            // `cur_val` was overwritten first, which made the recorded
            // `old` equal to `new`.)
            self.iterinfos.push(IterationInfo::NewMinorantTooHigh {
                new: self.new_cutval,
                old: self.cur_val,
            });
            self.cur_val = self.new_cutval;
        }

        self.nxt_val = nxt_ub;

        // check for potential problems with relative precision of all kinds
        if nxt_lb <= descent_bnd {
            // lower bound gives descent step
            if nxt_ub > descent_bnd {
                // upper bound will produce null-step
                if self.cur_val - nxt_lb > (self.cur_val - self.nxt_mod) * self.params.nullstep_factor.max(0.5) {
                    warn!("Relative precision of returned objective interval enforces null-step.");
                    self.iterinfos.push(IterationInfo::UpperBoundNullStep);
                }
            }
        } else if self.cur_val - nxt_lb > 0.8 * (self.cur_val - self.nxt_mod) {
            // TODO: double check with ConicBundle if this test makes sense.
            // lower bound gives already a null step
            // subgradient won't yield much improvement
            warn!("Shallow cut (subgradient won't yield much improvement)");
            self.iterinfos.push(IterationInfo::ShallowCut);
        }

        debug!("Step");
        debug!("  cur_val    ={}", self.cur_val);
        debug!("  nxt_mod    ={}", self.nxt_mod);
        debug!("  nxt_ub     ={}", self.nxt_val);
        debug!("  descent_bnd={}", descent_bnd);

        // do a descent step or null step
        if nxt_ub <= descent_bnd {
            self.descent_step()?;
            Ok(Step::Descent)
        } else {
            self.null_step()?;
            Ok(Step::Null)
        }
    }

    /**
     * Return the bound on the function value that enforces a
     * nullstep.
     *
     * If the oracle guarantees that $f(\bar{y}) \ge$ this bound, the
     * bundle method performs a nullstep.
     *
     * The bound equals $f(\hat{y}) - \varrho' \cdot \Delta$ with the
     * expected progress $\Delta = f(\hat{y}) - \hat{f}(\bar{y})$ and
     * the `nullstep_factor` $\varrho'$.
     */
    fn get_nullstep_bound(&self) -> Real {
        // Expected progress of the model step.
        let delta = self.cur_val - self.nxt_mod;
        self.cur_val - self.params.nullstep_factor * delta
    }

    /**
     * Return the bound the function value must be below of to enforce a descent step.
     *
     * If the oracle guarantees that $f(\bar{y}) \le$ this bound, the
     * bundle method performs a descent step.
     *
     * The bound equals $f(\hat{y}) - \varrho \cdot \Delta$ with the
     * expected progress $\Delta = f(\hat{y}) - \hat{f}(\bar{y})$ and
     * the `acceptance_factor` $\varrho$.
     */
    fn get_descent_bound(&self) -> Real {
        // Expected progress of the model step.
        let delta = self.cur_val - self.nxt_mod;
        self.cur_val - self.params.acceptance_factor * delta
    }

    /**
     * Return the required relative precision for the computation.
     *
     * One tenth of the gap between the center value and the nullstep
     * bound, scaled by the magnitude of the center value and capped
     * at $10^{-3}$.
     */
    fn get_relative_precision(&self) -> Real {
        let gap = self.cur_val - self.get_nullstep_bound();
        let scale = self.cur_val.abs() + 1.0;
        (0.1 * gap / scale).min(1e-3)
    }
}