RsBundle  Artifact [b205e26e08]

Artifact b205e26e0885bff1f8a9d9c9f908036b55fd0c67:

  • File src/master/boxed.rs — part of check-in [98b7cb4721] at 2023-08-14 20:51:25 on branch add-subproblems — master: add `add_subproblems` api (user: fifr size: 17552) [more...]

// Copyright (c) 2016-2023 Frank Fischer <frank-fischer@shadow-soft.de>
//
// This program is free software: you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see  <http://www.gnu.org/licenses/>
//

pub mod unconstrained;
use self::unconstrained::UnconstrainedMasterProblem;

use super::MasterProblem;
use crate::problem::SubgradientExtender;
use crate::saveable::Saveable;
use crate::{DVector, Minorant, Real};

use either::Either;
use log::debug;
use num_traits::FromPrimitive;
use std::marker::PhantomData;

#[cfg(feature = "serialize")]
use serde::{Deserialize, Serialize};

/**
 * Turn unconstrained master problem into box-constrained one.
 *
 * This master problem adds box constraints to an unconstrained
 * master problem implementation. The box constraints are enforced by
 * an additional outer optimization loop.
 */
pub struct BoxedMasterProblem<Mn, M>
where
    Mn: Minorant,
{
    /// The unconstrained master problem solver.
    pub master: M,

    /// Maximal number of updates of box multipliers.
    pub max_updates: usize,

    /// Variable lower bounds.
    lb: DVector,

    /// Variable upper bounds.
    ub: DVector,

    /// Multipliers for box constraints.
    η: DVector,

    /// Convexity sets (list of indices and current right-hand side)
    convexity_sets: Vec<(Vec<usize>, Real)>,

    /// Multipliers for convexity constraints.
    λ: DVector,

    /// Combination of λ and η: per variable `i` this holds `η[i]` plus the
    /// sum of the λ of every convexity set containing `i` (rebuilt in
    /// `update_box_multipliers`).
    λ_η: DVector,

    /// Primal optimal solution.
    primopt: DVector,

    /// Primal optimal solution value.
    primoptval: Real,

    /// Square of norm of dual optimal solution.
    dualoptnorm2: Real,

    /// Model precision (threshold used in `solve`'s stopping criterion).
    model_eps: Real,

    /// Whether `primopt` must be recomputed before the next solve
    /// (set after variables were added or the center moved).
    need_new_candidate: bool,

    /// Current number of updates.
    cnt_updates: usize,

    /// Ties the unused `Mn` type parameter to the struct.
    phantom: PhantomData<Mn>,
}

impl<Mn, M> BoxedMasterProblem<Mn, M>
where
    Mn: Minorant,
    M: UnconstrainedMasterProblem<Mn>,
{
    /// Wrap an existing unconstrained master problem solver.
    ///
    /// All bound, multiplier and solution vectors start out empty/zero.
    /// The maximal number of multiplier updates defaults to 50 and the
    /// model precision to 0.6.
    pub fn with_master(master: M) -> BoxedMasterProblem<Mn, M> {
        BoxedMasterProblem {
            master,
            max_updates: 50,
            model_eps: 0.6,
            lb: dvec![],
            ub: dvec![],
            η: dvec![],
            λ: dvec![],
            λ_η: dvec![],
            convexity_sets: vec![],
            primopt: dvec![],
            primoptval: 0.0,
            dualoptnorm2: 0.0,
            need_new_candidate: true,
            cnt_updates: 0,
            phantom: PhantomData,
        }
    }

    /// Set the maximal number of box-multiplier updates per `solve` call.
    ///
    /// Always returns `Ok(())`; the `Result` matches the trait interface.
    ///
    /// # Panics
    ///
    /// Panics if `max_updates` is zero.
    pub fn set_max_updates(&mut self, max_updates: usize) -> Result<(), M::Err> {
        assert!(max_updates > 0);
        self.max_updates = max_updates;
        Ok(())
    }

    /**
     * Update box multipliers $\eta$.
     *
     * This function solves the dual problem for fixed aggregated
     * minorant w.r.t. $\eta$. When called, the variable `self.primopt`
     * should contain the primal solution (i.e. the new candidate point
     * d) without the influence of $\eta$.
     *
     * `self.primopt` is modified in place to include the influence of the
     * updated $\lambda$ and $\eta$, and `self.λ_η` is rebuilt.
     *
     * Returns `true` if any multiplier ($\lambda$ or $\eta$) changed by
     * more than machine epsilon.
     */
    fn update_box_multipliers(&mut self) -> bool {
        let weight = self.master.weight();

        // at the beginning primopt contains y with only the influence of the aggregated minorant
        // but without η and λ, i.e. the candidate would be = y_old - 1/w (Gα - η - λ e)
        // but primopt = y_old - 1/w Gα

        debug!("primopt(only)={}   lb={}", self.primopt, self.lb);

        // ensure the multiplier vectors match the number of variables;
        // λ_η is reset and rebuilt from scratch below
        self.η.resize(self.lb.len(), 0.0);
        self.λ_η.clear();
        self.λ_η.resize(self.lb.len(), 0.0);

        // first update the λ
        let mut updated_lambda = false;
        {
            let η = &self.η;
            let primopt = &mut self.primopt;
            for (λ, cvx) in self.λ.iter_mut().zip(self.convexity_sets.iter()) {
                // current value of the convexity constraint (with η's influence)
                let sum = cvx.0.iter().map(|&i| primopt[i] + η[i] / weight).sum::<Real>();
                // multiplier that makes this convexity constraint tight
                let newlambda = weight / Real::from_usize(cvx.0.len()).unwrap() * (cvx.1 - sum);
                for &i in &cvx.0 {
                    primopt[i] += newlambda / weight;
                    self.λ_η[i] += newlambda;
                }
                updated_lambda = updated_lambda || (newlambda - *λ).abs() > f64::EPSILON;
                *λ = newlambda;
            }
        }
        // now primopt contains the influence of (the new) λ

        // update η: project the candidate onto the box and choose η as the
        // multiplier corresponding to that projection
        let mut updated_eta = false;
        for (i, η) in self.η.iter_mut().enumerate() {
            let x = &mut self.primopt[i];
            let newx = if *x < self.lb[i] {
                self.lb[i]
            } else if *x > self.ub[i] {
                self.ub[i]
            } else {
                // inside the box: the constraint is inactive, no multiplier
                *η = 0.0;
                continue;
            };
            let neweta = (newx - *x) * weight;
            updated_eta = updated_eta || (neweta - *η).abs() > f64::EPSILON;
            *η = neweta;
            self.λ_η[i] += neweta;
            *x = newx;
        }

        debug!("λ and η update");
        debug!("  primopt={}", self.primopt);
        debug!("  λ    ={}", self.λ);
        debug!("  η    ={}", self.η);

        updated_eta || updated_lambda
    }

    // Compute the new candidate point.
    //
    // This consists of two steps:
    //
    // 1. the new point is computed as $-\tfrac{1}{u}\bar{g}$, where $\bar{g}$
    //    is the aggregated minorant
    // 2. the multipliers $\eta$ are updated
    //
    // In other words, this function computes the new candidate
    // defined by a fixed $\bar{g}$ while choosing the best possible
    // $\eta$.
    //
    fn compute_candidate(&mut self) {
        self.need_new_candidate = false;

        // if the dual solution does not yet match the number of variables
        // (e.g. right after variables were added), start from zero
        let nvars = self.lb.len();
        if self.master.dualopt().len() != nvars {
            self.primopt.init0(nvars);
        } else {
            self.primopt.scal(-1.0 / self.master.weight(), self.master.dualopt());
        }
        self.update_box_multipliers();
    }

    /// Compute $\langle b, \eta \rangle$ with $b$ the bounds of η.
    ///
    /// A nonnegative multiplier pairs with the lower bound, a negative one
    /// with the upper bound; infinite bounds contribute nothing.
    ///
    /// Used for debugging only
    #[allow(dead_code)]
    fn lambda_eta_cutval(&self) -> Real {
        let mut total = 0.0;
        for (i, &η) in self.η.iter().enumerate() {
            total += if η >= 0.0 {
                if self.lb[i] > f64::NEG_INFINITY {
                    self.lb[i] * η
                } else {
                    0.0
                }
            } else if self.ub[i] < f64::INFINITY {
                self.ub[i] * η
            } else {
                0.0
            };
        }
        total
    }

    /**
     * Return $\\|G \alpha - \eta - \lambda \\|_2\^2$.
     *
     * This is the norm-square of the dual optimal solution including
     * the current box-multipliers $\eta$ (and convexity multipliers
     * $\lambda$, both combined in `λ_η`).
     */
    fn get_norm_subg2(&self) -> Real {
        let mut nrm2 = 0.0;
        for (g, m) in self.master.dualopt().iter().zip(self.λ_η.iter()) {
            let d = g - m;
            nrm2 += d * d;
        }
        nrm2
    }
}

impl<Mn, M> MasterProblem<Mn> for BoxedMasterProblem<Mn, M>
where
    Mn: Minorant,
    M: UnconstrainedMasterProblem<Mn>,
{
    type MinorantIndex = M::MinorantIndex;

    type Err = M::Err;

    /// Create a boxed master problem on top of a freshly created
    /// unconstrained master problem.
    fn new() -> Result<Self, Self::Err> {
        let master = M::new()?;
        Ok(BoxedMasterProblem::with_master(master))
    }

    fn set_num_subproblems(&mut self, n: usize) -> Result<(), Self::Err> {
        // Subproblem bookkeeping lives in the inner master problem.
        self.master.set_num_subproblems(n)
    }

    fn num_subproblems(&self) -> usize {
        self.master.num_subproblems()
    }

    /// Set the number of variables and their (optional) bounds.
    ///
    /// A missing bound vector means "unbounded" in that direction.
    ///
    /// # Panics
    ///
    /// Panics if a given bound vector does not have length `n`.
    fn set_vars(&mut self, n: usize, lb: Option<DVector>, ub: Option<DVector>) -> Result<(), Self::Err> {
        assert_eq!(lb.as_ref().map(|x| x.len()).unwrap_or(n), n);
        assert_eq!(ub.as_ref().map(|x| x.len()).unwrap_or(n), n);
        self.lb = match lb {
            Some(l) => l,
            None => dvec![f64::NEG_INFINITY; n],
        };
        self.ub = match ub {
            Some(u) => u,
            None => dvec![f64::INFINITY; n],
        };
        Ok(())
    }

    /// Install the convexity sets; every set starts with right-hand side 1
    /// and a zero multiplier λ.
    fn set_convexity_sets<S, I>(&mut self, convexity_sets: S)
    where
        S: IntoIterator<Item = I>,
        I: IntoIterator<Item = usize>,
    {
        let mut sets = Vec::new();
        for set in convexity_sets {
            sets.push((set.into_iter().collect(), 1.0));
        }
        self.λ = dvec![0.0; sets.len()];
        self.convexity_sets = sets;
    }

    fn num_variables(&self) -> usize {
        // The bound vectors always carry one entry per variable.
        self.lb.len()
    }

    fn num_minorants(&self, fidx: usize) -> usize {
        self.master.num_minorants(fidx)
    }

    fn weight(&self) -> Real {
        // The proximal weight is owned by the inner master problem.
        self.master.weight()
    }

    fn set_weight(&mut self, weight: Real) -> Result<(), Self::Err> {
        self.master.set_weight(weight)
    }

    fn set_max_updates(&mut self, max_updates: usize) -> Result<(), Self::Err> {
        // Forward to the inherent method (which panics for `max_updates == 0`).
        BoxedMasterProblem::set_max_updates(self, max_updates)
    }

    fn cnt_updates(&self) -> usize {
        self.cnt_updates
    }

    /// Append new variables with the given `(lower, upper)` bounds.
    ///
    /// The multiplier vectors are enlarged with zero entries and the next
    /// call to `solve` recomputes the candidate for the enlarged space.
    fn add_vars<SErr>(
        &mut self,
        bounds: &[(Real, Real)],
        sgext: &dyn SubgradientExtender<Mn, SErr>,
    ) -> Result<(), Either<Self::Err, SErr>> {
        debug_assert!(bounds.iter().all(|&(l, u)| l <= u));
        if bounds.is_empty() {
            return Ok(());
        }
        self.lb.extend(bounds.iter().map(|&(l, _)| l));
        self.ub.extend(bounds.iter().map(|&(_, u)| u));
        let n = self.lb.len();
        self.η.resize(n, 0.0);
        self.λ_η.resize(n, 0.0);
        self.need_new_candidate = true;
        self.master.add_vars(bounds.len(), sgext)
    }

    fn add_minorant(&mut self, fidx: usize, minorant: Mn) -> Result<Self::MinorantIndex, Self::Err> {
        // Minorants are stored by the inner, unconstrained master problem.
        self.master.add_minorant(fidx, minorant)
    }

    fn add_subproblems(&mut self, minorants: &[Mn]) -> Result<Vec<Self::MinorantIndex>, Self::Err> {
        self.master.add_subproblems(minorants)
    }

    /// Solve the box-constrained master problem.
    ///
    /// Runs an outer loop that alternates between solving the inner,
    /// unconstrained master problem (for fixed multipliers λ/η) and
    /// updating the multipliers via `update_box_multipliers`, until the
    /// convexity constraints hold and a stopping criterion fires.
    #[allow(clippy::cognitive_complexity)]
    fn solve(&mut self, center_value: Real) -> Result<(), Self::Err> {
        debug!("Solve Master");
        debug!("  lb      ={}", self.lb);
        debug!("  ub      ={}", self.ub);

        // recompute the candidate if variables were added or the center moved
        if self.need_new_candidate {
            self.compute_candidate();
        }

        let mut cnt_updates = 0;
        let mut old_augval = f64::NEG_INFINITY;
        loop {
            cnt_updates += 1;
            self.cnt_updates += 1;

            // TODO: relprec is fixed
            self.master.solve(&self.λ_η, center_value, old_augval, 1e-3)?;

            // compute the primal solution without the influence of η
            self.primopt.scal(-1.0 / self.master.weight(), self.master.dualopt());

            // solve w.r.t. η
            let updated_eta = self.update_box_multipliers();

            // compute value of the linearized model
            self.dualoptnorm2 = self.get_norm_subg2();
            let linval = self.master.dualopt().dot(&self.primopt) + self.master.dualopt_cutval();
            //assert!((self.λ_η.dot(&self.primopt) - self.lambda_eta_cutval()).abs() < 1e-6); // verify complementarity
            let augval = linval + 0.5 * self.dualoptnorm2 / self.master.weight();

            // if η changed, the model must be reevaluated at the shifted candidate
            let mut cutval = linval;
            if updated_eta {
                cutval = self.master.eval_model(&self.primopt)
            }

            let curval = center_value;
            // relative model precision achieved with the current multipliers
            // (guard against division by ~zero); currently used for logging only
            let model_prec = (cutval - linval) / (curval - linval).max(1e-16);

            debug!("Eta Test");
            debug!("  dualnorm2={}", self.dualoptnorm2);
            debug!("  linval={}", linval);
            debug!("  modval={}", self.master.eval_model(&self.primopt));
            debug!("  augval={}", augval);
            debug!("  cutval={}", cutval);
            debug!("  model_prec={}", model_prec);
            debug!("  old_augval={}", old_augval);
            debug!("  center_value={}", center_value);
            debug!("  model_eps={}", self.model_eps);
            debug!(
                "  cut-lin={} < eps*(cur-lin)={}",
                cutval - linval,
                self.model_eps * (curval - linval)
            );
            debug!("  cnt_update={} max_updates={}", cnt_updates, self.max_updates);

            self.primoptval = linval;

            // check if all convexity constraints are satisfied with sufficient precision
            let convexity_valid = self
                .convexity_sets
                .iter()
                .all(|(cvx, rhs)| (rhs - cvx.iter().map(|&i| self.primopt[i]).sum::<Real>()).abs() < 1e-3);

            // stop when the convexity constraints hold and, additionally, the
            // augmented value stalled, the model is precise enough, or the
            // update limit is reached
            if convexity_valid
                && (augval < old_augval + 1e-10
                    || cutval - linval < self.model_eps * (curval - linval)
                    || cnt_updates >= self.max_updates)
            {
                break;
            }

            old_augval = old_augval.max(augval);
        }

        debug!("Model");
        debug!("  cnt_update={}", cnt_updates);
        debug!("  primopt={}", self.primopt);
        debug!("  dualopt={}", self.master.dualopt());
        debug!("  etaopt={}", self.η);
        debug!("  primoptval={}", self.primoptval);

        Ok(())
    }

    fn compress(&mut self) -> Result<(), Self::Err> {
        // Compression of the minorant set happens in the inner problem.
        self.master.compress()
    }

    fn fill_models<F>(&mut self, f: F) -> Result<(), Self::Err>
    where
        F: FnMut(Self::MinorantIndex, &mut dyn Iterator<Item = (Self::MinorantIndex, Real)>),
    {
        self.master.fill_models(f)
    }

    fn get_primopt(&self) -> DVector {
        // Return a copy of the candidate computed by the last `solve`.
        self.primopt.clone()
    }

    fn get_primoptval(&self) -> Real {
        self.primoptval
    }

    fn get_optvalue_subproblem(&self, fidx: usize) -> Real {
        // Evaluate the model of subproblem `fidx` at the current candidate.
        self.master.eval_model_subproblem(fidx, &self.primopt)
    }

    fn get_dualoptnorm2(&self) -> Real {
        self.dualoptnorm2
    }

    fn multiplier(&self, min: Self::MinorantIndex) -> Real {
        self.master.multiplier(min)
    }

    fn aggregated_primal(&self, fidx: usize) -> Result<Mn::Primal, Self::Err> {
        self.master.aggregated_primal(fidx)
    }

    /// Shift the stability center by `alpha * d`.
    ///
    /// Bounds are kept relative to the center and therefore move in the
    /// opposite direction; convexity right-hand sides are adjusted to match.
    fn move_center(&mut self, alpha: Real, d: &DVector) {
        // any previously computed candidate is invalid after the shift
        self.need_new_candidate = true;
        self.master.move_center(alpha, d);
        self.lb.add_scaled_begin(-alpha, d);
        self.ub.add_scaled_begin(-alpha, d);
        for (indices, rhs) in self.convexity_sets.iter_mut() {
            for &idx in indices.iter() {
                *rhs -= alpha * d[idx];
            }
        }
    }
}

/// Snapshot of a `BoxedMasterProblem` for saving/restoring.
///
/// `S` is the state type of the wrapped unconstrained master problem.
/// The fields mirror those of `BoxedMasterProblem` one-to-one.
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
pub struct BoxedMasterState<S> {
    max_updates: usize,
    lb: DVector,
    ub: DVector,
    η: DVector,
    convexity_sets: Vec<(Vec<usize>, Real)>,
    λ: DVector,
    λ_η: DVector,
    primopt: DVector,
    primoptval: Real,
    dualoptnorm2: Real,
    model_eps: Real,
    need_new_candidate: bool,
    cnt_updates: usize,
    // State of the inner, unconstrained master problem.
    unboxed: S,
}

impl<Mn, M> Saveable for BoxedMasterProblem<Mn, M>
where
    Mn: Minorant,
    M: UnconstrainedMasterProblem<Mn> + Saveable,
{
    type State = BoxedMasterState<<M as Saveable>::State>;

    type Err = <M as Saveable>::Err;

    /// Capture the complete state, including the inner master problem's.
    fn get_state(&self) -> Result<Self::State, Self::Err> {
        Ok(BoxedMasterState {
            max_updates: self.max_updates,
            lb: self.lb.clone(),
            ub: self.ub.clone(),
            η: self.η.clone(),
            convexity_sets: self.convexity_sets.clone(),
            λ: self.λ.clone(),
            λ_η: self.λ_η.clone(),
            primopt: self.primopt.clone(),
            primoptval: self.primoptval,
            dualoptnorm2: self.dualoptnorm2,
            model_eps: self.model_eps,
            need_new_candidate: self.need_new_candidate,
            cnt_updates: self.cnt_updates,
            unboxed: self.master.get_state()?,
        })
    }

    /// Restore a previously captured state.
    ///
    /// The inner state is restored first; if that fails, none of the
    /// wrapper's own fields are touched.
    fn set_state(&mut self, state: Self::State) -> Result<(), Self::Err> {
        self.master.set_state(state.unboxed)?;
        self.max_updates = state.max_updates;
        self.lb = state.lb;
        self.ub = state.ub;
        self.η = state.η;
        self.convexity_sets = state.convexity_sets;
        self.λ = state.λ;
        self.λ_η = state.λ_η;
        self.primopt = state.primopt;
        self.primoptval = state.primoptval;
        self.dualoptnorm2 = state.dualoptnorm2;
        self.model_eps = state.model_eps;
        self.cnt_updates = state.cnt_updates;
        self.need_new_candidate = state.need_new_candidate;
        Ok(())
    }
}

/// Builder for `BoxedMasterProblem`.
///
/// `B` is a builder of the underlying `UnconstrainedMasterProblem`.
/// The inner builder is reachable through `Deref`/`DerefMut`.
#[derive(Default)]
pub struct Builder<B>(B);

impl<Mn, B> super::Builder<Mn> for Builder<B>
where
    Mn: Minorant,
    B: unconstrained::Builder<Mn>,
    B::MasterProblem: UnconstrainedMasterProblem<Mn>,
{
    type MasterProblem = BoxedMasterProblem<Mn, B::MasterProblem>;

    /// Build the inner master problem and wrap it in a `BoxedMasterProblem`.
    fn build(&mut self) -> Result<Self::MasterProblem, <Self::MasterProblem as MasterProblem<Mn>>::Err> {
        let inner = self.0.build()?;
        Ok(BoxedMasterProblem::with_master(inner))
    }
}

/// Expose the wrapped builder transparently.
impl<B> std::ops::Deref for Builder<B> {
    type Target = B;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<B> std::ops::DerefMut for Builder<B> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}