RsBundle  Changes On Branch restructure

Many hyperlinks are disabled.
Use anonymous login to enable hyperlinks.

Changes In Branch restructure Excluding Merge-Ins

This is equivalent to a diff from 3b15a29a64 to bda0e2d65c

2019-07-30
08:17
Merge restructure Closed-Leaf check-in: 00c46efc28 user: fifr tags: async
08:11
Move data structures like `DVector` and `Minorant` to `data` submodule Closed-Leaf check-in: bda0e2d65c user: fifr tags: restructure
08:01
Rearrange master problem module check-in: d5eed55bb2 user: fifr tags: restructure
07:25
Remove old sequential solver check-in: b194454b53 user: fifr tags: restructure
2019-07-29
19:08
Add `dyn` to trait object types check-in: 3b15a29a64 user: fifr tags: async
14:38
Merge trunk check-in: 51fbf78a7a user: fifr tags: async

Changes to examples/cflp.rs.
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37


38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
16
17
18
19
20
21
22

23
24
25
26
27
28
29
30
31
32
33



34
35

36

37
38
39
40
41
42
43
44
45
46
47
48
49
50
51

52
53
54
55
56
57

58
59
60
61
62
63
64







-











-
-
-
+
+
-

-















-






-







 */

#![allow(non_upper_case_globals)]

//! Example implementation for a capacitated facility location problem.

use better_panic;
use crossbeam::channel::unbounded as channel;
use log::{info, Level};
use rustop::opts;
use std::error::Error;
use std::fmt::Write;
use std::io::Write as _;
use std::sync::Arc;

use env_logger::{self, fmt::Color};
use ordered_float::NotNan;
use threadpool::ThreadPool;

use bundle::parallel::{
    DefaultSolver as ParallelSolver, EvalResult, FirstOrderProblem as ParallelProblem,
    NoBundleSolver as NoParallelSolver, ResultSender,
use bundle::problem::{EvalResult, FirstOrderProblem as ParallelProblem, ResultSender};
use bundle::solver::sync::{DefaultSolver, NoBundleSolver};
};
use bundle::{dvec, DVector, Minorant, Real};
use bundle::{DefaultSolver, FirstOrderProblem, NoBundleSolver, SimpleEvaluation};

// Instance data for the example capacitated facility location problem.
// Nfac facilities serve Ncus customers.
const Nfac: usize = 3;
const Ncus: usize = 5;
// Per-facility cost (presumably the fixed opening cost — typical for CFLP).
const F: [Real; Nfac] = [1000.0, 1000.0, 1000.0];
// Per-facility capacity.
const CAP: [Real; Nfac] = [500.0, 500.0, 500.0];
// C[i][j]: cost of serving customer j from facility i.
const C: [[Real; Ncus]; Nfac] = [
    [4.0, 5.0, 6.0, 8.0, 10.0], //
    [6.0, 4.0, 3.0, 5.0, 8.0],  //
    [9.0, 7.0, 4.0, 3.0, 4.0],  //
];
// Per-customer demand; use `Ncus` for the length like the other arrays
// instead of the magic literal `5`.
const DEMAND: [Real; Ncus] = [80.0, 270.0, 250.0, 160.0, 180.0];

/// Errors raised while evaluating subproblems of the facility location problem.
#[derive(Debug)]
enum EvalError {
    /// Evaluation of the customer subproblem with this index failed.
    Customer(usize),
    /// No objective value was generated for the subproblem with this index.
    NoObjective(usize),
}

impl std::fmt::Display for EvalError {
    /// Write a human-readable, one-line description of the error.
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
        match *self {
            EvalError::Customer(idx) => writeln!(fmt, "Customer subproblem {} failed", idx),
            EvalError::NoObjective(idx) => writeln!(fmt, "No objective value generated for subproblem {}", idx),
        }
    }
}

// Marker impl: the default `Error` methods suffice; `Display`/`Debug` carry the message.
impl Error for EvalError {}

struct CFLProblem {
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
115
116
117
118
119
120
121














































122
123
124
125
126
127
128







-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-







            constant: objective,
            linear: subg,
        },
        primal,
    ))
}

/// Synchronous `FirstOrderProblem` facade: delegates each evaluation to the
/// parallel implementation and collects the results from a channel.
impl FirstOrderProblem for CFLProblem {
    type Err = EvalError;

    type Primal = DVector;

    type EvalResult = SimpleEvaluation<Self::Primal>;

    // One dual variable per facility.
    fn num_variables(&self) -> usize {
        Nfac
    }

    // All dual variables are bounded below by zero.
    fn lower_bounds(&self) -> Option<Vec<Real>> {
        Some(vec![0.0; FirstOrderProblem::num_variables(self)])
    }

    // One subproblem per facility plus one per customer.
    fn num_subproblems(&self) -> usize {
        Nfac + Ncus
    }

    /// Evaluate subproblem `index` at the dual point `lambda`.
    ///
    /// Spawns the evaluation via the parallel interface, then drains the
    /// result channel until it closes, keeping the last objective value and
    /// all minorants.
    fn evaluate(
        &mut self,
        index: usize,
        lambda: &[Real],
        _nullstep_bound: Real,
        _relprec: Real,
    ) -> Result<Self::EvalResult, Self::Err> {
        let (tx, rx) = channel();
        // `index` is passed twice: once as the subproblem id, once as the tag
        // echoed back in the results — presumably so results can be matched
        // to the request; verify against the trait's contract.
        ParallelProblem::evaluate(self, index, Arc::new(lambda.iter().cloned().collect()), index, tx)?;
        let mut objective = None;
        let mut minorants = vec![];

        // The loop ends when all senders are dropped and the channel closes.
        for r in rx {
            match r {
                Ok(EvalResult::ObjectiveValue { value, .. }) => objective = Some(value),
                Ok(EvalResult::Minorant { minorant, primal, .. }) => minorants.push((minorant, primal)),
                // NOTE(review): an `Err` from a worker stops collection but the
                // error itself is dropped — confirm this silent break is intended.
                _ => break,
            }
        }

        // Missing objective value is reported as an explicit error.
        Ok(SimpleEvaluation {
            objective: objective.ok_or(EvalError::NoObjective(index))?,
            minorants,
        })
    }
}

impl ParallelProblem for CFLProblem {
    type Err = EvalError;

    type Primal = DVector;

    fn num_variables(&self) -> usize {
        Nfac
300
301
302
303
304
305
306
307

308
309
310
311
312
313
314
315
316
317
318
319
320

321
322
323
324
325
326
327
328
329
330
331
332
333
334
248
249
250
251
252
253
254

255






256
257
258
259
260
261

262






263
264
265
266
267
268
269
270







-
+
-
-
-
-
-
-






-
+
-
-
-
-
-
-









    let (args, _) = opts! {
        synopsis "Solver a simple capacitated facility location problem";
        opt minimal:bool, desc:"Use the minimal master model";
    }
    .parse_or_exit();
    if !args.minimal {
        let mut slv = DefaultSolver::new(CFLProblem::new())?;
        let mut slv = DefaultSolver::<_>::new(CFLProblem::new());
        slv.params.max_bundle_size = 5;
        slv.terminator.termination_precision = 1e-9;
        slv.solve()?;
        show_primals(|i| slv.aggregated_primals(i))?;

        let mut slv = ParallelSolver::<_>::new(CFLProblem::new());
        slv.terminator.termination_precision = 1e-9;
        slv.master.max_bundle_size = 5;
        slv.solve()?;

        show_primals(|i| slv.aggregated_primal(i).unwrap())?;
    } else {
        let mut slv = NoBundleSolver::new(CFLProblem::new())?;
        let mut slv = NoBundleSolver::<_>::new(CFLProblem::new());
        slv.params.max_bundle_size = 2;
        slv.terminator.termination_precision = 1e-5;
        slv.solve()?;
        show_primals(|i| slv.aggregated_primals(i))?;

        let mut slv = NoParallelSolver::<_>::new(CFLProblem::new());
        slv.terminator.termination_precision = 1e-5;
        slv.solve()?;

        show_primals(|i| slv.aggregated_primal(i).unwrap())?;
    }

    Ok(())
}
Changes to examples/mmcf.rs.
17
18
19
20
21
22
23
24

25
26

27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55

56
57
58

59
60
61
62
63
64
65
17
18
19
20
21
22
23

24
25

26
27


28
29
30
31



















32
33

34
35
36

37
38
39
40
41
42
43
44







-
+

-
+

-
-




-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-


-
+


-
+








use env_logger;
use env_logger::fmt::Color;
use log::{info, Level};
use rustop::opts;
use std::io::Write;

use bundle::master::{Builder as MasterBuilder, MasterProblem};
use bundle::master::{Builder, FullMasterBuilder, MasterProblem, MinimalMasterBuilder};
use bundle::mcf::MMCFProblem;
use bundle::parallel;
use bundle::solver::sync::Solver;
use bundle::{terminator::StandardTerminator, weighter::HKWeighter};
use bundle::{DefaultSolver, FirstOrderProblem, NoBundleSolver, Solver};
use bundle::{FullMasterBuilder, MinimalMasterBuilder};

use std::error::Error;
use std::result::Result;

/// Run the synchronous solver on an MMCF instance and report the primal costs.
///
/// Tightens the weight bounds and termination precision, solves, and then
/// accumulates the primal costs over all subproblems.
fn solve_standard<M>(mut slv: Solver<MMCFProblem, StandardTerminator, HKWeighter, M>) -> Result<(), Box<dyn Error>>
where
    M: MasterBuilder + Default,
    M::MasterProblem: MasterProblem<MinorantIndex = usize>,
{
    slv.weighter.set_weight_bounds(1e-1, 100.0);
    slv.terminator.termination_precision = 1e-6;
    slv.solve()?;

    // Sum the primal costs subproblem by subproblem.
    let mut costs: f64 = 0.0;
    for sub in 0..slv.problem().num_subproblems() {
        let primals = slv.aggregated_primals(sub);
        costs += slv.problem().get_primal_costs(sub, &primals);
    }
    info!("Primal costs: {}", costs);
    Ok(())
}

fn solve_parallel<M>(master: M, mmcf: MMCFProblem) -> Result<(), Box<dyn Error>>
where
    M: MasterBuilder,
    M: Builder,
    M::MasterProblem: MasterProblem<MinorantIndex = usize>,
{
    let mut slv = parallel::Solver::<_, StandardTerminator, HKWeighter, M>::with_master(mmcf, master);
    let mut slv = Solver::<_, StandardTerminator, HKWeighter, M>::with_master(mmcf, master);
    slv.weighter.set_weight_bounds(1e-1, 100.0);
    slv.terminator.termination_precision = 1e-6;
    slv.solve()?;

    let costs: f64 = (0..slv.problem().num_subproblems())
        .map(|i| {
            let aggr_primals = slv.aggregated_primal(i).unwrap();
103
104
105
106
107
108
109
110
111

112
113
114
115
116
117

118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137

138
139
140
141
142
143
144
145
146








147
148
149
150

151
152
153
154
155
156
157
158
159
160
161
162


163
164
165


166
167
168
169
170
82
83
84
85
86
87
88


89






90




















91
92








93
94
95
96
97
98
99
100

101


102












103
104
105


106
107

108
109
110
111







-
-
+
-
-
-
-
-
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+

-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
-

-
-
+
-
-
-
-
-
-
-
-
-
-
-
-
+
+

-
-
+
+
-




    }
    .parse_or_exit();

    let filename = args.file;
    info!("Reading instance: {}", filename);

    if !args.minimal {
        {
            let mut mmcf = MMCFProblem::read_mnetgen(&filename)?;
        let mut mmcf = MMCFProblem::read_mnetgen(&filename)?;
            if args.aggregated {
                mmcf.multimodel = false;
                mmcf.set_separate_constraints(false);
            } else {
                mmcf.multimodel = true;
                mmcf.set_separate_constraints(args.separate);
        mmcf.set_separate_constraints(args.separate);
            }

            let mut solver = DefaultSolver::new(mmcf)?;
            solver.params.max_bundle_size = if args.bundle_size <= 1 {
                if args.aggregated {
                    50
                } else {
                    5
                }
            } else {
                args.bundle_size
            };
            solve_standard(solver)?;
        }

        println!("---------------------------------");
        {
            let mut mmcf = MMCFProblem::read_mnetgen(&filename)?;
            mmcf.set_separate_constraints(args.separate);
            mmcf.multimodel = true;
        mmcf.multimodel = true;

            let mut master = FullMasterBuilder::default();
            if args.aggregated {
                master.max_bundle_size(if args.bundle_size <= 1 { 50 } else { args.bundle_size });
                master.use_full_aggregation();
            } else {
                master.max_bundle_size(if args.bundle_size <= 1 { 5 } else { args.bundle_size });
            }
            solve_parallel(master, mmcf)?;
        let mut master = FullMasterBuilder::default();
        if args.aggregated {
            master.max_bundle_size(if args.bundle_size <= 1 { 50 } else { args.bundle_size });
            master.use_full_aggregation();
        } else {
            master.max_bundle_size(if args.bundle_size <= 1 { 5 } else { args.bundle_size });
        }
        solve_parallel(master, mmcf)?;
        }
    } else {
        {
            let mut mmcf = MMCFProblem::read_mnetgen(&filename)?;
        let mut mmcf = MMCFProblem::read_mnetgen(&filename)?;
            mmcf.multimodel = false;

            let mut solver = NoBundleSolver::new(mmcf)?;
            solver.params.max_bundle_size = 2;
            solve_standard(solver)?;
        }

        println!("---------------------------------");
        {
            let mut mmcf = MMCFProblem::read_mnetgen(&filename)?;
            mmcf.set_separate_constraints(args.separate);
            mmcf.multimodel = true;
        mmcf.set_separate_constraints(args.separate);
        mmcf.multimodel = true;

            let master = MinimalMasterBuilder::default();
            solve_parallel(master, mmcf)?;
        let master = MinimalMasterBuilder::default();
        solve_parallel(master, mmcf)?;
        }
    }

    Ok(())
}
Changes to examples/quadratic.rs.
23
24
25
26
27
28
29
30
31
32


33
34

35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116


117
118
119
120
121
122
123
124
125
126
127
128
129
130
131






















132
133
134
135
136
137
138









139
140
141
142
143
144
145
146
147
148
23
24
25
26
27
28
29



30
31


32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50












































51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68


69
70
71
72
73
74
75
76









77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98







99
100
101
102
103
104
105
106
107



108
109
110
111
112
113
114







-
-
-
+
+
-
-
+


















-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-


















-
-
+
+






-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
-
-
-







use env_logger::fmt::Color;
use log::{debug, Level};
use rustop::opts;
use std::io::Write;
use std::sync::Arc;
use std::thread;

use bundle::parallel::{
    DefaultSolver as ParallelSolver, EvalResult, FirstOrderProblem as ParallelProblem,
    NoBundleSolver as NoParallelSolver, ResultSender,
use bundle::problem::{EvalResult, FirstOrderProblem as ParallelProblem, ResultSender};
use bundle::solver::sync::{DefaultSolver, NoBundleSolver};
};
use bundle::{DVector, DefaultSolver, FirstOrderProblem, Minorant, NoBundleSolver, Real, SimpleEvaluation};
use bundle::{DVector, Minorant, Real};

/// A tiny two-dimensional quadratic test problem
/// `f(x) = c + <x, A x> + <b, x>` (see `evaluate` below).
#[derive(Clone)]
struct QuadraticProblem {
    /// Quadratic coefficient matrix A. The gradient formula used in
    /// `evaluate` (g = 2Ax + b) assumes A is symmetric — TODO confirm.
    a: [[Real; 2]; 2],
    /// Linear coefficients b.
    b: [Real; 2],
    /// Constant offset c.
    c: Real,
}

impl QuadraticProblem {
    /// Create the fixed example instance used by this demo:
    /// A = [[5, 1], [1, 4]], b = (-12, -10), c = 3.
    fn new() -> QuadraticProblem {
        QuadraticProblem {
            a: [[5.0, 1.0], [1.0, 4.0]],
            b: [-12.0, -10.0],
            c: 3.0,
        }
    }
}

/// Synchronous evaluation of the quadratic test function.
impl FirstOrderProblem for QuadraticProblem {
    type Err = Box<dyn Error + Send + Sync>;
    type Primal = ();
    type EvalResult = SimpleEvaluation<()>;

    // The problem has exactly two variables.
    fn num_variables(&self) -> usize {
        2
    }

    /// Evaluate `f(x) = c + <x, Ax> + <b, x>` and its gradient `g = 2Ax + b`
    /// at `x`, returning the objective together with one exact minorant.
    ///
    /// Unused parameters are `_`-prefixed (matching the CFLProblem impl)
    /// instead of blanket-allowing unused variables.
    fn evaluate(
        &mut self,
        fidx: usize,
        x: &[Real],
        _nullstep_bnd: Real,
        _relprec: Real,
    ) -> Result<Self::EvalResult, Self::Err> {
        // There is only a single subproblem.
        assert_eq!(fidx, 0);
        let mut objective = self.c;
        let mut g = dvec![0.0; 2];

        for i in 0..2 {
            // At this point g[i] holds (Ax)[i] ...
            g[i] += (0..2).map(|j| self.a[i][j] * x[j]).sum::<Real>();
            // ... so this adds x[i] * ((Ax)[i] + b[i]) to the objective ...
            objective += x[i] * (g[i] + self.b[i]);
            // ... and this turns g[i] into the gradient entry 2(Ax)[i] + b[i].
            g[i] = 2.0 * g[i] + self.b[i];
        }

        debug!("Evaluation at {:?}", x);
        debug!("  objective={}", objective);
        debug!("  subgradient={}", g);

        Ok(SimpleEvaluation {
            objective,
            minorants: vec![(
                Minorant {
                    constant: objective,
                    linear: g,
                },
                (),
            )],
        })
    }
}

impl ParallelProblem for QuadraticProblem {
    type Err = Box<dyn Error + Send + Sync>;
    type Primal = ();

    fn num_variables(&self) -> usize {
        2
    }

    fn num_subproblems(&self) -> usize {
        1
    }

    fn start(&mut self) {}

    fn stop(&mut self) {}

    fn evaluate<I>(
        &mut self,
        i: usize,
        y: Arc<DVector>,
        fidx: usize,
        x: Arc<DVector>,
        index: I,
        tx: ResultSender<I, Self::Primal, Self::Err>,
    ) -> Result<(), Self::Err>
    where
        I: Send + Copy + 'static,
    {
        let y = y.clone();
        let mut p = self.clone();
        thread::spawn(move || match FirstOrderProblem::evaluate(&mut p, i, &y, 0.0, 0.0) {
            Ok(res) => {
                tx.send(Ok(EvalResult::ObjectiveValue {
                    index,
                    value: res.objective,
                }))
                .unwrap();
        let x = x.clone();
        let p = self.clone();
        thread::spawn(move || {
            assert_eq!(fidx, 0);
            let mut objective = p.c;
            let mut g = dvec![0.0; 2];

            for i in 0..2 {
                g[i] += (0..2).map(|j| p.a[i][j] * x[j]).sum::<Real>();
                objective += x[i] * (g[i] + p.b[i]);
                g[i] = 2.0 * g[i] + p.b[i];
            }

            debug!("Evaluation at {:?}", x);
            debug!("  objective={}", objective);
            debug!("  subgradient={}", g);

            tx.send(Ok(EvalResult::ObjectiveValue {
                index,
                value: objective,
            }))
            .unwrap();
                for (minorant, primal) in res.minorants {
                    tx.send(Ok(EvalResult::Minorant {
                        index,
                        minorant,
                        primal,
                    }))
                    .unwrap();
            tx.send(Ok(EvalResult::Minorant {
                index,
                minorant: Minorant {
                    constant: objective,
                    linear: g,
                },
                primal: (),
            }))
            .unwrap();
                }
            }
            Err(err) => tx.send(Err(err)).unwrap(),
        });
        Ok(())
    }
}

fn main() -> Result<(), Box<dyn Error>> {
    better_panic::install();
169
170
171
172
173
174
175
176
177
178
179



180
181
182
183



184
185
186
187
188



189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
135
136
137
138
139
140
141




142
143
144




145
146
147





148
149
150














151
152







-
-
-
-
+
+
+
-
-
-
-
+
+
+
-
-
-
-
-
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-



    let (args, _) = opts! {
        synopsis "Solver a simple quadratic optimization problem";
        opt minimal:bool, desc:"Use the minimal master model";
    }
    .parse_or_exit();

    {
        let f = QuadraticProblem::new();
        if !args.minimal {
            let mut solver = DefaultSolver::new(f).map_err(|e| format!("{}", e))?;
    let f = QuadraticProblem::new();
    if !args.minimal {
        let mut solver = DefaultSolver::<_>::new(f);
            solver.weighter.set_weight_bounds(1.0, 1.0);
            solver.solve().map_err(|e| format!("{}", e))?;
        } else {
            let mut solver = NoBundleSolver::new(f).map_err(|e| format!("{}", e))?;
        solver.solve().map_err(|e| format!("{}", e))?;
    } else {
        let mut solver = NoBundleSolver::<_>::new(f);
            solver.params.max_bundle_size = 2;
            solver.weighter.set_weight_bounds(1.0, 1.0);
            solver.solve().map_err(|e| format!("{}", e))?;
        }
    }
        solver.solve().map_err(|e| format!("{}", e))?;
    }


    println!("-------------------------");

    {
        let f = QuadraticProblem::new();
        if !args.minimal {
            let mut solver = ParallelSolver::<_>::new(f);
            solver.solve().map_err(|e| format!("{}", e))?;
        } else {
            let mut solver = NoParallelSolver::<_>::new(f);
            solver.solve().map_err(|e| format!("{}", e))?;
        }
    }

    Ok(())
}
Added src/data/aggregatable.rs.






















































































































1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
/*
 * Copyright (c) 2019 Frank Fischer <frank-fischer@shadow-soft.de>
 *
 * This program is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see  <http://www.gnu.org/licenses/>
 */

//! Objects that can be combined linearly.

use super::Real;
use std::borrow::Borrow;

/// An object that can be combined linearly with others of its kind.
pub trait Aggregatable: Default {
    /// Return a scaled version of `other`, i.e. `alpha * other`.
    fn new_scaled<A>(alpha: Real, other: A) -> Self
    where
        A: Borrow<Self>;

    /// Add a scaled version of `other` to `self`.
    ///
    /// This sets `self = self + alpha * other`.
    fn add_scaled<A>(&mut self, alpha: Real, other: A)
    where
        A: Borrow<Self>;

    /// Return $\sum\_{i=1}\^n alpha_i m_i$.
    ///
    /// If `aggregates` is empty return the default value.
    fn combine<I, A>(aggregates: I) -> Self
    where
        I: IntoIterator<Item = (Real, A)>,
        A: Borrow<Self>,
    {
        let mut items = aggregates.into_iter();
        match items.next() {
            // No terms at all: fall back to the default value.
            None => Self::default(),
            Some((alpha, first)) => {
                // Seed the accumulator with the first scaled term,
                // then fold the remaining terms into it.
                let mut acc = Self::new_scaled(alpha, first);
                for (beta, term) in items {
                    acc.add_scaled(beta, term);
                }
                acc
            }
        }
    }
}

/// Implement for the unit type (empty tuples): all operations are no-ops.
impl Aggregatable for () {
    // Scaling the unit value yields the unit value; there is nothing to compute.
    fn new_scaled<A>(_alpha: Real, _other: A) -> Self
    where
        A: Borrow<Self>,
    {
    }

    // Adding a scaled unit value changes nothing.
    fn add_scaled<A>(&mut self, _alpha: Real, _other: A)
    where
        A: Borrow<Self>,
    {
    }
}

/// Implement for scalar values: plain multiply-and-add arithmetic.
impl Aggregatable for Real {
    fn new_scaled<A>(alpha: Real, other: A) -> Self
    where
        A: Borrow<Self>,
    {
        let value = *other.borrow();
        alpha * value
    }

    fn add_scaled<A>(&mut self, alpha: Real, other: A)
    where
        A: Borrow<Self>,
    {
        let value = *other.borrow();
        *self += alpha * value;
    }
}

/// Implement for vectors of aggregatable objects (element-wise).
impl<T> Aggregatable for Vec<T>
where
    T: Aggregatable,
{
    /// Return a new vector with every element of `other` scaled by `alpha`.
    ///
    /// Uses the imported `Borrow` like the sibling impls instead of the
    /// fully-qualified `std::borrow::Borrow` path.
    fn new_scaled<A>(alpha: Real, other: A) -> Self
    where
        A: Borrow<Self>,
    {
        other
            .borrow()
            .iter()
            .map(|y| Aggregatable::new_scaled(alpha, y))
            .collect()
    }

    /// Element-wise `self[i] += alpha * other[i]`.
    ///
    /// Both vectors must have the same length (checked in debug builds).
    fn add_scaled<A>(&mut self, alpha: Real, other: A)
    where
        A: Borrow<Self>,
    {
        let rhs = other.borrow();
        debug_assert_eq!(self.len(), rhs.len(), "Vectors must have the same size");
        // `iter_mut` already yields `&mut T`; no `ref mut` binding needed.
        for (x, y) in self.iter_mut().zip(rhs) {
            x.add_scaled(alpha, y)
        }
    }
}
Added src/data/minorant.rs.










































































































1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
// Copyright (c) 2016, 2017, 2018, 2019 Frank Fischer <frank-fischer@shadow-soft.de>
//
// This program is free software: you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see  <http://www.gnu.org/licenses/>
//

//! A linear minorant.

use super::{Aggregatable, DVector, Real};

use std::borrow::Borrow;
use std::fmt;

/// A linear minorant of a convex function.
///
/// A linear minorant of a convex function $f \colon \mathbb{R}\^n \to
/// \mathbb{R}$ is a linear function of the form
///
///   \\[ l \colon \mathbb{R}\^n \to \mathbb{R}, x \mapsto \langle g, x
///   \rangle + c \\]
///
/// such that $l(x) \le f(x)$ for all $x \in \mathbb{R}\^n$.
#[derive(Clone, Debug)]
pub struct Minorant {
    /// The constant term $c$.
    pub constant: Real,

    /// The linear term, i.e. the coefficient vector $g$.
    pub linear: DVector,
}

impl fmt::Display for Minorant {
    /// Format the minorant as `c + y * g`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Return the `write!` result directly instead of `?` plus `Ok(())`.
        write!(f, "{} + y * {}", self.constant, self.linear)
    }
}

impl Default for Minorant {
    /// The zero minorant: constant 0 and an empty linear term.
    fn default() -> Minorant {
        Minorant {
            constant: 0.0,
            linear: dvec![],
        }
    }
}

impl Minorant {
    /// Construct a minorant from a constant term and the coefficient
    /// vector of the linear term.
    pub fn new(constant: Real, linear: Vec<Real>) -> Minorant {
        let linear = DVector(linear);
        Minorant { constant, linear }
    }

    /// Evaluate the minorant at a point.
    ///
    /// Computes $c + \langle g, x \rangle$ for this minorant
    ///   \\[\ell \colon \mathbb{R}\^n \to \mathbb{R}, x \mapsto c + \langle g, x \rangle\\]
    /// and the given point $x \in \mathbb{R}\^n$.
    pub fn eval(&self, x: &DVector) -> Real {
        let inner = self.linear.dot(x);
        self.constant + inner
    }

    /// Move the center of the minorant by `alpha * d`,
    /// adjusting the constant term accordingly.
    pub fn move_center(&mut self, alpha: Real, d: &DVector) {
        let shift = self.linear.dot(d);
        self.constant += alpha * shift;
    }
}

impl Aggregatable for Minorant {
    /// Build `alpha * other` as a fresh minorant: both the constant and
    /// the linear term are scaled.
    fn new_scaled<A>(alpha: Real, other: A) -> Self
    where
        A: Borrow<Self>,
    {
        let src = other.borrow();
        let constant = alpha * src.constant;
        let linear = DVector::scaled(&src.linear, alpha);
        Minorant { constant, linear }
    }

    /// Accumulate `alpha * other` into `self`, term by term.
    fn add_scaled<A>(&mut self, alpha: Real, other: A)
    where
        A: Borrow<Self>,
    {
        let src = other.borrow();
        self.constant += alpha * src.constant;
        self.linear.add_scaled(alpha, &src.linear);
    }
}
Added src/data/mod.rs.






























1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
/*
 * Copyright (c) 2019 Frank Fischer <frank-fischer@shadow-soft.de>
 *
 * This program is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see  <http://www.gnu.org/licenses/>
 */

//! General types and data structures.

pub mod vector;
pub use vector::{DVector, Vector};

pub mod aggregatable;
pub use aggregatable::Aggregatable;

pub mod minorant;
pub use minorant::Minorant;

/// Type used for real numbers throughout the library.
pub type Real = f64;
Added src/data/vector.rs.































































































































































































































































































1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
// Copyright (c) 2016, 2017, 2018, 2019 Frank Fischer <frank-fischer@shadow-soft.de>
//
// This program is free software: you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see  <http://www.gnu.org/licenses/>
//

//! Finite-dimensional sparse and dense vectors.

use crate::{Aggregatable, Real};
use std::fmt;
use std::ops::{Deref, DerefMut};
// use std::cmp::min;
use std::borrow::Borrow;
use std::iter::FromIterator;
use std::vec::IntoIter;

#[cfg(feature = "blas")]
use {openblas_src as _, rs_blas as blas, std::os::raw::c_int};

/// Type of dense vectors.
///
/// A transparent newtype around `Vec<Real>`; the inner vector is public
/// and also reachable through `Deref`/`DerefMut`.
#[derive(Debug, Clone, PartialEq, Default)]
pub struct DVector(pub Vec<Real>);

// Expose the wrapped `Vec<Real>`'s read-only API directly on `DVector`.
impl Deref for DVector {
    type Target = Vec<Real>;

    fn deref(&self) -> &Vec<Real> {
        &self.0
    }
}

// Expose the wrapped `Vec<Real>`'s mutating API directly on `DVector`.
impl DerefMut for DVector {
    fn deref_mut(&mut self) -> &mut Vec<Real> {
        &mut self.0
    }
}

impl fmt::Display for DVector {
    /// Render the vector as `(x1, x2, ..., xn)`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "(")?;
        let mut first = true;
        for x in self.iter() {
            // Comma-separate all elements after the first one.
            if !first {
                write!(f, ", ")?;
            }
            first = false;
            write!(f, "{}", x)?;
        }
        write!(f, ")")?;
        Ok(())
    }
}

// Allow collecting an iterator of `Real`s directly into a `DVector`.
impl FromIterator<Real> for DVector {
    fn from_iter<I: IntoIterator<Item = Real>>(iter: I) -> Self {
        DVector(Vec::from_iter(iter))
    }
}

// Consume the vector and iterate over its elements by value.
impl IntoIterator for DVector {
    type Item = Real;
    type IntoIter = IntoIter<Real>;

    fn into_iter(self) -> IntoIter<Real> {
        self.0.into_iter()
    }
}

/// Type of dense or sparse vectors.
#[derive(Debug, Clone)]
pub enum Vector {
    /// A vector with dense storage.
    Dense(DVector),

    /**
     * A vector with sparse storage.
     *
     * For each non-zero element this vector stores an index and the
     * value of the element in addition to the size of the vector.
     *
     * NOTE(review): `Vector::new_sparse` produces `elems` sorted by
     * index without duplicates; other construction sites should uphold
     * the same invariant — confirm before relying on it.
     */
    Sparse { size: usize, elems: Vec<(usize, Real)> },
}

impl fmt::Display for Vector {
    /// Dense vectors print like `DVector`; sparse vectors print as
    /// `size:(i:x, j:y, …)`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Vector::Dense(v) => write!(f, "{}", v),
            Vector::Sparse { size, elems } => {
                write!(f, "{}:(", size)?;
                for (k, (i, x)) in elems.iter().enumerate() {
                    if k > 0 {
                        f.write_str(", ")?;
                    }
                    write!(f, "{}:{}", i, x)?;
                }
                f.write_str(")")
            }
        }
    }
}

impl DVector {
    /// Set all elements to 0.
    ///
    /// The vector is cleared and refilled, so its length becomes `size`.
    pub fn init0(&mut self, size: usize) {
        self.clear();
        self.extend((0..size).map(|_| 0.0));
    }

    /// Set self = factor * y.
    ///
    /// `self` is overwritten and takes the length of `y`.
    pub fn scal(&mut self, factor: Real, y: &DVector) {
        self.clear();
        self.extend(y.iter().map(|y| factor * y));
    }

    /// Return factor * self.
    pub fn scaled(&self, factor: Real) -> DVector {
        let mut x = DVector::default();
        x.scal(factor, self);
        x
    }

    /// Return the inner product with another vector.
    ///
    /// # Panics
    /// Panics if the two vectors have different lengths.
    pub fn dot(&self, other: &DVector) -> Real {
        assert_eq!(self.len(), other.len());
        self.dot_begin(other)
    }

    /// Return the inner product with another vector.
    ///
    /// The inner product is computed on the smaller of the two
    /// dimensions. All other elements are assumed to be zero.
    pub fn dot_begin(&self, other: &DVector) -> Real {
        #[cfg(feature = "blas")]
        // SAFETY(review): assumes `blas::ddot` reads exactly `n`
        // elements from each argument and that `&DVector` deref-coerces
        // to a contiguous f64 slice — confirm against the rs_blas
        // binding's signature.
        unsafe {
            blas::ddot(self.len().min(other.len()) as c_int, &self, 1, &other, 1)
        }
        #[cfg(not(feature = "blas"))]
        {
            // `zip` stops at the shorter vector, implementing the
            // "remaining elements are zero" convention for free.
            self.iter().zip(other.iter()).map(|(x, y)| x * y).sum::<Real>()
        }
    }

    /// Add two vectors and store result in this vector.
    ///
    /// Computes self = x + y; `self` is overwritten.
    ///
    /// # Panics
    /// Panics if `x` and `y` have different lengths.
    pub fn add(&mut self, x: &DVector, y: &DVector) {
        assert_eq!(x.len(), y.len());
        self.clear();
        self.extend(x.iter().zip(y.iter()).map(|(a, b)| a + b));
    }

    /// Add two vectors and store result in this vector.
    ///
    /// Computes self += alpha * y in place.
    ///
    /// # Panics
    /// Panics if `self` and `y` have different lengths.
    pub fn add_scaled(&mut self, alpha: Real, y: &DVector) {
        assert_eq!(self.len(), y.len());
        #[cfg(feature = "blas")]
        // SAFETY(review): assumes `blas::daxpy` updates `self` in place
        // over exactly `len` elements — confirm against the rs_blas
        // binding's signature.
        unsafe {
            blas::daxpy(self.len() as c_int, alpha, &y, 1, &mut self[..], 1)
        }
        #[cfg(not(feature = "blas"))]
        {
            for (x, y) in self.iter_mut().zip(y.iter()) {
                *x += alpha * y;
            }
        }
    }

    /// Add two vectors and store result in this vector.
    ///
    /// In contrast to `add_scaled`, the two vectors might have
    /// different sizes. The size of the resulting vector is the
    /// larger of the two vector sizes and the remaining entries of
    /// the smaller vector are assumed to be 0.0.
    pub fn add_scaled_begin(&mut self, alpha: Real, y: &DVector) {
        #[cfg(feature = "blas")]
        // SAFETY(review): overlap-free in-place daxpy over the common
        // prefix; see `add_scaled` for the binding assumption.
        unsafe {
            let n = self.len();
            blas::daxpy(n.min(y.len()) as c_int, alpha, &y, 1, &mut self[..], 1);
        }
        #[cfg(not(feature = "blas"))]
        {
            // Update the common prefix in place.
            for (x, y) in self.iter_mut().zip(y.iter()) {
                *x += alpha * y;
            }
        }
        // If `y` is longer, append its scaled tail (self's missing
        // entries count as zero).
        let n = self.len();
        if n < y.len() {
            self.extend(y[n..].iter().map(|y| alpha * y));
        }
    }

    /// Return the 2-norm of this vector.
    pub fn norm2(&self) -> Real {
        #[cfg(feature = "blas")]
        // SAFETY(review): read-only pass over `len` elements; see
        // `dot_begin` for the binding assumption.
        unsafe {
            blas::dnrm2(self.len() as c_int, &self, 1)
        }
        #[cfg(not(feature = "blas"))]
        {
            self.iter().map(|x| x * x).sum::<Real>().sqrt()
        }
    }
}

impl Aggregatable for DVector {
    /// Create a fresh vector equal to `alpha * other`.
    fn new_scaled<A>(alpha: Real, other: A) -> Self
    where
        A: Borrow<Self>,
    {
        other.borrow().scaled(alpha)
    }

    /// In-place update self += alpha * other.
    fn add_scaled<A>(&mut self, alpha: Real, other: A)
    where
        A: Borrow<Self>,
    {
        DVector::add_scaled(self, alpha, other.borrow())
    }
}

impl Vector {
    /**
     * Return a sparse vector of dimension `n` with the given non-zeros.
     *
     * Entries with the same index are summed; entries that are (or sum
     * to) zero are dropped. The resulting element list is sorted by
     * index.
     *
     * # Panics
     * Panics if `indices` and `values` have different lengths or if
     * some index is out of range (i.e. `>= n`).
     */
    pub fn new_sparse(n: usize, indices: &[usize], values: &[Real]) -> Vector {
        assert_eq!(indices.len(), values.len());
        // Validate every index, not just the last one: `to_dense`
        // relies on all indices being in range.
        assert!(indices.iter().all(|&i| i < n));

        // Sort entry positions by index so that duplicates become
        // adjacent and can be merged on the fly.
        //
        // BUGFIX: this used to iterate `0..n` (the vector dimension)
        // instead of `0..indices.len()` (the number of entries), which
        // panicked whenever fewer than `n` non-zeros were passed and
        // silently dropped entries when more were passed.
        let mut ordered: Vec<_> = (0..indices.len()).collect();
        ordered.sort_by_key(|&i| indices[i]);

        let mut elems: Vec<(usize, Real)> = Vec::with_capacity(indices.len());
        for i in ordered {
            let (idx, val) = (indices[i], values[i]);
            if val == 0.0 {
                continue;
            }
            match elems.last_mut() {
                // Same index as the previous entry: merge, and drop the
                // element again if the values cancel out exactly.
                Some(last) if last.0 == idx => {
                    last.1 += val;
                    if last.1 == 0.0 {
                        elems.pop();
                    }
                }
                _ => elems.push((idx, val)),
            }
        }
        Vector::Sparse { size: n, elems }
    }

    /**
     * Convert vector to a dense vector.
     *
     * This function always returns a copy of the vector.
     */
    pub fn to_dense(&self) -> DVector {
        match *self {
            Vector::Dense(ref x) => x.clone(),
            Vector::Sparse { size, ref elems } => {
                let mut v = vec![0.0; size];
                for &(i, x) in elems {
                    // Checked indexing: an out-of-range index means the
                    // sparse vector was constructed incorrectly, so a
                    // panic here is preferable to the previous
                    // `get_unchecked_mut` (which was UB in that case).
                    v[i] = x;
                }
                DVector(v)
            }
        }
    }
}

#[test]
fn test_add_scaled_begin() {
    // `x` is shorter than `y`: the common prefix is updated in place
    // (1 + 3*2 = 7) and the tail is filled with alpha * y (3*2 = 6).
    let mut x = dvec![1.0; 5];
    let y = dvec![2.0; 7];
    x.add_scaled_begin(3.0, &y);
    assert_eq!(x, dvec![7.0, 7.0, 7.0, 7.0, 7.0, 6.0, 6.0]);
}
Deleted src/firstorderproblem.rs.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180




















































































































































































-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
// Copyright (c) 2016, 2017, 2019 Frank Fischer <frank-fischer@shadow-soft.de>
//
// This program is free software: you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see  <http://www.gnu.org/licenses/>
//

//! Problem description of a first-order convex optimization problem.

use crate::solver::UpdateState;
use crate::{Aggregatable, Minorant, Real};

use std::result::Result;
use std::vec::IntoIter;

/**
 * Trait for results of an evaluation.
 *
 * An evaluation returns the function value at the point of evaluation
 * and one or more subgradients.
 *
 * The subgradients (linear minorants) can be obtained by iterating over the result. The
 * subgradients are centered around the point of evaluation.
 */
pub trait Evaluation<P>: IntoIterator<Item = (Minorant, P)> {
    /// Return the function value at the point of evaluation.
    ///
    /// May be an upper bound on the true value; see
    /// `FirstOrderProblem::evaluate` for the precision contract.
    fn objective(&self) -> Real;
}

/**
 * Simple standard evaluation result.
 *
 * This result consists of the function value and a list of one or
 * more minorants and associated primal information.
 */
pub struct SimpleEvaluation<P> {
    /// The function value at the point of evaluation.
    pub objective: Real,
    /// The minorants found during evaluation, each paired with its
    /// associated primal information.
    pub minorants: Vec<(Minorant, P)>,
}

impl<P> IntoIterator for SimpleEvaluation<P> {
    type Item = (Minorant, P);
    type IntoIter = IntoIter<(Minorant, P)>;

    /// Consume the evaluation and iterate over its minorants.
    fn into_iter(self) -> Self::IntoIter {
        self.minorants.into_iter()
    }
}

impl<P> Evaluation<P> for SimpleEvaluation<P> {
    /// Return the stored objective value.
    fn objective(&self) -> Real {
        self.objective
    }
}

/// Problem update information.
///
/// The solver calls the `update` method of the problem regularly.
/// This method can modify the problem by adding (or removing)
/// variables. The possible updates are encoded in this type.
#[derive(Debug, Clone, Copy)]
pub enum Update {
    /// Add a variable with bounds.
    ///
    /// The initial value of the variable will be the feasible value
    /// closest to 0.
    AddVariable { lower: Real, upper: Real },
    /// Add a variable with bounds and initial value.
    ///
    /// NOTE(review): presumably `value` must lie within
    /// `[lower, upper]` — confirm with the solver implementation.
    AddVariableValue { lower: Real, upper: Real, value: Real },
    /// Change the current value of a variable. The bounds remain
    /// unchanged.
    MoveVariable { index: usize, value: Real },
}

/**
 * Trait for implementing a first-order problem description.
 *
 */
pub trait FirstOrderProblem {
    /// Error raised by this oracle.
    type Err;

    /// The primal information associated with a minorant.
    type Primal: Aggregatable;

    /// Custom evaluation result value.
    type EvalResult: Evaluation<Self::Primal>;

    /// Return the number of variables.
    fn num_variables(&self) -> usize;

    /**
     * Return the lower bounds on the variables.
     *
     * If no lower bounds are specified, $-\infty$ is assumed.
     *
     * The lower bounds must be less than or equal to the upper bounds.
     */
    fn lower_bounds(&self) -> Option<Vec<Real>> {
        None
    }

    /**
     * Return the upper bounds on the variables.
     *
     * If no upper bounds are specified, $+\infty$ is assumed.
     *
     * The upper bounds must be greater than or equal to the lower bounds.
     */
    fn upper_bounds(&self) -> Option<Vec<Real>> {
        None
    }

    /// Return the number of subproblems.
    ///
    /// The default is a single subproblem.
    fn num_subproblems(&self) -> usize {
        1
    }

    /**
     * Evaluate the i^th subproblem at the given point.
     *
     * The returned evaluation result must contain (an upper bound on)
     * the objective value at $y$ as well as at least one subgradient
     * centered at $y$.
     *
     * If the evaluation process reaches a lower bound on the function
     * value at $y$ and this bound is larger than $nullstep_bound$,
     * the evaluation may stop and return the lower bound and a
     * minorant. In this case the function value is guaranteed to be
     * large enough so that the new point is rejected as candidate.
     *
     * The returned objective value should be an upper bound on the
     * true function value within $relprec \cdot (\\|f(y)\\| + 1.0)$,
     * otherwise the returned objective should be the maximum of all
     * linear minorants at $y$.
     *
     * Note that `nullstep_bound` and `relprec` are usually only
     * useful if there is only a `single` subproblem.
     */
    fn evaluate(
        &mut self,
        i: usize,
        y: &[Real],
        nullstep_bound: Real,
        relprec: Real,
    ) -> Result<Self::EvalResult, Self::Err>;

    /// Return updates of the problem.
    ///
    /// The solver calls this regularly; see [`Update`] for the
    /// possible modifications. The default implementation returns no
    /// updates.
    fn update(&mut self, _state: &UpdateState<Self::Primal>) -> Result<Vec<Update>, Self::Err> {
        Ok(vec![])
    }

    /// Return new components for a subgradient.
    ///
    /// The components are typically generated by some primal information. The
    /// corresponding primal along with its subproblem index is passed as a
    /// parameter.
    ///
    /// The default implementation fails because it should never be
    /// called (only problems that add variables via `update` need to
    /// implement it).
    fn extend_subgradient(
        &mut self,
        _i: usize,
        _primal: &Self::Primal,
        _vars: &[usize],
    ) -> Result<Vec<Real>, Self::Err> {
        unimplemented!()
    }
}
Changes to src/lib.rs.
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29

30
31

32
33
34
35
36
37
38

39
40
41

42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
12
13
14
15
16
17
18



19
20
21
22
23
24
25

26


27


28




29

30

31
32
33
34
35
36
37
38
39
















-
-
-







-
+
-
-
+
-
-

-
-
-
-
+
-

-
+








-
-
-
-
-
-
-
-
-
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see  <http://www.gnu.org/licenses/>
//

//! Proximal bundle method implementation.

/// Type used for real numbers throughout the library.
pub type Real = f64;

#[macro_export]
macro_rules! dvec {
    ( $ elem : expr ; $ n : expr ) => { DVector(vec![$elem; $n]) };
    ( $ ( $ x : expr ) , * ) => { DVector(vec![$($x),*]) };
    ( $ ( $ x : expr , ) * ) => { DVector(vec![$($x,)*]) };
}

pub mod vector;
mod data;
pub use crate::vector::{DVector, Vector};

pub use data::{Aggregatable, DVector, Minorant, Real, Vector};
pub mod minorant;
pub use crate::minorant::{Aggregatable, Minorant};

pub mod firstorderproblem;
pub use crate::firstorderproblem::{Evaluation, FirstOrderProblem, SimpleEvaluation, Update};

pub mod solver;
pub mod problem;
pub use crate::solver::{BundleState, FullMasterBuilder, IterationInfo, Solver, SolverParams, Step, UpdateState};

pub mod parallel;
pub mod solver;

pub mod weighter;

pub mod terminator;

pub mod master;

pub mod mcf;

/// The minimal bundle builder.
pub type MinimalMasterBuilder = master::boxed::Builder<master::minimal::Builder>;

/// The default bundle solver with general master problem.
pub type DefaultSolver<P> = Solver<P, terminator::StandardTerminator, weighter::HKWeighter, FullMasterBuilder>;

/// A bundle solver with a minimal cutting plane model.
pub type NoBundleSolver<P> = Solver<P, terminator::StandardTerminator, weighter::HKWeighter, MinimalMasterBuilder>;
Changes to src/master/boxed.rs.
10
11
12
13
14
15
16

17




18
19
20
21
22
23
24
10
11
12
13
14
15
16
17

18
19
20
21
22
23
24
25
26
27
28







+
-
+
+
+
+







// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see  <http://www.gnu.org/licenses/>
//

pub mod unconstrained;
use crate::master::{self, MasterProblem, SubgradientExtension, UnconstrainedMasterProblem};
use self::unconstrained::UnconstrainedMasterProblem;

use super::MasterProblem;
pub use super::SubgradientExtension;
use crate::{DVector, Minorant, Real};

use itertools::multizip;
use log::debug;
use std::f64::{EPSILON, INFINITY, NEG_INFINITY};

/**
374
375
376
377
378
379
380
381

382
383

384
385
386
387
388
389
390
378
379
380
381
382
383
384

385
386

387
388
389
390
391
392
393
394







-
+

-
+








/// Builder for `BoxedMasterProblem`.
///
/// `B` is a builder of the underlying `UnconstrainedMasterProblem`.
#[derive(Default)]
pub struct Builder<B>(B);

impl<B> master::Builder for Builder<B>
impl<B> super::Builder for Builder<B>
where
    B: master::unconstrained::Builder,
    B: unconstrained::Builder,
    B::MasterProblem: UnconstrainedMasterProblem,
{
    type MasterProblem = BoxedMasterProblem<B::MasterProblem>;

    fn build(&mut self) -> Result<Self::MasterProblem, <Self::MasterProblem as MasterProblem>::Err> {
        self.0.build().map(BoxedMasterProblem::with_master)
    }
Added src/master/boxed/unconstrained.rs.














































































































































1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
// Copyright (c) 2016, 2017, 2019 Frank Fischer <frank-fischer@shadow-soft.de>
//
// This program is free software: you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see  <http://www.gnu.org/licenses/>
//

pub mod cpx;
pub mod minimal;

use crate::{DVector, Minorant, Real};

pub use super::SubgradientExtension;

use std::error::Error;

/**
 * Trait for master problems without box constraints.
 *
 * Implementors of this trait are supposed to solve quadratic
 * optimization problems of the form
 *
 * \\[ \min \left\\{ \hat{f}(d) + \frac{u}{2} \\| d \\|\^2 \colon
 *     d \in \mathbb{R}\^n \right\\}. \\]
 *
 * where $\hat{f}$ is a piecewise linear model, i.e.
 *
 * \\[ \hat{f}(d) = \max \\{ \ell_i(d) = c_i + \langle g_i, d \rangle \colon
 *                           i=1,\dotsc,k \\}
 *                = \max \left\\{ \sum_{i=1}\^k \alpha_i \ell_i(d) \colon
 *                                \alpha \in \Delta \right\\}, \\]
 *
 * where $\Delta := \left\\{ \alpha \in \mathbb{R}\^k \colon \sum_{i=1}\^k
 * \alpha_i = 1 \right\\}$. Note, the unconstrained solver is expected
 * to compute *dual* optimal solutions, i.e. the solver must compute
 * optimal coefficients $\bar{\alpha}$ for the dual problem
 *
 * \\[ \max_{\alpha \in \Delta} \min_{d \in \mathbb{R}\^n}
 *     \sum_{i=1}\^k \alpha_i \ell_i(d) + \frac{u}{2} \\| d \\|\^2. \\]
 */
pub trait UnconstrainedMasterProblem: Send + 'static {
    /// Unique index for a minorant.
    type MinorantIndex: Copy + Eq;

    /// Error type for this master problem.
    type Err: Error + Send + Sync;

    /// Return a new instance of the unconstrained master problem.
    fn new() -> Result<Self, Self::Err>
    where
        Self: Sized;

    /// Return the number of subproblems.
    fn num_subproblems(&self) -> usize;

    /// Set the number of subproblems (different function models).
    fn set_num_subproblems(&mut self, n: usize) -> Result<(), Self::Err>;

    /// Return the current weight.
    fn weight(&self) -> Real;

    /// Set the weight of the quadratic term, must be > 0.
    fn set_weight(&mut self, weight: Real) -> Result<(), Self::Err>;

    /// Return the number of minorants of subproblem `fidx`.
    fn num_minorants(&self, fidx: usize) -> usize;

    /// Compress the bundle.
    ///
    /// When some minorants are compressed, the callback is called with the
    /// coefficients and indices of the compressed minorants and the index of
    /// the new minorant. The callback may be called several times.
    fn compress<F>(&mut self, f: F) -> Result<(), Self::Err>
    where
        F: FnMut(Self::MinorantIndex, &mut dyn Iterator<Item = (Self::MinorantIndex, Real)>);

    /// Add a new minorant to the model of subproblem `fidx`.
    fn add_minorant(&mut self, fidx: usize, minorant: Minorant) -> Result<Self::MinorantIndex, Self::Err>;

    /// Add or move some variables.
    ///
    /// The variables in `changed` have been changed, so the subgradient
    /// information must be updated. Furthermore, `nnew` new variables
    /// are added.
    fn add_vars(
        &mut self,
        nnew: usize,
        changed: &[usize],
        extend_subgradient: &mut SubgradientExtension<Self::MinorantIndex>,
    ) -> Result<(), Self::Err>;

    /// Solve the master problem.
    ///
    /// NOTE(review): the precise semantics of `fbound`, `augbound` and
    /// `relprec` are not specified here — presumably objective bound,
    /// augmented-model bound and relative precision; confirm with the
    /// implementations.
    fn solve(&mut self, eta: &DVector, fbound: Real, augbound: Real, relprec: Real) -> Result<(), Self::Err>;

    /// Return the current dual optimal solution.
    fn dualopt(&self) -> &DVector;

    /// Return the current dual optimal solution value.
    fn dualopt_cutval(&self) -> Real;

    /// Return the multiplier associated with a minorant.
    fn multiplier(&self, min: Self::MinorantIndex) -> Real;

    /// Return the multipliers associated with a subproblem.
    fn opt_multipliers<'a>(&'a self, fidx: usize) -> Box<dyn Iterator<Item = (Self::MinorantIndex, Real)> + 'a>;

    /// Return the value of the current model at the given point.
    fn eval_model(&self, y: &DVector) -> Real;

    /// Aggregate the given minorants according to the current solution.
    ///
    /// The (indices of the) minorants to be aggregated get invalid
    /// after this operation. The function returns the index of the
    /// aggregated minorant along with the coefficients of the convex
    /// combination. The index of the new aggregated minorant might or
    /// might not be one of indices of the original minorants.
    ///
    /// # Error
    /// The indices of the minorants `mins` must belong to subproblem `fidx`.
    fn aggregate(&mut self, fidx: usize, mins: &[usize]) -> Result<(Self::MinorantIndex, DVector), Self::Err>;

    /// Move the center of the master problem along $\alpha \cdot d$.
    fn move_center(&mut self, alpha: Real, d: &DVector);
}

/// A builder for creating unconstrained master problem solvers.
pub trait Builder {
    /// The master problem to be built.
    type MasterProblem: UnconstrainedMasterProblem;

    /// Create a new master problem instance.
    fn build(&mut self) -> Result<Self::MasterProblem, <Self::MasterProblem as UnconstrainedMasterProblem>::Err>;
}
Added src/master/boxed/unconstrained/cpx.rs.














































































































































































































































































































































































































































































































































































































































































































































































































































1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
// Copyright (c) 2016, 2017, 2018, 2019 Frank Fischer <frank-fischer@shadow-soft.de>
//
// This program is free software: you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see  <http://www.gnu.org/licenses/>
//

//! Master problem implementation using CPLEX.

#![allow(unused_unsafe)]

use super::{SubgradientExtension, UnconstrainedMasterProblem};
use crate::{Aggregatable, DVector, Minorant, Real};

use c_str_macro::c_str;
use cplex_sys as cpx;
use cplex_sys::trycpx;
use log::debug;

use std;
use std::f64::NEG_INFINITY;
use std::iter::repeat;
use std::ops::{Deref, DerefMut};
use std::os::raw::{c_char, c_int};
use std::ptr;
use std::sync::Arc;

/// Errors raised by the CPLEX-based master problem.
#[derive(Debug)]
pub enum CplexMasterError {
    /// An error reported by the CPLEX library itself.
    Cplex(cpx::CplexError),
    /// The user-supplied subgradient-extension callback failed.
    SubgradientExtension(Box<dyn std::error::Error + Send + Sync>),
    /// The master problem contains no minorants.
    NoMinorants,
}

impl std::fmt::Display for CplexMasterError {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::result::Result<(), std::fmt::Error> {
        use CplexMasterError::*;
        match self {
            Cplex(err) => err.fmt(fmt),
            SubgradientExtension(err) => write!(fmt, "Subgradient extension failed: {}", err),
            NoMinorants => write!(fmt, "Master problem contains no minorants"),
        }
    }
}

impl std::error::Error for CplexMasterError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        use CplexMasterError::*;
        match self {
            Cplex(err) => Some(err),
            SubgradientExtension(err) => Some(err.as_ref()),
            NoMinorants => None,
        }
    }
}

impl From<cpx::CplexError> for CplexMasterError {
    /// Wrap a raw CPLEX error (enables `?` on CPLEX calls).
    fn from(err: cpx::CplexError) -> Self {
        Self::Cplex(err)
    }
}

/// Result type specialized to [`CplexMasterError`].
pub type Result<T> = std::result::Result<T, CplexMasterError>;

/// A minorant and its unique index.
struct MinorantInfo {
    /// The linear minorant itself.
    minorant: Minorant,
    /// The unique index assigned to this minorant.
    index: usize,
}

impl Deref for MinorantInfo {
    type Target = Minorant;
    /// Borrow the wrapped minorant.
    fn deref(&self) -> &Minorant {
        &self.minorant
    }
}

impl DerefMut for MinorantInfo {
    /// Mutably borrow the wrapped minorant.
    fn deref_mut(&mut self) -> &mut Minorant {
        &mut self.minorant
    }
}

/// Maps a global index to a minorant.
///
/// A minorant is addressed by the subproblem it belongs to and its
/// position within that subproblem.
#[derive(Clone, Copy)]
struct MinorantIdx {
    /// The function (subproblem) index.
    fidx: usize,
    /// The minorant index within the subproblem.
    idx: usize,
}

/// A submodel.
///
/// A submodel is a list of subproblems being aggregated in that model.
#[derive(Debug, Clone, Default)]
struct SubModel {
    /// The list of subproblems.
    subproblems: Vec<usize>,

    /// The number of minorants in this subproblem.
    ///
    /// This is the minimal number of minorants in each contained subproblem.
    num_mins: usize,
    /// The aggregated minorants of this submodel.
    ///
    /// This is just the sum of the corresponding minorants of the single
    /// functions contained in this submodel.
    minorants: Vec<Minorant>,
}

// A submodel dereferences to its list of subproblem indices.
impl Deref for SubModel {
    type Target = Vec<usize>;
    fn deref(&self) -> &Self::Target {
        &self.subproblems
    }
}

// Mutable access to the subproblem list via deref.
impl DerefMut for SubModel {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.subproblems
    }
}

/// A bundle master problem solved as a quadratic program with CPLEX.
pub struct CplexMaster {
    /// Raw CPLEX environment handle (owned; freed in `Drop`).
    env: *mut cpx::Env,

    /// Raw CPLEX problem handle (owned; null until the QP is first built).
    lp: *mut cpx::Lp,

    /// True if the QP must be updated.
    force_update: bool,

    /// List of free minorant indices.
    freeinds: Vec<usize>,

    /// List of minorant indices to be updated.
    updateinds: Vec<usize>,

    /// Mapping index to minorant.
    index2min: Vec<MinorantIdx>,

    /// The quadratic term.
    qterm: Vec<DVector>,

    /// The additional diagonal term to ensure positive definiteness.
    qdiag: Real,

    /// The weight of the quadratic term.
    weight: Real,

    /// The minorants for each subproblem in the model.
    minorants: Vec<Vec<MinorantInfo>>,

    /// The callback for the submodel for each subproblem.
    select_model: Arc<dyn Fn(usize) -> usize>,

    /// For each submodel the list of subproblems contained in that model.
    submodels: Vec<SubModel>,
    /// For each subproblem the submodel it is contained in.
    in_submodel: Vec<usize>,

    /// Optimal multipliers for each subproblem in the model.
    opt_mults: Vec<DVector>,
    /// Optimal aggregated minorant.
    opt_minorant: Minorant,

    /// Maximal bundle size.
    pub max_bundle_size: usize,
}

unsafe impl Send for CplexMaster {}

impl Drop for CplexMaster {
    fn drop(&mut self) {
        // Release the CPLEX problem first, then close the environment.
        // SAFETY: `env` was created in `new` and `lp` in `init_cpx_qp`; they
        // are freed only here (presumably `freeprob` tolerates a null `lp`,
        // which is the state before the first QP build — TODO confirm).
        unsafe { cpx::freeprob(self.env, &mut self.lp) };
        unsafe { cpx::closeCPLEX(&mut self.env) };
    }
}

impl UnconstrainedMasterProblem for CplexMaster {
    type MinorantIndex = usize;

    type Err = CplexMasterError;

    /// Creates a new master problem backed by a fresh CPLEX environment.
    ///
    /// The actual QP is built lazily (see `init_cpx_qp`); until then `lp`
    /// stays null. Fails if the CPLEX environment cannot be opened.
    fn new() -> Result<CplexMaster> {
        let env;

        trycpx!({
            let mut status = 0;
            env = cpx::openCPLEX(&mut status);
            status
        });

        Ok(CplexMaster {
            env,
            lp: ptr::null_mut(),
            force_update: true,
            freeinds: vec![],
            updateinds: vec![],
            index2min: vec![],
            qterm: vec![],
            qdiag: 0.0,
            weight: 1.0,
            minorants: vec![],
            // Default: fully disaggregated model (one submodel per subproblem).
            select_model: Arc::new(|i| i),
            submodels: vec![],
            in_submodel: vec![],
            opt_mults: vec![],
            opt_minorant: Minorant::default(),
            max_bundle_size: 50,
        })
    }

    /// Returns the number of subproblems (one minorant list per subproblem).
    fn num_subproblems(&self) -> usize {
        self.minorants.len()
    }

    /// Resets the master problem for `n` subproblems.
    ///
    /// Discards all stored minorants and multipliers and rebuilds the
    /// submodel partition.
    fn set_num_subproblems(&mut self, n: usize) -> Result<()> {
        // Use the barrier algorithm with a tight convergence tolerance for
        // the QP solves.
        trycpx!(cpx::setintparam(
            self.env,
            cpx::Param::Qpmethod.to_c(),
            cpx::Alg::Barrier.to_c()
        ));
        trycpx!(cpx::setdblparam(self.env, cpx::Param::Barepcomp.to_c(), 1e-12));

        self.index2min.clear();
        self.freeinds.clear();
        self.minorants = (0..n).map(|_| vec![]).collect();
        self.opt_mults = vec![dvec![]; n];
        self.update_submodels();

        Ok(())
    }

    /// Returns the current weight of the quadratic term.
    fn weight(&self) -> Real {
        self.weight
    }

    /// Sets the weight of the quadratic term.
    ///
    /// # Panics
    ///
    /// Panics if `weight` is not strictly positive.
    fn set_weight(&mut self, weight: Real) -> Result<()> {
        assert!(weight > 0.0);
        self.weight = weight;
        Ok(())
    }

    /// Returns the number of minorants stored for subproblem `fidx`.
    fn num_minorants(&self, fidx: usize) -> usize {
        self.minorants[fidx].len()
    }

    /// Compresses the bundle by aggregating minorants with small multipliers.
    ///
    /// For every subproblem whose bundle has reached `max_bundle_size`, all
    /// but the `max_bundle_size - 2` minorants with the largest optimal
    /// multipliers are merged into one aggregated minorant. The callback `f`
    /// receives the index of the new aggregated minorant and an iterator over
    /// the `(old index, coefficient)` pairs that were merged into it.
    fn compress<F>(&mut self, f: F) -> Result<()>
    where
        F: FnMut(Self::MinorantIndex, &mut dyn Iterator<Item = (Self::MinorantIndex, Real)>),
    {
        assert!(self.max_bundle_size >= 2, "Maximal bundle size must be >= 2");
        let mut f = f;
        for i in 0..self.num_subproblems() {
            let n = self.num_minorants(i);
            if n >= self.max_bundle_size {
                // aggregate minorants with smallest coefficients
                let mut inds = (0..n).collect::<Vec<_>>();
                // Sort descending by optimal multiplier. Scaling by 1e6 and
                // truncating to an integer yields a totally ordered sort key
                // so `sort_by_key` can be used despite the float multipliers.
                inds.sort_by_key(|&j| -((1e6 * self.opt_mults[i][j]) as isize));
                // Translate the positions of the tail (smallest multipliers)
                // into the global minorant indices expected by `aggregate`.
                let inds = inds[self.max_bundle_size - 2..]
                    .iter()
                    .map(|&j| self.minorants[i][j].index)
                    .collect::<Vec<_>>();
                let (newindex, coeffs) = self.aggregate(i, &inds)?;
                f(newindex, &mut inds.into_iter().zip(coeffs));
            }
        }
        Ok(())
    }

    /// Adds a new minorant for subproblem `fidx` and returns its global index.
    ///
    /// Recycled indices from `freeinds` are reused before new ones are
    /// allocated; the index is queued in `updateinds` so the quadratic term
    /// is refreshed on the next solve.
    fn add_minorant(&mut self, fidx: usize, minorant: Minorant) -> Result<usize> {
        debug!("Add minorant");
        debug!("  fidx={} index={}: {}", fidx, self.minorants[fidx].len(), minorant);

        // find new unique minorant index
        let min_idx = self.minorants[fidx].len();
        let index = if let Some(index) = self.freeinds.pop() {
            self.index2min[index] = MinorantIdx { fidx, idx: min_idx };
            index
        } else {
            self.index2min.push(MinorantIdx { fidx, idx: min_idx });
            self.index2min.len() - 1
        };
        self.updateinds.push(index);

        // store minorant
        self.minorants[fidx].push(MinorantInfo { minorant, index });
        // New minorants start with a zero optimal multiplier.
        self.opt_mults[fidx].push(0.0);

        Ok(index)
    }

    /// Extends all stored minorants after dual variables were added/changed.
    ///
    /// `nnew` is the number of newly appended variables; `changed` lists the
    /// indices of existing variables whose subgradient entries must be
    /// recomputed. `extend_subgradient` supplies the new entries for each
    /// minorant (first the `changed` positions, then the appended ones).
    fn add_vars(
        &mut self,
        nnew: usize,
        changed: &[usize],
        extend_subgradient: &mut SubgradientExtension<Self::MinorantIndex>,
    ) -> Result<()> {
        debug_assert!(!self.minorants[0].is_empty());
        if changed.is_empty() && nnew == 0 {
            return Ok(());
        }
        // The current variable count is read off an existing subgradient.
        let noldvars = self.minorants[0][0].linear.len();
        let nnewvars = noldvars + nnew;

        let mut changedvars = vec![];
        changedvars.extend_from_slice(changed);
        changedvars.extend(noldvars..nnewvars);
        for (fidx, mins) in self.minorants.iter_mut().enumerate() {
            for m in &mut mins[..] {
                let new_subg =
                    extend_subgradient(fidx, m.index, &changedvars).map_err(CplexMasterError::SubgradientExtension)?;
                // Overwrite the changed entries, then append the new ones.
                for (&j, &g) in changed.iter().zip(new_subg.iter()) {
                    m.linear[j] = g;
                }
                m.linear.extend_from_slice(&new_subg[changed.len()..]);
            }
        }

        // update qterm because all minorants have changed
        self.force_update = true;

        Ok(())
    }

    /// Solves the quadratic master problem for the current center `eta`.
    ///
    /// Rebuilds the CPLEX QP if minorants changed, refreshes the linear
    /// objective, runs the QP solver and extracts the optimal multipliers
    /// and the resulting aggregated minorant.
    fn solve(&mut self, eta: &DVector, _fbound: Real, _augbound: Real, _relprec: Real) -> Result<()> {
        // Rebuild the quadratic term / CPLEX problem if anything changed.
        if self.force_update || !self.updateinds.is_empty() {
            self.init_qp()?;
        }

        let nvars = unsafe { cpx::getnumcols(self.env, self.lp) as usize };
        // Sanity check: one QP variable per (submodel, minorant slot) pair.
        debug_assert_eq!(
            nvars,
            self.submodels
                .iter()
                .map(|funs| funs.iter().map(|&fidx| self.minorants[fidx].len()).min().unwrap_or(0))
                .sum::<usize>()
        );
        if nvars == 0 {
            return Err(CplexMasterError::NoMinorants);
        }
        // update linear costs
        {
            let mut c = Vec::with_capacity(nvars);
            let mut inds = Vec::with_capacity(nvars);
            for submodel in &self.submodels {
                for i in 0..submodel.num_mins {
                    let m = &submodel.minorants[i];
                    // Objective coefficient of the i-th aggregated minorant.
                    let cost = -m.constant * self.weight - m.linear.dot(eta);
                    inds.push(c.len() as c_int);
                    c.push(cost);
                }
            }
            debug_assert_eq!(inds.len(), nvars);
            trycpx!(cpx::chgobj(
                self.env,
                self.lp,
                nvars as c_int,
                inds.as_ptr(),
                c.as_ptr()
            ));
        }

        trycpx!(cpx::qpopt(self.env, self.lp));
        let mut sol = vec![0.0; nvars];
        trycpx!(cpx::getx(self.env, self.lp, sol.as_mut_ptr(), 0, nvars as c_int - 1));

        // Distribute the solution: each QP variable is the common multiplier
        // of the i-th minorant of every subproblem in its submodel.
        let mut idx = 0;
        let mut mults = Vec::with_capacity(nvars);
        let mut mins = Vec::with_capacity(nvars);

        for submodel in &self.submodels {
            for i in 0..submodel.num_mins {
                for &fidx in submodel.iter() {
                    self.opt_mults[fidx][i] = sol[idx];
                    mults.push(sol[idx]);
                    mins.push(&self.minorants[fidx][i].minorant);
                }
                idx += 1;
            }
            // set all multipliers for unused minorants to 0
            for &fidx in submodel.iter() {
                for mult in &mut self.opt_mults[fidx][submodel.num_mins..] {
                    *mult = 0.0;
                }
            }
        }

        // The optimal aggregated minorant is the combination of all active
        // minorants weighted with their optimal multipliers.
        self.opt_minorant = Aggregatable::combine(mults.into_iter().zip(mins));

        Ok(())
    }

    /// Returns the linear part (subgradient) of the optimal aggregated minorant.
    fn dualopt(&self) -> &DVector {
        &self.opt_minorant.linear
    }

    /// Returns the constant offset of the optimal aggregated minorant.
    fn dualopt_cutval(&self) -> Real {
        self.opt_minorant.constant
    }

    /// Returns the optimal multiplier of the minorant with global index `min`.
    fn multiplier(&self, min: usize) -> Real {
        let MinorantIdx { fidx, idx } = self.index2min[min];
        self.opt_mults[fidx][idx]
    }

    /// Iterates over `(global index, optimal multiplier)` pairs of subproblem `fidx`.
    fn opt_multipliers<'a>(&'a self, fidx: usize) -> Box<dyn Iterator<Item = (Self::MinorantIndex, Real)> + 'a> {
        Box::new(
            self.opt_mults[fidx]
                .iter()
                .enumerate()
                .map(move |(i, alpha)| (self.minorants[fidx][i].index, *alpha)),
        )
    }

    /// Evaluates the cutting-plane model at `y`.
    ///
    /// The model value is the sum over all submodels of the largest value of
    /// any of that submodel's aggregated minorants at `y` (NEG_INFINITY for a
    /// submodel without minorants).
    fn eval_model(&self, y: &DVector) -> Real {
        self.submodels
            .iter()
            .map(|submodel| {
                submodel.minorants[..submodel.num_mins]
                    .iter()
                    .map(|m| m.eval(y))
                    .fold(NEG_INFINITY, |best, v| best.max(v))
            })
            .sum()
    }

    /// Aggregates the given minorants of subproblem `fidx` into a single one.
    ///
    /// `mins` holds the *global* indices of the minorants to combine. The
    /// inputs are removed from the bundle, their indices are recycled, and a
    /// new aggregated minorant (weighted by the normalized optimal
    /// multipliers) is added instead. Returns the new minorant's global
    /// index together with the coefficients used for each input.
    ///
    /// # Panics
    ///
    /// Panics if `mins` is empty.
    fn aggregate(&mut self, fidx: usize, mins: &[usize]) -> Result<(usize, DVector)> {
        assert!(!mins.is_empty(), "No minorants specified to be aggregated");

        // A single minorant aggregates to itself with coefficient 1.
        if mins.len() == 1 {
            return Ok((mins[0], dvec![1.0]));
        }

        // scale coefficients
        let mut sum_coeffs = 0.0;
        for &i in mins {
            debug_assert_eq!(
                fidx, self.index2min[i].fidx,
                "Minorant {} does not belong to subproblem {} (belongs to: {})",
                i, fidx, self.index2min[i].fidx
            );
            sum_coeffs += self.opt_mults[fidx][self.index2min[i].idx];
        }
        // Normalize so the coefficients sum to one; if all multipliers are
        // zero, fall back to an all-zero combination.
        let aggr_coeffs = if sum_coeffs != 0.0 {
            mins.iter()
                .map(|&i| self.opt_mults[fidx][self.index2min[i].idx] / sum_coeffs)
                .collect::<DVector>()
        } else {
            dvec![0.0; mins.len()]
        };

        // Compute the aggregated minorant.
        let aggr = Aggregatable::combine(
            aggr_coeffs.iter().cloned().zip(
                mins.iter()
                    .map(|&index| &self.minorants[fidx][self.index2min[index].idx].minorant),
            ),
        );

        // Remove the minorants that have been aggregated.
        for &i in mins {
            let MinorantIdx {
                fidx: min_fidx,
                idx: min_idx,
            } = self.index2min[i];
            debug_assert_eq!(
                fidx, min_fidx,
                "Minorant {} does not belong to subproblem {} (belongs to: {})",
                i, fidx, min_fidx
            );

            // `swap_remove` moves the last minorant into slot `min_idx`.
            let m = self.minorants[fidx].swap_remove(min_idx);
            self.opt_mults[fidx].swap_remove(min_idx);
            self.freeinds.push(m.index);
            debug_assert_eq!(m.index, i);

            // Update index2min table and mark qterm to be updated.
            // This is only necessary if the removed minorant was not the last one.
            if min_idx < self.minorants[fidx].len() {
                self.index2min[self.minorants[fidx][min_idx].index].idx = min_idx;
                self.updateinds.push(self.minorants[fidx][min_idx].index);
            }
        }

        // Finally add the aggregated minorant.
        let aggr_idx = self.add_minorant(fidx, aggr)?;
        Ok((aggr_idx, aggr_coeffs))
    }

    /// Shifts every stored minorant by `alpha * d` (change of stability center).
    fn move_center(&mut self, alpha: Real, d: &DVector) {
        // Per-subproblem minorants ...
        for info in self.minorants.iter_mut().flatten() {
            info.move_center(alpha, d);
        }
        // ... and the aggregated minorants of every submodel.
        for min in self.submodels.iter_mut().flat_map(|s| s.minorants.iter_mut()) {
            min.move_center(alpha, d);
        }
    }
}

impl CplexMaster {
    /// Set a custom submodel selector.
    ///
    /// For each subproblem index the selector should return a submodel index.
    /// All subproblems with the same submodel index are aggregated in a single
    /// cutting plane model.
    fn set_submodel_selection<F>(&mut self, selector: F)
    where
        F: Fn(usize) -> usize + 'static,
    {
        self.select_model = Arc::new(selector);
        // Rebuild the submodel partition immediately so the new selector
        // takes effect before the next QP build.
        self.update_submodels();
    }

    /// Rebuilds the submodel partition from the current `select_model` callback.
    ///
    /// Afterwards `submodels[m]` lists the subproblems mapped to submodel `m`
    /// and `in_submodel[f]` is the submodel of subproblem `f`.
    fn update_submodels(&mut self) {
        self.submodels.clear();
        let n = self.num_subproblems();
        self.in_submodel.resize(n, 0);
        for fidx in 0..n {
            let model_idx = (self.select_model)(fidx);
            // Grow the submodel list on demand (the selector may leave holes).
            if self.submodels.len() <= model_idx {
                self.submodels.resize_with(model_idx + 1, SubModel::default);
            }
            self.in_submodel[fidx] = model_idx;
            self.submodels[model_idx].push(fidx);
        }
    }

    /// Use a fully disaggregated model.
    ///
    /// A fully disaggregated model has one separate submodel for each subproblem.
    /// Hence, calling this method is equivalent to
    /// `CplexMaster::set_submodel_selection(|i| i)`.
    ///
    /// This is also the selection installed by `new`.
    pub fn use_full_disaggregation(&mut self) {
        self.set_submodel_selection(|i| i)
    }

    /// Use a fully aggregated model.
    ///
    /// A fully aggregated model has one submodel for all subproblems.
    /// Hence, calling this method is equivalent to
    /// `CplexMaster::set_submodel_selection(|_| 0)`.
    pub fn use_full_aggregation(&mut self) {
        self.set_submodel_selection(|_| 0)
    }

    /// Recomputes the aggregated minorants and the quadratic term, then
    /// rebuilds the CPLEX QP.
    ///
    /// Only the minorants queued in `updateinds` (or all of them when
    /// `force_update` is set) are recomputed.
    fn init_qp(&mut self) -> Result<()> {
        if self.force_update {
            self.updateinds.clear();
            for mins in &self.minorants {
                self.updateinds.extend(mins.iter().map(|m| m.index));
            }
        }

        let minorants = &self.minorants;

        // Compute the number of minorants in each submodel.
        for submodel in self.submodels.iter_mut() {
            submodel.num_mins = submodel.iter().map(|&fidx| minorants[fidx].len()).min().unwrap_or(0);
            submodel.minorants.resize_with(submodel.num_mins, Minorant::default);
        }

        // Only minorants belonging to the first subproblem of each submodel
        // must be updated.
        //
        // We filter all indices that
        // 1. belong to a subproblem being the first in its model
        // 2. is a valid index (< minimal number of minorants within this submodel)
        // and map them to (index, model_index, minorant_index) where
        // - `index` is the variable index (i.e. minorant index of first subproblem)
        // - `model_index` is the submodel index
        // - `minorant_index` is the index of the minorant within the model
        let updateinds = self
            .updateinds
            .iter()
            .filter_map(|&index| {
                let MinorantIdx { fidx, idx } = self.index2min[index];
                let mod_i = self.in_submodel[fidx];
                let submodel = &self.submodels[mod_i];
                if submodel[0] == fidx && idx < submodel.num_mins {
                    Some((index, mod_i, idx))
                } else {
                    None
                }
            })
            .collect::<Vec<_>>();

        // Compute the aggregated minorants.
        for &(_, mod_i, i) in &updateinds {
            let submodel = &mut self.submodels[mod_i];
            submodel.minorants[i] =
                Aggregatable::combine(submodel.iter().map(|&fidx| (1.0, &minorants[fidx][i].minorant)));
        }

        let submodels = &self.submodels;

        // Build quadratic term, this is <g_i, g_j> for all pairs of minorants where each g_i
        // is an aggregated minorant of a submodel.
        //
        // For simplicity we always store the terms in the index of the minorant of the first
        // subproblem in each submodel.
        let ntotalminorants = self.index2min.len();
        if ntotalminorants > self.qterm.len() {
            self.qterm.resize(ntotalminorants, dvec![]);
            for i in 0..self.qterm.len() {
                self.qterm[i].resize(ntotalminorants, 0.0);
            }
        }

        // - i is the number of the minorant within the submodel submodel_i
        // - idx_i is the unique index of that minorant (of the first subproblem)
        // - j is the number of the minorant within the submodel submodel_j
        // - idx_j is the unique index of that minorant (of the first subproblem)
        for submodel_i in submodels.iter() {
            // Compute the minorant g_i for each i
            for i in 0..submodel_i.num_mins {
                // Store the computed values at the index of the first subproblem in this model.
                let idx_i = minorants[submodel_i[0]][i].index;
                let g_i = &submodel_i.minorants[i].linear;
                // Now compute the inner product with each other minorant
                // that has to be updated.
                for &(idx_j, mod_j, j) in updateinds.iter() {
                    let x = submodels[mod_j].minorants[j].linear.dot(g_i);
                    self.qterm[idx_i][idx_j] = x;
                    self.qterm[idx_j][idx_i] = x;
                }
            }
        }

        // We verify that the qterm is correct
        if cfg!(debug_assertions) {
            for submod_i in submodels.iter() {
                for i in 0..submod_i.num_mins {
                    let idx_i = self.minorants[submod_i[0]][i].index;
                    for submod_j in submodels.iter() {
                        for j in 0..submod_j.num_mins {
                            let idx_j = self.minorants[submod_j[0]][j].index;
                            let x = submod_i.minorants[i].linear.dot(&submod_j.minorants[j].linear);
                            // BUGFIX: compare the *absolute* deviation -- the
                            // signed difference would accept arbitrarily large
                            // negative errors.
                            debug_assert!((x - self.qterm[idx_i][idx_j]).abs() < 1e-6);
                        }
                    }
                }
            }
        }

        // main diagonal plus small identity to ensure Q being semi-definite
        self.qdiag = 0.0;
        for submodel in submodels.iter() {
            for i in 0..submodel.num_mins {
                let idx = minorants[submodel[0]][i].index;
                self.qdiag = Real::max(self.qdiag, self.qterm[idx][idx]);
            }
        }
        self.qdiag *= 1e-8;

        // We have updated everything.
        self.updateinds.clear();
        self.force_update = false;

        self.init_cpx_qp()
    }

    /// Recreates the CPLEX problem from scratch: variables, convexity rows
    /// and the full quadratic coefficient matrix.
    fn init_cpx_qp(&mut self) -> Result<()> {
        // Throw away any previous problem object.
        if !self.lp.is_null() {
            trycpx!(cpx::freeprob(self.env, &mut self.lp));
        }
        trycpx!({
            let mut status = 0;
            self.lp = cpx::createprob(self.env, &mut status, c_str!("mastercp").as_ptr());
            status
        });

        let nsubmodels = self.submodels.len();
        let submodels = &self.submodels;
        let minorants = &self.minorants;

        // add convexity constraints
        let sense: Vec<c_char> = vec!['E' as c_char; nsubmodels];
        let rhs = dvec![1.0; nsubmodels];
        let mut rmatbeg = Vec::with_capacity(nsubmodels);
        let mut rmatind = Vec::with_capacity(self.index2min.len());
        let mut rmatval = Vec::with_capacity(self.index2min.len());

        // One row per non-empty submodel: its multipliers must sum to 1.
        let mut nvars = 0;
        for submodel in submodels.iter() {
            if submodel.is_empty() {
                // this should only happen if the submodel selector leaves
                // holes
                continue;
            }

            rmatbeg.push(nvars as c_int);
            rmatind.extend((nvars as c_int..).take(submodel.num_mins));
            rmatval.extend(repeat(1.0).take(submodel.num_mins));
            nvars += submodel.num_mins;
        }

        // NOTE(review): the first count argument presumably creates the
        // `nvars` columns together with the rows -- TODO confirm against the
        // `cpx::addrows` binding.
        trycpx!(cpx::addrows(
            self.env,
            self.lp,
            nvars as c_int,
            rmatbeg.len() as c_int,
            rmatind.len() as c_int,
            rhs.as_ptr(),
            sense.as_ptr(),
            rmatbeg.as_ptr(),
            rmatind.as_ptr(),
            rmatval.as_ptr(),
            ptr::null(),
            ptr::null()
        ));

        // update coefficients
        // The quadratic coefficient of a variable pair is looked up via the
        // unique index of the first subproblem's minorant in each submodel;
        // `qdiag` is added on the diagonal for positive definiteness.
        let mut var_i = 0;
        for (mod_i, submodel_i) in submodels.iter().enumerate() {
            for i in 0..submodel_i.num_mins {
                let idx_i = minorants[submodel_i[0]][i].index;
                let mut var_j = 0;
                for (mod_j, submodel_j) in submodels.iter().enumerate() {
                    for j in 0..submodel_j.num_mins {
                        let idx_j = minorants[submodel_j[0]][j].index;
                        let q = self.qterm[idx_i][idx_j] + if mod_i != mod_j || i != j { 0.0 } else { self.qdiag };
                        trycpx!(cpx::chgqpcoef(self.env, self.lp, var_i as c_int, var_j as c_int, q));
                        var_j += 1;
                    }
                }
                var_i += 1;
            }
        }

        Ok(())
    }
}

/// Configuration builder for [`CplexMaster`] master problems.
pub struct Builder {
    /// The maximal bundle size used in the master problem.
    pub max_bundle_size: usize,
    /// The submodel selector.
    ///
    /// Defaults to full disaggregation (`|i| i`).
    select_model: Arc<dyn Fn(usize) -> usize>,
}

impl Default for Builder {
    fn default() -> Self {
        Builder {
            max_bundle_size: 50,
            select_model: Arc::new(|i| i),
        }
    }
}

impl super::Builder for Builder {
    type MasterProblem = CplexMaster;

    fn build(&mut self) -> Result<CplexMaster> {
        let mut cpx = CplexMaster::new()?;
        cpx.max_bundle_size = self.max_bundle_size;
        cpx.select_model = self.select_model.clone();
        cpx.update_submodels();
        Ok(cpx)
    }
}

impl Builder {
    /// Sets the maximal bundle size per subproblem.
    ///
    /// # Panics
    ///
    /// Panics if `s < 2`.
    pub fn max_bundle_size(&mut self, s: usize) -> &mut Self {
        assert!(s >= 2, "The maximal bundle size must be >= 2");
        self.max_bundle_size = s;
        self
    }

    /// Set a custom submodel selector.
    ///
    /// For each subproblem index the selector should return a submodel index.
    /// All subproblems with the same submodel index are aggregated in a single
    /// cutting plane model.
    pub fn submodel_selection<F>(&mut self, selector: F) -> &mut Self
    where
        F: Fn(usize) -> usize + 'static,
    {
        self.select_model = Arc::new(selector);
        self
    }

    /// Use a fully disaggregated model.
    ///
    /// A fully disaggregated model has one separate submodel for each subproblem.
    /// Hence, calling this method is equivalent to
    /// `CplexMaster::set_submodel_selection(|i| i)`.
    pub fn use_full_disaggregation(&mut self) -> &mut Self {
        self.submodel_selection(|i| i)
    }

    /// Use a fully aggregated model.
    ///
    /// A fully aggregated model has one submodel for all subproblems.
    /// Hence, calling this method is equivalent to
    /// `CplexMaster::set_submodel_selection(|_| 0)`.
    pub fn use_full_aggregation(&mut self) -> &mut Self {
        self.submodel_selection(|_| 0)
    }
}
Added src/master/boxed/unconstrained/minimal.rs.

















































































































































































































































































































































































































1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
// Copyright (c) 2016, 2017, 2019 Frank Fischer <frank-fischer@shadow-soft.de>
//
// This program is free software: you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see  <http://www.gnu.org/licenses/>
//

use super::{SubgradientExtension, UnconstrainedMasterProblem};
use crate::{Aggregatable, DVector, Minorant, Real};

use log::debug;

use std::error::Error;
use std::f64::NEG_INFINITY;
use std::fmt;
use std::result;

/// Minimal master problem error.
#[derive(Debug)]
pub enum MinimalMasterError {
    /// The master problem does not contain any minorant.
    NoMinorants,
    /// More than the supported two minorants were requested for `subproblem`.
    MaxMinorants { subproblem: usize },
    /// Extending subgradients to new variables failed.
    SubgradientExtension(Box<dyn Error + Send + Sync>),
}

impl fmt::Display for MinimalMasterError {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
        use self::MinimalMasterError::*;
        match self {
            MaxMinorants { subproblem } => write!(
                fmt,
                "The minimal master problem allows at most two minorants (subproblem: {})",
                subproblem
            ),
            NoMinorants => write!(fmt, "The master problem does not contain a minorant"),
            SubgradientExtension(err) => write!(fmt, "Subgradient extension failed: {}", err),
        }
    }
}

impl Error for MinimalMasterError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        use MinimalMasterError::*;
        match self {
            SubgradientExtension(err) => Some(err.as_ref()),
            _ => None,
        }
    }
}

/**
 * A minimal master problem with only two minorants.
 *
 * This is the simplest possible master problem for bundle methods. It
 * has only two minorants and only one function model. The advantage
 * is that this model can be solved explicitly and very quickly, but
 * it is only a very loose approximation of the objective function.
 *
 * Because of its properties, it can only be used if the problem to be
 * solved has a maximal number of minorants of two and only one
 * subproblem.
 */
pub struct MinimalMaster {
    /// The weight of the quadratic term.
    weight: Real,

    /// The minorants in the model.
    ///
    /// There are up to two minorants, each minorant consists of one part for
    /// each subproblem.
    ///
    /// The minorants for the i-th subproblem have the indices `2*i` and
    /// `2*i+1`.
    minorants: [Vec<Minorant>; 2],
    /// The number of minorants. Only the minorants with index less than this
    /// number are valid.
    num_minorants: usize,
    /// The number of minorants for each subproblem.
    num_minorants_of: Vec<usize>,
    /// The number of subproblems.
    num_subproblems: usize,
    /// The number of subproblems with at least 1 minorant.
    num_subproblems_with_1: usize,
    /// The number of subproblems with at least 2 minorants.
    num_subproblems_with_2: usize,
    /// Optimal multipliers.
    opt_mult: [Real; 2],
    /// Optimal aggregated minorant.
    opt_minorant: Minorant,
}

impl UnconstrainedMasterProblem for MinimalMaster {
    type MinorantIndex = usize;

    type Err = MinimalMasterError;

    /// Create an empty master problem (weight 1, no subproblems).
    fn new() -> Result<MinimalMaster, Self::Err> {
        Ok(MinimalMaster {
            weight: 1.0,
            num_minorants: 0,
            num_minorants_of: vec![],
            num_subproblems: 0,
            num_subproblems_with_1: 0,
            num_subproblems_with_2: 0,
            minorants: [vec![], vec![]],
            opt_mult: [0.0, 0.0],
            opt_minorant: Minorant::default(),
        })
    }

    fn num_subproblems(&self) -> usize {
        self.num_subproblems
    }

    /// Set the number of subproblems and reset all bundle state.
    fn set_num_subproblems(&mut self, n: usize) -> Result<(), Self::Err> {
        self.num_subproblems = n;
        self.num_minorants = 0;
        self.num_minorants_of = vec![0; n];
        self.num_subproblems_with_1 = 0;
        self.num_subproblems_with_2 = 0;
        self.minorants = [vec![Minorant::default(); n], vec![Minorant::default(); n]];
        Ok(())
    }

    /// Compress the bundle by aggregating the two minorant rows into one.
    ///
    /// Only acts when the model currently has two minorants.  The callback
    /// `f` is notified for each subproblem with the surviving index
    /// (`2*fidx`) and an iterator of `(old index, multiplier)` pairs used
    /// for the aggregation.
    fn compress<F>(&mut self, f: F) -> Result<(), Self::Err>
    where
        F: FnMut(Self::MinorantIndex, &mut dyn Iterator<Item = (Self::MinorantIndex, Real)>),
    {
        if self.num_minorants == 2 {
            debug!("Aggregate");
            debug!("  {} * {:?}", self.opt_mult[0], self.minorants[0]);
            debug!("  {} * {:?}", self.opt_mult[1], self.minorants[1]);

            let mut f = f;
            for fidx in 0..self.num_subproblems {
                f(
                    2 * fidx,
                    &mut self
                        .opt_mult
                        .iter()
                        .enumerate()
                        .map(|(i, alpha)| (2 * fidx + i, *alpha)),
                );
            }

            // Combine both minorant rows (weighted by the optimal
            // multipliers of the last solve) into row 0; afterwards each
            // subproblem has exactly one (aggregated) minorant.
            self.minorants[0] = Aggregatable::combine(self.opt_mult.iter().cloned().zip(&self.minorants));
            self.opt_mult[0] = 1.0;
            self.num_minorants = 1;
            self.num_minorants_of.clear();
            self.num_minorants_of.resize(self.num_subproblems, 1);
            self.num_subproblems_with_2 = 0;

            debug!("  {:?}", self.minorants[0]);
        }
        Ok(())
    }

    fn weight(&self) -> Real {
        self.weight
    }

    /// Set the weight of the quadratic term.
    ///
    /// # Panics
    ///
    /// Panics if `weight` is not strictly positive.
    fn set_weight(&mut self, weight: Real) -> Result<(), Self::Err> {
        assert!(weight > 0.0);
        self.weight = weight;
        Ok(())
    }

    fn num_minorants(&self, fidx: usize) -> usize {
        self.num_minorants_of[fidx]
    }

    /// Add a minorant for subproblem `fidx` and return its global index
    /// (`2*fidx` or `2*fidx + 1`).
    ///
    /// # Errors
    ///
    /// Fails with `MaxMinorants` if the subproblem already has two
    /// minorants.
    fn add_minorant(&mut self, fidx: usize, minorant: Minorant) -> Result<usize, Self::Err> {
        if self.num_minorants_of[fidx] >= 2 {
            return Err(MinimalMasterError::MaxMinorants { subproblem: fidx });
        }

        let minidx = self.num_minorants_of[fidx];
        self.num_minorants_of[fidx] += 1;
        self.minorants[minidx][fidx] = minorant;

        // A minorant row only becomes "valid" once *every* subproblem has a
        // minorant in that row; the corresponding multiplier starts at 0.
        match minidx {
            0 => {
                self.num_subproblems_with_1 += 1;
                if self.num_subproblems_with_1 == self.num_subproblems {
                    self.num_minorants = 1;
                    self.opt_mult[0] = 0.0;
                }
                Ok(2 * fidx)
            }
            1 => {
                self.num_subproblems_with_2 += 1;
                if self.num_subproblems_with_2 == self.num_subproblems {
                    self.num_minorants = 2;
                    self.opt_mult[1] = 0.0;
                }
                Ok(2 * fidx + 1)
            }
            _ => unreachable!("Invalid number of minorants in subproblem {}", fidx),
        }
    }

    /// Extend all stored minorants to `nnew` additional variables and
    /// refresh the subgradient entries listed in `changed`.
    ///
    /// `extend_subgradient(fidx, min_index, vars)` must return the new
    /// subgradient values for exactly the requested variables, in order.
    fn add_vars(
        &mut self,
        nnew: usize,
        changed: &[usize],
        extend_subgradient: &mut SubgradientExtension<Self::MinorantIndex>,
    ) -> Result<(), Self::Err> {
        if self.num_subproblems_with_1 == 0 {
            return Ok(());
        }

        // Determine the current variable count from the first subproblem
        // that already owns a minorant.
        let noldvars = self.minorants[0][self.num_minorants_of.iter().position(|&n| n > 0).unwrap()]
            .linear
            .len();
        let mut changedvars = vec![];
        changedvars.extend_from_slice(changed);
        changedvars.extend(noldvars..noldvars + nnew);

        for fidx in 0..self.num_subproblems {
            for i in 0..self.num_minorants_of[fidx] {
                let new_subg = extend_subgradient(fidx, 2 * fidx + i, &changedvars)
                    .map_err(MinimalMasterError::SubgradientExtension)?;
                let m = &mut self.minorants[i][fidx];
                // First `changed.len()` entries overwrite existing
                // coefficients; the rest are appended for the new variables.
                for (&j, &g) in changed.iter().zip(new_subg.iter()) {
                    m.linear[j] = g;
                }
                m.linear.extend_from_slice(&new_subg[changed.len()..]);
            }
        }

        Ok(())
    }

    /// Solve the master problem explicitly.
    ///
    /// With two minorants the quadratic master over the convex combinations
    /// `(1-α)·min0 + α·min1` is minimized in closed form (`alpha2` below,
    /// clipped to `[0, 1]` to stay a convex combination); with one minorant
    /// the solution is that minorant itself.
    ///
    /// NOTE(review): the derivation of the closed-form optimum is not
    /// visible in this file — verify against the accompanying math notes.
    #[allow(unused_variables)]
    fn solve(&mut self, eta: &DVector, fbound: Real, augbound: Real, relprec: Real) -> Result<(), Self::Err> {
        for fidx in 0..self.num_subproblems {
            for i in 0..self.num_minorants_of[fidx] {
                debug!("  min(fidx:{}, i:{}) = {}", fidx, i, self.minorants[i][fidx]);
            }
        }

        if self.num_minorants == 2 {
            // Sum the per-subproblem parts into one minorant per row.
            let min0 = Minorant::combine((0..self.num_subproblems).map(|fidx| (1.0, &self.minorants[0][fidx])));
            let min1 = Minorant::combine((0..self.num_subproblems).map(|fidx| (1.0, &self.minorants[1][fidx])));
            let xx = min0.linear.dot(&min0.linear);
            let yy = min1.linear.dot(&min1.linear);
            let xy = min0.linear.dot(&min1.linear);
            let xeta = min0.linear.dot(eta);
            let yeta = min1.linear.dot(eta);
            let a = yy - 2.0 * xy + xx;
            let b = xy - xx - yeta + xeta;

            // `a == 0` means both subgradients coincide; any α is optimal,
            // so keep α = 0.
            let mut alpha2 = 0.0;
            if a > 0.0 {
                alpha2 = ((min1.constant - min0.constant) * self.weight - b) / a;
                alpha2 = alpha2.max(0.0).min(1.0);
            }
            self.opt_mult[0] = 1.0 - alpha2;
            self.opt_mult[1] = alpha2;
            self.opt_minorant = Aggregatable::combine(self.opt_mult.iter().cloned().zip([min0, min1].iter()));
        } else if self.num_minorants == 1 {
            let min0 = Aggregatable::combine((0..self.num_subproblems).map(|fidx| (1.0, &self.minorants[0][fidx])));
            self.opt_minorant = min0;
            self.opt_mult[0] = 1.0;
        } else {
            return Err(MinimalMasterError::NoMinorants);
        }

        debug!("Unrestricted");
        debug!("  opt_minorant={}", self.opt_minorant);
        debug!("  opt_mult={:?}", &self.opt_mult[0..self.num_minorants]);

        Ok(())
    }

    fn dualopt(&self) -> &DVector {
        &self.opt_minorant.linear
    }

    fn dualopt_cutval(&self) -> Real {
        self.opt_minorant.constant
    }

    /// Multiplier of a global minorant index; `min % 2` selects the row.
    fn multiplier(&self, min: usize) -> Real {
        self.opt_mult[min % 2]
    }

    /// Iterate over `(global index, multiplier)` pairs for subproblem `fidx`.
    fn opt_multipliers<'a>(&'a self, fidx: usize) -> Box<dyn Iterator<Item = (Self::MinorantIndex, Real)> + 'a> {
        Box::new(
            self.opt_mult
                .iter()
                .take(self.num_minorants_of[fidx])
                .enumerate()
                .map(move |(i, alpha)| (2 * fidx + i, *alpha)),
        )
    }

    /// Evaluate the cutting-plane model at `y`: the maximum over the valid
    /// minorant rows of the sum of per-subproblem values.
    fn eval_model(&self, y: &DVector) -> Real {
        let mut result = NEG_INFINITY;
        for mins in &self.minorants[0..self.num_minorants] {
            result = result.max(mins.iter().map(|m| m.eval(y)).sum());
        }
        result
    }

    /// Aggregate the given minorants of subproblem `fidx` into one.
    ///
    /// Returns the surviving global index and the aggregation coefficients.
    ///
    /// # Panics
    ///
    /// Panics if `mins` is empty; debug assertions check that the indices
    /// belong to `fidx` and are valid.
    fn aggregate(&mut self, fidx: usize, mins: &[usize]) -> Result<(usize, DVector), Self::Err> {
        debug!("Aggregate minorants {:?} of subproblem {}", mins, fidx);
        if mins.len() == 2 {
            debug_assert_ne!(mins[0], mins[1], "Minorants to be aggregated must be different");
            debug_assert_eq!(
                mins[0] / 2,
                fidx,
                "Minorant {} does not belong to subproblem {}",
                mins[0],
                fidx
            );
            debug_assert_eq!(
                mins[1] / 2,
                fidx,
                "Minorant {} does not belong to subproblem {}",
                mins[1],
                fidx
            );
            debug_assert!(
                mins[0] % 2 < self.num_minorants_of[fidx],
                "Invalid minorant index for subproblem {}: {}",
                fidx,
                mins[0]
            );
            debug_assert!(
                mins[1] % 2 < self.num_minorants_of[fidx],
                "Invalid minorant index for subproblem {}: {}",
                fidx,
                mins[1]
            );

            let min0 = mins[0] % 2;
            let min1 = mins[1] % 2;

            debug!("Aggregate");
            debug!("  {} * {}", self.opt_mult[min0], self.minorants[min0][fidx]);
            debug!("  {} * {}", self.opt_mult[min1], self.minorants[min1][fidx]);
            self.minorants[0][fidx] = Aggregatable::combine(
                [
                    (self.opt_mult[min0], &self.minorants[min0][fidx]),
                    (self.opt_mult[min1], &self.minorants[min1][fidx]),
                ]
                .iter()
                .cloned(),
            );

            // The global count only drops once this was the last subproblem
            // still holding two minorants.
            if self.num_subproblems_with_2 == self.num_subproblems {
                self.num_minorants -= 1;
            }
            self.num_subproblems_with_2 -= 1;
            self.num_minorants_of[fidx] -= 1;

            let coeffs = dvec![self.opt_mult[min0], self.opt_mult[min1]];

            debug!("  {}", self.minorants[0][fidx]);
            Ok((2 * fidx, coeffs))
        } else if mins.len() == 1 {
            // A single minorant aggregates to itself with coefficient 1.
            Ok((mins[0], dvec![1.0]))
        } else {
            panic!("No minorants specified to be aggregated");
        }
    }

    /// Shift all stored minorants to a new stability center `center + alpha*d`.
    fn move_center(&mut self, alpha: Real, d: &DVector) {
        for fidx in 0..self.num_subproblems {
            for i in 0..self.num_minorants_of[fidx] {
                self.minorants[i][fidx].move_center(alpha, d);
            }
        }
    }
}

/// Builder producing [`MinimalMaster`] instances.
#[derive(Default)]
pub struct Builder;

impl super::Builder for Builder {
    type MasterProblem = MinimalMaster;

    /// Build a fresh, empty [`MinimalMaster`].
    fn build(&mut self) -> Result<MinimalMaster, MinimalMasterError> {
        MinimalMaster::new()
    }
}
Deleted src/master/cpx.rs.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815















































































































































































































































































































































































































































































































































































































































































































































































































































-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
// Copyright (c) 2016, 2017, 2018, 2019 Frank Fischer <frank-fischer@shadow-soft.de>
//
// This program is free software: you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see  <http://www.gnu.org/licenses/>
//

//! Master problem implementation using CPLEX.

#![allow(unused_unsafe)]

use crate::master::unconstrained::{self, UnconstrainedMasterProblem};
use crate::master::SubgradientExtension;
use crate::{Aggregatable, DVector, Minorant, Real};

use c_str_macro::c_str;
use cplex_sys as cpx;
use cplex_sys::trycpx;
use log::debug;

use std;
use std::f64::NEG_INFINITY;
use std::iter::repeat;
use std::ops::{Deref, DerefMut};
use std::os::raw::{c_char, c_int};
use std::ptr;
use std::sync::Arc;

/// Errors raised by the CPLEX-based master problem.
#[derive(Debug)]
pub enum CplexMasterError {
    /// An error reported by the CPLEX library itself.
    Cplex(cpx::CplexError),
    /// Extending a subgradient to newly added variables failed.
    SubgradientExtension(Box<dyn std::error::Error + Send + Sync>),
    /// The master problem contains no minorants and therefore cannot be solved.
    NoMinorants,
}

impl std::fmt::Display for CplexMasterError {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::result::Result<(), std::fmt::Error> {
        use CplexMasterError::*;
        match self {
            Cplex(err) => err.fmt(fmt),
            SubgradientExtension(err) => write!(fmt, "Subgradient extension failed: {}", err),
            NoMinorants => write!(fmt, "Master problem contains no minorants"),
        }
    }
}

impl std::error::Error for CplexMasterError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        use CplexMasterError::*;
        match self {
            Cplex(err) => Some(err),
            SubgradientExtension(err) => Some(err.as_ref()),
            NoMinorants => None,
        }
    }
}

impl From<cpx::CplexError> for CplexMasterError {
    /// Wrap a raw CPLEX error, enabling `?` on `trycpx!` results.
    fn from(err: cpx::CplexError) -> Self {
        Self::Cplex(err)
    }
}

/// Convenience alias for results carrying a [`CplexMasterError`].
pub type Result<T> = std::result::Result<T, CplexMasterError>;

/// A minorant and its unique index.
struct MinorantInfo {
    /// The minorant itself (also reachable through `Deref`/`DerefMut`).
    minorant: Minorant,
    /// The globally unique index of this minorant (key into `index2min`).
    index: usize,
}

impl Deref for MinorantInfo {
    type Target = Minorant;

    /// Borrow the wrapped minorant.
    fn deref(&self) -> &Self::Target {
        &self.minorant
    }
}

impl DerefMut for MinorantInfo {
    /// Mutably borrow the wrapped minorant.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.minorant
    }
}

/// Maps a global index to a minorant.
///
/// Cheap to copy; used as the value type of `CplexMaster::index2min`.
#[derive(Clone, Copy)]
struct MinorantIdx {
    /// The function (subproblem) index.
    fidx: usize,
    /// The minorant index within the subproblem.
    idx: usize,
}

/// A submodel.
///
/// A submodel is a list of subproblems being aggregated in that model.
#[derive(Debug, Clone, Default)]
struct SubModel {
    /// The list of subproblems (also reachable through `Deref`/`DerefMut`).
    subproblems: Vec<usize>,

    /// The number of minorants in this subproblem.
    ///
    /// This is the minimal number of minorants in each contained subproblem.
    num_mins: usize,
    /// The aggregated minorants of this submodel.
    ///
    /// This is just the sum of the corresponding minorants of the single
    /// functions contained in this submodel.
    minorants: Vec<Minorant>,
}

impl Deref for SubModel {
    type Target = Vec<usize>;

    /// Borrow the list of subproblem indices.
    fn deref(&self) -> &Self::Target {
        &self.subproblems
    }
}

impl DerefMut for SubModel {
    /// Mutably borrow the list of subproblem indices.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.subproblems
    }
}

/// Master problem backed by the CPLEX QP solver.
pub struct CplexMaster {
    /// Raw CPLEX environment handle; owned, released in `Drop`.
    env: *mut cpx::Env,

    /// Raw CPLEX problem handle; owned, released in `Drop`.
    /// Null until the QP is first built — TODO confirm `init_qp` creates it.
    lp: *mut cpx::Lp,

    /// True if the QP must be updated.
    force_update: bool,

    /// List of free minorant indices.
    freeinds: Vec<usize>,

    /// List of minorant indices to be updated.
    updateinds: Vec<usize>,

    /// Mapping index to minorant.
    index2min: Vec<MinorantIdx>,

    /// The quadratic term.
    qterm: Vec<DVector>,

    /// The additional diagonal term to ensure positive definiteness.
    qdiag: Real,

    /// The weight of the quadratic term.
    weight: Real,

    /// The minorants for each subproblem in the model.
    minorants: Vec<Vec<MinorantInfo>>,

    /// The callback for the submodel for each subproblem.
    select_model: Arc<dyn Fn(usize) -> usize>,

    /// For each submodel the list of subproblems contained in that model.
    submodels: Vec<SubModel>,
    /// For each subproblem the submodel it is contained in.
    in_submodel: Vec<usize>,

    /// Optimal multipliers for each subproblem in the model.
    opt_mults: Vec<DVector>,
    /// Optimal aggregated minorant.
    opt_minorant: Minorant,

    /// Maximal bundle size.
    pub max_bundle_size: usize,
}

// SAFETY: presumably the raw `env`/`lp` handles are exclusively owned by
// this struct and only accessed through `&mut self`, so moving the master
// to another thread is sound — TODO confirm that the CPLEX documentation
// permits migrating an environment between threads.
unsafe impl Send for CplexMaster {}

impl Drop for CplexMaster {
    /// Release the CPLEX problem first, then close the environment.
    ///
    /// NOTE(review): error codes from the C calls are ignored here, which
    /// is the usual best-effort behavior for a destructor.
    fn drop(&mut self) {
        unsafe { cpx::freeprob(self.env, &mut self.lp) };
        unsafe { cpx::closeCPLEX(&mut self.env) };
    }
}

impl UnconstrainedMasterProblem for CplexMaster {
    type MinorantIndex = usize;

    type Err = CplexMasterError;

    /// Open a CPLEX environment and create an empty master problem.
    ///
    /// # Errors
    ///
    /// Fails if the CPLEX environment cannot be opened.
    fn new() -> Result<CplexMaster> {
        let env;

        trycpx!({
            let mut status = 0;
            env = cpx::openCPLEX(&mut status);
            status
        });

        Ok(CplexMaster {
            env,
            lp: ptr::null_mut(),
            force_update: true,
            freeinds: vec![],
            updateinds: vec![],
            index2min: vec![],
            qterm: vec![],
            qdiag: 0.0,
            weight: 1.0,
            minorants: vec![],
            select_model: Arc::new(|i| i),
            submodels: vec![],
            in_submodel: vec![],
            opt_mults: vec![],
            opt_minorant: Minorant::default(),
            max_bundle_size: 50,
        })
    }

    fn num_subproblems(&self) -> usize {
        self.minorants.len()
    }

    /// Set the number of subproblems, configure the CPLEX barrier QP
    /// solver, and reset all bundle state.
    fn set_num_subproblems(&mut self, n: usize) -> Result<()> {
        trycpx!(cpx::setintparam(
            self.env,
            cpx::Param::Qpmethod.to_c(),
            cpx::Alg::Barrier.to_c()
        ));
        trycpx!(cpx::setdblparam(self.env, cpx::Param::Barepcomp.to_c(), 1e-12));

        self.index2min.clear();
        self.freeinds.clear();
        self.minorants = (0..n).map(|_| vec![]).collect();
        self.opt_mults = vec![dvec![]; n];
        self.update_submodels();

        Ok(())
    }

    fn weight(&self) -> Real {
        self.weight
    }

    /// Set the weight of the quadratic term.
    ///
    /// # Panics
    ///
    /// Panics if `weight` is not strictly positive.
    fn set_weight(&mut self, weight: Real) -> Result<()> {
        assert!(weight > 0.0);
        self.weight = weight;
        Ok(())
    }

    fn num_minorants(&self, fidx: usize) -> usize {
        self.minorants[fidx].len()
    }

    /// Keep the bundle below `max_bundle_size` by aggregating the minorants
    /// with the smallest optimal multipliers of each oversized subproblem.
    ///
    /// `f` is informed about each aggregation with the surviving index and
    /// the `(old index, coefficient)` pairs.
    ///
    /// # Panics
    ///
    /// Panics if `max_bundle_size < 2`.
    fn compress<F>(&mut self, f: F) -> Result<()>
    where
        F: FnMut(Self::MinorantIndex, &mut dyn Iterator<Item = (Self::MinorantIndex, Real)>),
    {
        assert!(self.max_bundle_size >= 2, "Maximal bundle size must be >= 2");
        let mut f = f;
        for i in 0..self.num_subproblems() {
            let n = self.num_minorants(i);
            if n >= self.max_bundle_size {
                // aggregate minorants with smallest coefficients
                // (sorted descending via the negated, fixed-point-scaled
                // multiplier — NOTE(review): the 1e6 scaling truncates ties)
                let mut inds = (0..n).collect::<Vec<_>>();
                inds.sort_by_key(|&j| -((1e6 * self.opt_mults[i][j]) as isize));
                let inds = inds[self.max_bundle_size - 2..]
                    .iter()
                    .map(|&j| self.minorants[i][j].index)
                    .collect::<Vec<_>>();
                let (newindex, coeffs) = self.aggregate(i, &inds)?;
                f(newindex, &mut inds.into_iter().zip(coeffs));
            }
        }
        Ok(())
    }

    /// Add a minorant for subproblem `fidx` and return its globally unique
    /// index (recycled from `freeinds` when possible).
    fn add_minorant(&mut self, fidx: usize, minorant: Minorant) -> Result<usize> {
        debug!("Add minorant");
        debug!("  fidx={} index={}: {}", fidx, self.minorants[fidx].len(), minorant);

        // find new unique minorant index
        let min_idx = self.minorants[fidx].len();
        let index = if let Some(index) = self.freeinds.pop() {
            self.index2min[index] = MinorantIdx { fidx, idx: min_idx };
            index
        } else {
            self.index2min.push(MinorantIdx { fidx, idx: min_idx });
            self.index2min.len() - 1
        };
        self.updateinds.push(index);

        // store minorant; its multiplier starts at 0
        self.minorants[fidx].push(MinorantInfo { minorant, index });
        self.opt_mults[fidx].push(0.0);

        Ok(index)
    }

    /// Extend all stored minorants to `nnew` additional variables and
    /// refresh the subgradient entries listed in `changed`.
    fn add_vars(
        &mut self,
        nnew: usize,
        changed: &[usize],
        extend_subgradient: &mut SubgradientExtension<Self::MinorantIndex>,
    ) -> Result<()> {
        debug_assert!(!self.minorants[0].is_empty());
        if changed.is_empty() && nnew == 0 {
            return Ok(());
        }
        let noldvars = self.minorants[0][0].linear.len();
        let nnewvars = noldvars + nnew;

        let mut changedvars = vec![];
        changedvars.extend_from_slice(changed);
        changedvars.extend(noldvars..nnewvars);
        for (fidx, mins) in self.minorants.iter_mut().enumerate() {
            for m in &mut mins[..] {
                let new_subg =
                    extend_subgradient(fidx, m.index, &changedvars).map_err(CplexMasterError::SubgradientExtension)?;
                // First `changed.len()` entries overwrite existing
                // coefficients; the rest are appended for the new variables.
                for (&j, &g) in changed.iter().zip(new_subg.iter()) {
                    m.linear[j] = g;
                }
                m.linear.extend_from_slice(&new_subg[changed.len()..]);
            }
        }

        // update qterm because all minorants have changed
        self.force_update = true;

        Ok(())
    }

    /// Solve the master QP with CPLEX.
    ///
    /// Rebuilds the QP if necessary, updates the linear objective from
    /// `eta`, runs the barrier QP, and stores the optimal multipliers and
    /// the aggregated optimal minorant.
    ///
    /// # Errors
    ///
    /// Fails with `NoMinorants` when no submodel has a complete minorant,
    /// or with `Cplex` on any solver error.
    fn solve(&mut self, eta: &DVector, _fbound: Real, _augbound: Real, _relprec: Real) -> Result<()> {
        if self.force_update || !self.updateinds.is_empty() {
            self.init_qp()?;
        }

        let nvars = unsafe { cpx::getnumcols(self.env, self.lp) as usize };
        // One QP variable per (submodel, minorant-slot) pair; each submodel
        // contributes as many slots as its least-covered subproblem has.
        debug_assert_eq!(
            nvars,
            self.submodels
                .iter()
                .map(|funs| funs.iter().map(|&fidx| self.minorants[fidx].len()).min().unwrap_or(0))
                .sum::<usize>()
        );
        if nvars == 0 {
            return Err(CplexMasterError::NoMinorants);
        }
        // update linear costs
        {
            let mut c = Vec::with_capacity(nvars);
            let mut inds = Vec::with_capacity(nvars);
            for submodel in &self.submodels {
                for i in 0..submodel.num_mins {
                    let m = &submodel.minorants[i];
                    let cost = -m.constant * self.weight - m.linear.dot(eta);
                    inds.push(c.len() as c_int);
                    c.push(cost);
                }
            }
            debug_assert_eq!(inds.len(), nvars);
            trycpx!(cpx::chgobj(
                self.env,
                self.lp,
                nvars as c_int,
                inds.as_ptr(),
                c.as_ptr()
            ));
        }

        trycpx!(cpx::qpopt(self.env, self.lp));
        let mut sol = vec![0.0; nvars];
        trycpx!(cpx::getx(self.env, self.lp, sol.as_mut_ptr(), 0, nvars as c_int - 1));

        // Distribute the submodel multipliers back to the individual
        // subproblems: every subproblem in a submodel shares the same
        // multiplier for a given minorant slot.
        let mut idx = 0;
        let mut mults = Vec::with_capacity(nvars);
        let mut mins = Vec::with_capacity(nvars);

        for submodel in &self.submodels {
            for i in 0..submodel.num_mins {
                for &fidx in submodel.iter() {
                    self.opt_mults[fidx][i] = sol[idx];
                    mults.push(sol[idx]);
                    mins.push(&self.minorants[fidx][i].minorant);
                }
                idx += 1;
            }
            // set all multipliers for unused minorants to 0
            for &fidx in submodel.iter() {
                for mult in &mut self.opt_mults[fidx][submodel.num_mins..] {
                    *mult = 0.0;
                }
            }
        }

        self.opt_minorant = Aggregatable::combine(mults.into_iter().zip(mins));

        Ok(())
    }

    fn dualopt(&self) -> &DVector {
        &self.opt_minorant.linear
    }

    fn dualopt_cutval(&self) -> Real {
        self.opt_minorant.constant
    }

    /// Multiplier of a minorant identified by its global index.
    fn multiplier(&self, min: usize) -> Real {
        let MinorantIdx { fidx, idx } = self.index2min[min];
        self.opt_mults[fidx][idx]
    }

    /// Iterate over `(global index, multiplier)` pairs for subproblem `fidx`.
    fn opt_multipliers<'a>(&'a self, fidx: usize) -> Box<dyn Iterator<Item = (Self::MinorantIndex, Real)> + 'a> {
        Box::new(
            self.opt_mults[fidx]
                .iter()
                .enumerate()
                .map(move |(i, alpha)| (self.minorants[fidx][i].index, *alpha)),
        )
    }

    /// Evaluate the cutting-plane model at `y`: sum over submodels of the
    /// maximum of their aggregated minorants.
    fn eval_model(&self, y: &DVector) -> Real {
        let mut result = 0.0;
        for submodel in &self.submodels {
            let mut this_val = NEG_INFINITY;
            for m in &submodel.minorants[0..submodel.num_mins] {
                this_val = this_val.max(m.eval(y));
            }
            result += this_val;
        }
        result
    }

    /// Aggregate the given minorants of subproblem `fidx` into one.
    ///
    /// The aggregation coefficients are the optimal multipliers normalized
    /// to sum to one (all zero if the multipliers sum to zero).  The
    /// aggregated minorants are removed and the combined one is added;
    /// returns its new index together with the coefficients.
    ///
    /// # Panics
    ///
    /// Panics if `mins` is empty.
    fn aggregate(&mut self, fidx: usize, mins: &[usize]) -> Result<(usize, DVector)> {
        assert!(!mins.is_empty(), "No minorants specified to be aggregated");

        if mins.len() == 1 {
            return Ok((mins[0], dvec![1.0]));
        }

        // scale coefficients
        let mut sum_coeffs = 0.0;
        for &i in mins {
            debug_assert_eq!(
                fidx, self.index2min[i].fidx,
                "Minorant {} does not belong to subproblem {} (belongs to: {})",
                i, fidx, self.index2min[i].fidx
            );
            sum_coeffs += self.opt_mults[fidx][self.index2min[i].idx];
        }
        let aggr_coeffs = if sum_coeffs != 0.0 {
            mins.iter()
                .map(|&i| self.opt_mults[fidx][self.index2min[i].idx] / sum_coeffs)
                .collect::<DVector>()
        } else {
            dvec![0.0; mins.len()]
        };

        // Compute the aggregated minorant.
        let aggr = Aggregatable::combine(
            aggr_coeffs.iter().cloned().zip(
                mins.iter()
                    .map(|&index| &self.minorants[fidx][self.index2min[index].idx].minorant),
            ),
        );

        // Remove the minorants that have been aggregated.
        for &i in mins {
            let MinorantIdx {
                fidx: min_fidx,
                idx: min_idx,
            } = self.index2min[i];
            debug_assert_eq!(
                fidx, min_fidx,
                "Minorant {} does not belong to subproblem {} (belongs to: {})",
                i, fidx, min_fidx
            );

            let m = self.minorants[fidx].swap_remove(min_idx);
            self.opt_mults[fidx].swap_remove(min_idx);
            self.freeinds.push(m.index);
            debug_assert_eq!(m.index, i);

            // Update index2min table and mark qterm to be updated.
            // This is only necessary if the removed minorant was not the last one.
            if min_idx < self.minorants[fidx].len() {
                self.index2min[self.minorants[fidx][min_idx].index].idx = min_idx;
                self.updateinds.push(self.minorants[fidx][min_idx].index);
            }
        }

        // Finally add the aggregated minorant.
        let aggr_idx = self.add_minorant(fidx, aggr)?;
        Ok((aggr_idx, aggr_coeffs))
    }

    /// Shift all stored minorants (per-subproblem and per-submodel) to a
    /// new stability center.
    fn move_center(&mut self, alpha: Real, d: &DVector) {
        for mins in &mut self.minorants {
            for m in mins.iter_mut() {
                m.move_center(alpha, d);
            }
        }
        for submod in &mut self.submodels {
            for m in &mut submod.minorants {
                m.move_center(alpha, d);
            }
        }
    }
}

impl CplexMaster {
    /// Set a custom submodel selector.
    ///
    /// For each subproblem index the selector should return a submodel index.
    /// All subproblems with the same submodel index are aggregated in a single
    /// cutting plane model.
    fn set_submodel_selection<F>(&mut self, selector: F)
    where
        F: Fn(usize) -> usize + 'static,
    {
        self.select_model = Arc::new(selector);
        self.update_submodels();
    }

    /// Rebuild the subproblem → submodel assignment from `select_model`.
    ///
    /// If the selector leaves holes in the submodel indices, the corresponding
    /// submodels stay empty; empty submodels are skipped when the CPLEX
    /// problem is set up (see `init_cpx_qp`).
    fn update_submodels(&mut self) {
        self.submodels.clear();
        self.in_submodel.resize(self.num_subproblems(), 0);
        for fidx in 0..self.num_subproblems() {
            let model_idx = (self.select_model)(fidx);
            if model_idx >= self.submodels.len() {
                self.submodels.resize_with(model_idx + 1, SubModel::default);
            }
            self.submodels[model_idx].push(fidx);
            self.in_submodel[fidx] = model_idx;
        }
    }

    /// Use a fully disaggregated model.
    ///
    /// A fully disaggregated model has one separate submodel for each subproblem.
    /// Hence, calling this method is equivalent to
    /// `CplexMaster::set_submodel_selection(|i| i)`.
    pub fn use_full_disaggregation(&mut self) {
        self.set_submodel_selection(|i| i)
    }

    /// Use a fully aggregated model.
    ///
    /// A fully aggregated model has one submodel for all subproblems.
    /// Hence, calling this method is equivalent to
    /// `CplexMaster::set_submodel_selection(|_| 0)`.
    pub fn use_full_aggregation(&mut self) {
        self.set_submodel_selection(|_| 0)
    }

    /// (Re-)initialize the quadratic master problem.
    ///
    /// Aggregates the minorants of each submodel, refreshes the cached inner
    /// products (`qterm`) for all minorants listed in `updateinds` (or all of
    /// them if `force_update` is set) and finally rebuilds the CPLEX problem
    /// via `init_cpx_qp`.
    fn init_qp(&mut self) -> Result<()> {
        if self.force_update {
            self.updateinds.clear();
            for mins in &self.minorants {
                self.updateinds.extend(mins.iter().map(|m| m.index));
            }
        }

        let minorants = &self.minorants;

        // Compute the number of minorants in each submodel.
        //
        // NOTE(review): the minimum over all subproblems is used, i.e. only as
        // many aggregated minorants exist as the *smallest* subproblem
        // provides — extra minorants of larger subproblems are ignored here.
        for submodel in self.submodels.iter_mut() {
            submodel.num_mins = submodel.iter().map(|&fidx| minorants[fidx].len()).min().unwrap_or(0);
            submodel.minorants.resize_with(submodel.num_mins, Minorant::default);
        }

        // Only minorants belonging to the first subproblem of each submodel
        // must be updated.
        //
        // We filter all indices that
        // 1. belong to a subproblem being the first in its model
        // 2. is a valid index (< minimal number of minorants within this submodel)
        // and map them to (index, model_index, minorant_index) where
        // - `index` is the variable index (i.e. minorant index of first subproblem)
        // - `model_index` is the submodel index
        // - `minorant_index` is the index of the minorant within the model
        let updateinds = self
            .updateinds
            .iter()
            .filter_map(|&index| {
                let MinorantIdx { fidx, idx } = self.index2min[index];
                let mod_i = self.in_submodel[fidx];
                let submodel = &self.submodels[mod_i];
                if submodel[0] == fidx && idx < submodel.num_mins {
                    Some((index, mod_i, idx))
                } else {
                    None
                }
            })
            .collect::<Vec<_>>();

        // Compute the aggregated minorants.
        for &(_, mod_i, i) in &updateinds {
            let submodel = &mut self.submodels[mod_i];
            submodel.minorants[i] =
                Aggregatable::combine(submodel.iter().map(|&fidx| (1.0, &minorants[fidx][i].minorant)));
        }

        let submodels = &self.submodels;

        // Build quadratic term, this is <g_i, g_j> for all pairs of minorants where each g_i
        // is an aggregated minorant of a submodel.
        //
        // For simplicity we always store the terms in the index of the minorant of the first
        // subproblem in each submodel.
        let ntotalminorants = self.index2min.len();
        if ntotalminorants > self.qterm.len() {
            self.qterm.resize(ntotalminorants, dvec![]);
            for i in 0..self.qterm.len() {
                self.qterm[i].resize(ntotalminorants, 0.0);
            }
        }

        // - i is the number of the minorant within the submodel submodel_i
        // - idx_i is the unique index of that minorant (of the first subproblem)
        // - j is the number of the minorant within the submodel submodel_j
        // - idx_j is the unique index of that minorant (of the first subproblem)
        for submodel_i in submodels.iter() {
            // Compute the minorant g_i for each i
            for i in 0..submodel_i.num_mins {
                // Store the computed values at the index of the first subproblem in this model.
                let idx_i = minorants[submodel_i[0]][i].index;
                let g_i = &submodel_i.minorants[i].linear;
                // Now compute the inner product with each other minorant
                // that has to be updated.
                for &(idx_j, mod_j, j) in updateinds.iter() {
                    let x = submodels[mod_j].minorants[j].linear.dot(g_i);
                    self.qterm[idx_i][idx_j] = x;
                    self.qterm[idx_j][idx_i] = x;
                }
            }
        }

        // We verify that the qterm is correct
        if cfg!(debug_assertions) {
            for submod_i in submodels.iter() {
                for i in 0..submod_i.num_mins {
                    let idx_i = self.minorants[submod_i[0]][i].index;
                    for submod_j in submodels.iter() {
                        for j in 0..submod_j.num_mins {
                            let idx_j = self.minorants[submod_j[0]][j].index;
                            let x = submod_i.minorants[i].linear.dot(&submod_j.minorants[j].linear);
                            // Compare with the *absolute* difference: without
                            // `.abs()` the assertion silently passed whenever
                            // the recomputed value was smaller than the cached
                            // one, hiding stale entries.
                            debug_assert!((x - self.qterm[idx_i][idx_j]).abs() < 1e-6);
                        }
                    }
                }
            }
        }

        // main diagonal plus small identity to ensure Q being semi-definite
        self.qdiag = 0.0;
        for submodel in submodels.iter() {
            for i in 0..submodel.num_mins {
                let idx = minorants[submodel[0]][i].index;
                self.qdiag = Real::max(self.qdiag, self.qterm[idx][idx]);
            }
        }
        self.qdiag *= 1e-8;

        // We have updated everything.
        self.updateinds.clear();
        self.force_update = false;

        self.init_cpx_qp()
    }

    /// Recreate the CPLEX problem object from scratch.
    ///
    /// Frees any existing problem, adds one convexity row (multipliers sum to
    /// one) per non-empty submodel and installs the cached quadratic
    /// coefficients (`qterm` plus the `qdiag` regularization on the diagonal).
    fn init_cpx_qp(&mut self) -> Result<()> {
        if !self.lp.is_null() {
            trycpx!(cpx::freeprob(self.env, &mut self.lp));
        }
        trycpx!({
            let mut status = 0;
            self.lp = cpx::createprob(self.env, &mut status, c_str!("mastercp").as_ptr());
            status
        });

        let nsubmodels = self.submodels.len();
        let submodels = &self.submodels;
        let minorants = &self.minorants;

        // add convexity constraints
        let sense: Vec<c_char> = vec!['E' as c_char; nsubmodels];
        let rhs = dvec![1.0; nsubmodels];
        let mut rmatbeg = Vec::with_capacity(nsubmodels);
        let mut rmatind = Vec::with_capacity(self.index2min.len());
        let mut rmatval = Vec::with_capacity(self.index2min.len());

        let mut nvars = 0;
        for submodel in submodels.iter() {
            if submodel.is_empty() {
                // this should only happen if the submodel selector leaves
                // holes
                continue;
            }

            // `nvars` doubles as the running nonzero count here because each
            // row uses `num_mins` consecutive fresh columns.
            rmatbeg.push(nvars as c_int);
            rmatind.extend((nvars as c_int..).take(submodel.num_mins));
            rmatval.extend(repeat(1.0).take(submodel.num_mins));
            nvars += submodel.num_mins;
        }

        trycpx!(cpx::addrows(
            self.env,
            self.lp,
            nvars as c_int,
            rmatbeg.len() as c_int,
            rmatind.len() as c_int,
            rhs.as_ptr(),
            sense.as_ptr(),
            rmatbeg.as_ptr(),
            rmatind.as_ptr(),
            rmatval.as_ptr(),
            ptr::null(),
            ptr::null()
        ));

        // update coefficients
        let mut var_i = 0;
        for (mod_i, submodel_i) in submodels.iter().enumerate() {
            for i in 0..submodel_i.num_mins {
                let idx_i = minorants[submodel_i[0]][i].index;
                let mut var_j = 0;
                for (mod_j, submodel_j) in submodels.iter().enumerate() {
                    for j in 0..submodel_j.num_mins {
                        let idx_j = minorants[submodel_j[0]][j].index;
                        let q = self.qterm[idx_i][idx_j] + if mod_i != mod_j || i != j { 0.0 } else { self.qdiag };
                        trycpx!(cpx::chgqpcoef(self.env, self.lp, var_i as c_int, var_j as c_int, q));
                        var_j += 1;
                    }
                }
                var_i += 1;
            }
        }

        Ok(())
    }
}

/// Builder for `CplexMaster` master problems.
pub struct Builder {
    /// The maximal bundle size used in the master problem.
    ///
    /// Must be at least 2 (enforced by `Builder::max_bundle_size`).
    pub max_bundle_size: usize,
    /// The submodel selector.
    ///
    /// Maps each subproblem index to the index of the submodel whose
    /// cutting plane model the subproblem contributes to.
    select_model: Arc<dyn Fn(usize) -> usize>,
}

impl Default for Builder {
    fn default() -> Self {
        Builder {
            max_bundle_size: 50,
            select_model: Arc::new(|i| i),
        }
    }
}

impl unconstrained::Builder for Builder {
    type MasterProblem = CplexMaster;

    fn build(&mut self) -> Result<CplexMaster> {
        let mut cpx = CplexMaster::new()?;
        cpx.max_bundle_size = self.max_bundle_size;
        cpx.select_model = self.select_model.clone();
        cpx.update_submodels();
        Ok(cpx)
    }
}

impl Builder {
    /// Set the maximal bundle size used in the master problem.
    ///
    /// # Panics
    ///
    /// Panics if `s < 2`.
    pub fn max_bundle_size(&mut self, s: usize) -> &mut Self {
        assert!(s >= 2, "The maximal bundle size must be >= 2");
        self.max_bundle_size = s;
        self
    }

    /// Set a custom submodel selector.
    ///
    /// For each subproblem index the selector should return a submodel index.
    /// All subproblems with the same submodel index are aggregated in a single
    /// cutting plane model.
    pub fn submodel_selection<F>(&mut self, selector: F) -> &mut Self
    where
        F: Fn(usize) -> usize + 'static,
    {
        self.select_model = Arc::new(selector);
        self
    }

    /// Use a fully disaggregated model, i.e. one separate submodel per
    /// subproblem.
    ///
    /// Equivalent to `Builder::submodel_selection(|i| i)`.
    pub fn use_full_disaggregation(&mut self) -> &mut Self {
        self.submodel_selection(|i| i)
    }

    /// Use a fully aggregated model, i.e. a single submodel shared by all
    /// subproblems.
    ///
    /// Equivalent to `Builder::submodel_selection(|_| 0)`.
    pub fn use_full_aggregation(&mut self) -> &mut Self {
        self.submodel_selection(|_| 0)
    }
}
Deleted src/master/minimal.rs.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402


















































































































































































































































































































































































































-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
// Copyright (c) 2016, 2017, 2019 Frank Fischer <frank-fischer@shadow-soft.de>
//
// This program is free software: you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see  <http://www.gnu.org/licenses/>
//

use crate::master::unconstrained;
use crate::master::{SubgradientExtension, UnconstrainedMasterProblem};
use crate::{Aggregatable, DVector, Minorant, Real};

use log::debug;

use std::error::Error;
use std::f64::NEG_INFINITY;
use std::fmt;
use std::result;

/// Minimal master problem error.
#[derive(Debug)]
pub enum MinimalMasterError {
    /// The model contains no valid minorant, so `solve` cannot produce a
    /// solution.
    NoMinorants,
    /// A third minorant was added to a subproblem; the minimal master
    /// supports at most two minorants per subproblem.
    MaxMinorants { subproblem: usize },
    /// The user-supplied subgradient-extension callback failed (raised by
    /// `add_vars`).
    SubgradientExtension(Box<dyn Error + Send + Sync>),
}

impl fmt::Display for MinimalMasterError {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
        use self::MinimalMasterError::*;
        match self {
            MaxMinorants { subproblem } => write!(
                fmt,
                "The minimal master problem allows at most two minorants (subproblem: {})",
                subproblem
            ),
            NoMinorants => write!(fmt, "The master problem does not contain a minorant"),
            SubgradientExtension(err) => write!(fmt, "Subgradient extension failed: {}", err),
        }
    }
}

impl Error for MinimalMasterError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        use MinimalMasterError::*;
        match self {
            SubgradientExtension(err) => Some(err.as_ref()),
            _ => None,
        }
    }
}

/**
 * A minimal master problem with only two minorants.
 *
 * This is the simplest possible master problem for bundle methods. It
 * keeps at most two minorants per subproblem. The advantage is that
 * this model can be solved explicitly and very quickly, but it is only
 * a very loose approximation of the objective function.
 *
 * Because of its properties, it can only be used if the problem to be
 * solved requires at most two minorants per subproblem.
 */
pub struct MinimalMaster {
    /// The weight of the quadratic term.
    weight: Real,

    /// The minorants in the model.
    ///
    /// There are up to two minorants, each minorant consists of one part for
    /// each subproblem (`minorants[i][fidx]` is the `i`-th minorant of
    /// subproblem `fidx`).
    ///
    /// The minorants for the i-th subproblem have the indices `2*i` and
    /// `2*i+1`.
    minorants: [Vec<Minorant>; 2],
    /// The number of minorants. Only the minorants with index less than this
    /// number are valid.
    ///
    /// This is a *global* count (0, 1 or 2): slot `i` becomes valid only once
    /// every subproblem has at least `i + 1` minorants.
    num_minorants: usize,
    /// The number of minorants for each subproblem.
    num_minorants_of: Vec<usize>,
    /// The number of subproblems.
    num_subproblems: usize,
    /// The number of subproblems with at least 1 minorant.
    num_subproblems_with_1: usize,
    /// The number of subproblems with at least 2 minorants.
    num_subproblems_with_2: usize,
    /// Optimal multipliers (one per global minorant slot).
    opt_mult: [Real; 2],
    /// Optimal aggregated minorant, produced by `solve`.
    opt_minorant: Minorant,
}

impl UnconstrainedMasterProblem for MinimalMaster {
    // Minorants are identified by `2 * fidx + i` where `fidx` is the
    // subproblem index and `i in {0, 1}` is the slot within the subproblem.
    type MinorantIndex = usize;

    type Err = MinimalMasterError;

    /// Create an empty master problem: weight 1, no subproblems, no minorants.
    fn new() -> Result<MinimalMaster, Self::Err> {
        Ok(MinimalMaster {
            weight: 1.0,
            num_minorants: 0,
            num_minorants_of: vec![],
            num_subproblems: 0,
            num_subproblems_with_1: 0,
            num_subproblems_with_2: 0,
            minorants: [vec![], vec![]],
            opt_mult: [0.0, 0.0],
            opt_minorant: Minorant::default(),
        })
    }

    fn num_subproblems(&self) -> usize {
        self.num_subproblems
    }

    /// Set the number of subproblems.
    ///
    /// This resets the model: all counters are cleared and every minorant
    /// slot is replaced by a default minorant.
    fn set_num_subproblems(&mut self, n: usize) -> Result<(), Self::Err> {
        self.num_subproblems = n;
        self.num_minorants = 0;
        self.num_minorants_of = vec![0; n];
        self.num_subproblems_with_1 = 0;
        self.num_subproblems_with_2 = 0;
        self.minorants = [vec![Minorant::default(); n], vec![Minorant::default(); n]];
        Ok(())
    }

    /// Compress the bundle to a single minorant.
    ///
    /// If two minorants are active, they are combined using the optimal
    /// multipliers from the last `solve` and stored in slot 0. The callback
    /// `f` is notified once per subproblem about which minorants were merged
    /// with which coefficients.
    fn compress<F>(&mut self, f: F) -> Result<(), Self::Err>
    where
        F: FnMut(Self::MinorantIndex, &mut dyn Iterator<Item = (Self::MinorantIndex, Real)>),
    {
        if self.num_minorants == 2 {
            debug!("Aggregate");
            debug!("  {} * {:?}", self.opt_mult[0], self.minorants[0]);
            debug!("  {} * {:?}", self.opt_mult[1], self.minorants[1]);

            let mut f = f;
            for fidx in 0..self.num_subproblems {
                // Report that minorants 2*fidx and 2*fidx+1 are merged into
                // 2*fidx with coefficients opt_mult[0], opt_mult[1].
                f(
                    2 * fidx,
                    &mut self
                        .opt_mult
                        .iter()
                        .enumerate()
                        .map(|(i, alpha)| (2 * fidx + i, *alpha)),
                );
            }

            self.minorants[0] = Aggregatable::combine(self.opt_mult.iter().cloned().zip(&self.minorants));
            self.opt_mult[0] = 1.0;
            self.num_minorants = 1;
            self.num_minorants_of.clear();
            self.num_minorants_of.resize(self.num_subproblems, 1);
            self.num_subproblems_with_2 = 0;

            debug!("  {:?}", self.minorants[0]);
        }
        Ok(())
    }

    fn weight(&self) -> Real {
        self.weight
    }

    /// Set the weight of the quadratic term; must be strictly positive.
    fn set_weight(&mut self, weight: Real) -> Result<(), Self::Err> {
        assert!(weight > 0.0);
        self.weight = weight;
        Ok(())
    }

    fn num_minorants(&self, fidx: usize) -> usize {
        self.num_minorants_of[fidx]
    }

    /// Add a minorant for subproblem `fidx` and return its index.
    ///
    /// Fails with `MaxMinorants` if the subproblem already holds two
    /// minorants. A slot only becomes globally active (counted in
    /// `num_minorants`) once *every* subproblem has filled it.
    fn add_minorant(&mut self, fidx: usize, minorant: Minorant) -> Result<usize, Self::Err> {
        if self.num_minorants_of[fidx] >= 2 {
            return Err(MinimalMasterError::MaxMinorants { subproblem: fidx });
        }

        let minidx = self.num_minorants_of[fidx];
        self.num_minorants_of[fidx] += 1;
        self.minorants[minidx][fidx] = minorant;

        match minidx {
            0 => {
                self.num_subproblems_with_1 += 1;
                if self.num_subproblems_with_1 == self.num_subproblems {
                    self.num_minorants = 1;
                    self.opt_mult[0] = 0.0;
                }
                Ok(2 * fidx)
            }
            1 => {
                self.num_subproblems_with_2 += 1;
                if self.num_subproblems_with_2 == self.num_subproblems {
                    self.num_minorants = 2;
                    self.opt_mult[1] = 0.0;
                }
                Ok(2 * fidx + 1)
            }
            _ => unreachable!("Invalid number of minorants in subproblem {}", fidx),
        }
    }

    /// Extend all stored minorants by `nnew` new variables and refresh the
    /// subgradient entries of the variables listed in `changed`.
    ///
    /// The new subgradient values are obtained from the user-supplied
    /// `extend_subgradient` callback; its errors are wrapped in
    /// `MinimalMasterError::SubgradientExtension`.
    fn add_vars(
        &mut self,
        nnew: usize,
        changed: &[usize],
        extend_subgradient: &mut SubgradientExtension<Self::MinorantIndex>,
    ) -> Result<(), Self::Err> {
        if self.num_subproblems_with_1 == 0 {
            return Ok(());
        }

        // Determine the current number of variables from any subproblem that
        // already stores a minorant.
        let noldvars = self.minorants[0][self.num_minorants_of.iter().position(|&n| n > 0).unwrap()]
            .linear
            .len();
        let mut changedvars = vec![];
        changedvars.extend_from_slice(changed);
        changedvars.extend(noldvars..noldvars + nnew);

        for fidx in 0..self.num_subproblems {
            for i in 0..self.num_minorants_of[fidx] {
                let new_subg = extend_subgradient(fidx, 2 * fidx + i, &changedvars)
                    .map_err(MinimalMasterError::SubgradientExtension)?;
                let m = &mut self.minorants[i][fidx];
                // The callback returns values for `changedvars`: first the
                // overwritten entries, then the appended new variables.
                for (&j, &g) in changed.iter().zip(new_subg.iter()) {
                    m.linear[j] = g;
                }
                m.linear.extend_from_slice(&new_subg[changed.len()..]);
            }
        }

        Ok(())
    }

    /// Solve the master problem in closed form.
    ///
    /// With two minorants the optimal multiplier `alpha2` of the second one
    /// is the minimizer of a one-dimensional quadratic, clamped to [0, 1];
    /// with one minorant the solution is the (aggregated) minorant itself.
    /// The arguments `fbound`, `augbound` and `relprec` are unused here — the
    /// closed-form solution is exact.
    #[allow(unused_variables)]
    fn solve(&mut self, eta: &DVector, fbound: Real, augbound: Real, relprec: Real) -> Result<(), Self::Err> {
        for fidx in 0..self.num_subproblems {
            for i in 0..self.num_minorants_of[fidx] {
                debug!("  min(fidx:{}, i:{}) = {}", fidx, i, self.minorants[i][fidx]);
            }
        }

        if self.num_minorants == 2 {
            // Aggregate each slot over all subproblems with unit coefficients.
            let min0 = Minorant::combine((0..self.num_subproblems).map(|fidx| (1.0, &self.minorants[0][fidx])));
            let min1 = Minorant::combine((0..self.num_subproblems).map(|fidx| (1.0, &self.minorants[1][fidx])));
            let xx = min0.linear.dot(&min0.linear);
            let yy = min1.linear.dot(&min1.linear);
            let xy = min0.linear.dot(&min1.linear);
            let xeta = min0.linear.dot(eta);
            let yeta = min1.linear.dot(eta);
            // a = ||g1 - g0||^2, the curvature of the 1-D dual quadratic.
            let a = yy - 2.0 * xy + xx;
            let b = xy - xx - yeta + xeta;

            let mut alpha2 = 0.0;
            if a > 0.0 {
                // Unconstrained minimizer, then projected onto [0, 1].
                alpha2 = ((min1.constant - min0.constant) * self.weight - b) / a;
                alpha2 = alpha2.max(0.0).min(1.0);
            }
            self.opt_mult[0] = 1.0 - alpha2;
            self.opt_mult[1] = alpha2;
            self.opt_minorant = Aggregatable::combine(self.opt_mult.iter().cloned().zip([min0, min1].iter()));
        } else if self.num_minorants == 1 {
            let min0 = Aggregatable::combine((0..self.num_subproblems).map(|fidx| (1.0, &self.minorants[0][fidx])));
            self.opt_minorant = min0;
            self.opt_mult[0] = 1.0;
        } else {
            return Err(MinimalMasterError::NoMinorants);
        }

        debug!("Unrestricted");
        debug!("  opt_minorant={}", self.opt_minorant);
        debug!("  opt_mult={:?}", &self.opt_mult[0..self.num_minorants]);

        Ok(())
    }

    fn dualopt(&self) -> &DVector {
        &self.opt_minorant.linear
    }

    fn dualopt_cutval(&self) -> Real {
        self.opt_minorant.constant
    }

    /// The optimal multiplier of minorant `min` (`min % 2` selects the slot;
    /// all subproblems share the same pair of multipliers).
    fn multiplier(&self, min: usize) -> Real {
        self.opt_mult[min % 2]
    }

    fn opt_multipliers<'a>(&'a self, fidx: usize) -> Box<dyn Iterator<Item = (Self::MinorantIndex, Real)> + 'a> {
        Box::new(
            self.opt_mult
                .iter()
                .take(self.num_minorants_of[fidx])
                .enumerate()
                .map(move |(i, alpha)| (2 * fidx + i, *alpha)),
        )
    }

    /// Evaluate the cutting-plane model at `y`: the maximum over the active
    /// slots of the sum of the slot's minorants over all subproblems.
    fn eval_model(&self, y: &DVector) -> Real {
        let mut result = NEG_INFINITY;
        for mins in &self.minorants[0..self.num_minorants] {
            result = result.max(mins.iter().map(|m| m.eval(y)).sum());
        }
        result
    }

    /// Aggregate the given minorants of subproblem `fidx` into slot 0 using
    /// the optimal multipliers, returning the surviving index and the
    /// aggregation coefficients.
    ///
    /// # Panics
    ///
    /// Panics if `mins` is empty.
    fn aggregate(&mut self, fidx: usize, mins: &[usize]) -> Result<(usize, DVector), Self::Err> {
        debug!("Aggregate minorants {:?} of subproblem {}", mins, fidx);
        if mins.len() == 2 {
            debug_assert_ne!(mins[0], mins[1], "Minorants to be aggregated must be different");
            debug_assert_eq!(
                mins[0] / 2,
                fidx,
                "Minorant {} does not belong to subproblem {}",
                mins[0],
                fidx
            );
            debug_assert_eq!(
                mins[1] / 2,
                fidx,
                "Minorant {} does not belong to subproblem {}",
                mins[1],
                fidx
            );
            debug_assert!(
                mins[0] % 2 < self.num_minorants_of[fidx],
                "Invalid minorant index for subproblem {}: {}",
                fidx,
                mins[0]
            );
            debug_assert!(
                mins[1] % 2 < self.num_minorants_of[fidx],
                "Invalid minorant index for subproblem {}: {}",
                fidx,
                mins[1]
            );

            let min0 = mins[0] % 2;
            let min1 = mins[1] % 2;

            debug!("Aggregate");
            debug!("  {} * {}", self.opt_mult[min0], self.minorants[min0][fidx]);
            debug!("  {} * {}", self.opt_mult[min1], self.minorants[min1][fidx]);
            self.minorants[0][fidx] = Aggregatable::combine(
                [
                    (self.opt_mult[min0], &self.minorants[min0][fidx]),
                    (self.opt_mult[min1], &self.minorants[min1][fidx]),
                ]
                .iter()
                .cloned(),
            );

            // The global slot count drops only if *all* subproblems had two
            // minorants before this aggregation.
            if self.num_subproblems_with_2 == self.num_subproblems {
                self.num_minorants -= 1;
            }
            self.num_subproblems_with_2 -= 1;
            self.num_minorants_of[fidx] -= 1;

            let coeffs = dvec![self.opt_mult[min0], self.opt_mult[min1]];

            debug!("  {}", self.minorants[0][fidx]);
            Ok((2 * fidx, coeffs))
        } else if mins.len() == 1 {
            // A single minorant needs no aggregation.
            Ok((mins[0], dvec![1.0]))
        } else {
            panic!("No minorants specified to be aggregated");
        }
    }

    /// Forward `move_center(alpha, d)` to every valid minorant.
    fn move_center(&mut self, alpha: Real, d: &DVector) {
        for fidx in 0..self.num_subproblems {
            for i in 0..self.num_minorants_of[fidx] {
                self.minorants[i][fidx].move_center(alpha, d);
            }
        }
    }
}

/// Builder for `MinimalMaster` master problems.
///
/// The minimal master has no tunable options, hence the builder is a unit
/// struct; `Default` is derived instead of the previous hand-written
/// `impl Default`, which did the same thing.
#[derive(Default)]
pub struct Builder;

impl unconstrained::Builder for Builder {
    type MasterProblem = MinimalMaster;

    /// Create a fresh `MinimalMaster`.
    ///
    /// Never fails in practice: `MinimalMaster::new` always returns `Ok`.
    fn build(&mut self) -> Result<MinimalMaster, MinimalMasterError> {
        MinimalMaster::new()
    }
}
Changes to src/master/mod.rs.
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
35
36
37
38
39
40
41











42
43
44
45
46
47
48







-
-
-
-
-
-
-
-
-
-
-







//! * moving the center of the linear functions $\ell_i$ (and the
//!   bounds), i.e. replacing $\hat{f}$ by $d \mapsto \hat{f}(d -
//!   \hat{d})$ for some given $\hat{d} \in \mathbb{R}\^n$.

pub mod boxed;
pub use self::boxed::BoxedMasterProblem;

pub mod unconstrained;
pub use self::unconstrained::UnconstrainedMasterProblem;

pub mod minimal;
pub use self::minimal::MinimalMaster;

// pub mod grb;
// pub use master::grb::GurobiMaster;

pub mod cpx;

pub(crate) mod primalmaster;

use crate::{DVector, Minorant, Real};
use std::error::Error;
use std::result::Result;

/// Callback for subgradient extensions.
173
174
175
176
177
178
179






162
163
164
165
166
167
168
169
170
171
172
173
174







+
+
+
+
+
+
/// A factory for master problems.
///
/// Implementors produce fresh `MasterProblem` instances on demand.
pub trait Builder {
    /// The master problem to be built.
    type MasterProblem: MasterProblem;

    /// Create a new master problem.
    fn build(&mut self) -> Result<Self::MasterProblem, <Self::MasterProblem as MasterProblem>::Err>;
}

/// The full (CPLEX-based) bundle builder.
pub type FullMasterBuilder = boxed::Builder<boxed::unconstrained::cpx::Builder>;

/// The minimal bundle builder.
pub type MinimalMasterBuilder = boxed::Builder<boxed::unconstrained::minimal::Builder>;
Changes to src/master/primalmaster.rs.
14
15
16
17
18
19
20
21

22
23
24
25
26
27
28
14
15
16
17
18
19
20

21
22
23
24
25
26
27
28







-
+







 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see  <http://www.gnu.org/licenses/>
 */

//! A wrapper around master problems to handle primal information.

use super::MasterProblem;
use crate::parallel::SubgradientExtender;
use crate::problem::SubgradientExtender;
use crate::{Aggregatable, Minorant, Real};

use std::collections::HashMap;
use std::ops::{Deref, DerefMut};

/// A wrapper around `MasterProblem` to handle primal information.
///
Deleted src/master/unconstrained.rs.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139











































































































































-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
// Copyright (c) 2016, 2017, 2019 Frank Fischer <frank-fischer@shadow-soft.de>
//
// This program is free software: you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see  <http://www.gnu.org/licenses/>
//

use crate::{DVector, Minorant, Real};

use super::SubgradientExtension;

use std::error::Error;

/**
 * Trait for master problems without box constraints.
 *
 * Implementors of this trait are supposed to solve quadratic
 * optimization problems of the form
 *
 * \\[ \min \left\\{ \hat{f}(d) + \frac{u}{2} \\| d \\|\^2 \colon
 *     d \in \mathbb{R}\^n \right\\}. \\]
 *
 * where $\hat{f}$ is a piecewise linear model, i.e.
 *
 * \\[ \hat{f}(d) = \max \\{ \ell_i(d) = c_i + \langle g_i, d \rangle \colon
 *                           i=1,\dotsc,k \\}
 *                = \max \left\\{ \sum_{i=1}\^k \alpha_i \ell_i(d) \colon
 *                                \alpha \in \Delta \right\\}, \\]
 *
 * where $\Delta := \left\\{ \alpha \in \mathbb{R}\^k \colon \sum_{i=1}\^k
 * \alpha_i = 1 \right\\}$. Note, the unconstrained solver is expected
 * to compute *dual* optimal solutions, i.e. the solver must compute
 * optimal coefficients $\bar{\alpha}$ for the dual problem
 *
 * \\[ \max_{\alpha \in \Delta} \min_{d \in \mathbb{R}\^n}
 *     \sum_{i=1}\^k \alpha_i \ell_i(d) + \frac{u}{2} \\| d \\|\^2. \\]
 */
pub trait UnconstrainedMasterProblem: Send + 'static {
    /// Unique index for a minorant.
    type MinorantIndex: Copy + Eq;

    /// Error type for this master problem.
    type Err: Error + Send + Sync;

    /// Return a new instance of the unconstrained master problem.
    fn new() -> Result<Self, Self::Err>
    where
        Self: Sized;

    /// Return the number of subproblems.
    fn num_subproblems(&self) -> usize;

    /// Set the number of subproblems (different function models.)
    fn set_num_subproblems(&mut self, n: usize) -> Result<(), Self::Err>;

    /// Return the current weight.
    fn weight(&self) -> Real;

    /// Set the weight of the quadratic term, must be > 0.
    fn set_weight(&mut self, weight: Real) -> Result<(), Self::Err>;

    /// Return the number of minorants of subproblem `fidx`.
    fn num_minorants(&self, fidx: usize) -> usize;

    /// Compress the bundle.
    ///
    /// When some minorants are compressed, the callback is called with the
    /// coefficients and indices of the compressed minorants and the index of
    /// the new minorant. The callback may be called several times.
    fn compress<F>(&mut self, f: F) -> Result<(), Self::Err>
    where
        F: FnMut(Self::MinorantIndex, &mut dyn Iterator<Item = (Self::MinorantIndex, Real)>);

    /// Add a new minorant to the model.
    fn add_minorant(&mut self, fidx: usize, minorant: Minorant) -> Result<Self::MinorantIndex, Self::Err>;

    /// Add or move some variables.
    ///
    /// The variables in `changed` have been changed, so the subgradient
    /// information must be updated. Furthermore, `nnew` new variables
    /// are added.
    fn add_vars(
        &mut self,
        nnew: usize,
        changed: &[usize],
        extend_subgradient: &mut SubgradientExtension<Self::MinorantIndex>,
    ) -> Result<(), Self::Err>;

    /// Solve the master problem.
    fn solve(&mut self, eta: &DVector, fbound: Real, augbound: Real, relprec: Real) -> Result<(), Self::Err>;

    /// Return the current dual optimal solution.
    fn dualopt(&self) -> &DVector;

    /// Return the current dual optimal solution value.
    fn dualopt_cutval(&self) -> Real;

    /// Return the multiplier associated with a minorant.
    fn multiplier(&self, min: Self::MinorantIndex) -> Real;

    /// Return the multipliers associated with a subproblem.
    fn opt_multipliers<'a>(&'a self, fidx: usize) -> Box<dyn Iterator<Item = (Self::MinorantIndex, Real)> + 'a>;

    /// Return the value of the current model at the given point.
    fn eval_model(&self, y: &DVector) -> Real;

    /// Aggregate the given minorants according to the current solution.
    ///
    /// The (indices of the) minorants to be aggregated get invalid
    /// after this operation. The function returns the index of the
    /// aggregated minorant along with the coefficients of the convex
    /// combination. The index of the new aggregated minorant might or
    /// might not be one of indices of the original minorants.
    ///
    /// # Error
    /// The indices of the minorants `mins` must belong to subproblem `fidx`.
    fn aggregate(&mut self, fidx: usize, mins: &[usize]) -> Result<(Self::MinorantIndex, DVector), Self::Err>;

    /// Move the center of the master problem along $\alpha \cdot d$.
    fn move_center(&mut self, alpha: Real, d: &DVector);
}

/// A builder for creating unconstrained master problem solvers.
pub trait Builder {
    /// The master problem to be build.
    type MasterProblem: UnconstrainedMasterProblem;

    /// Create a new master problem.
    fn build(&mut self) -> Result<Self::MasterProblem, <Self::MasterProblem as UnconstrainedMasterProblem>::Err>;
}
Changes to src/mcf/problem.rs.
11
12
13
14
15
16
17
18

19
20
21
22

23
24
25
26
27
28
29
11
12
13
14
15
16
17

18
19
20
21

22
23
24
25
26
27
28
29







-
+



-
+







// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see  <http://www.gnu.org/licenses/>
//

use crate::mcf;
use crate::parallel::{
use crate::problem::{
    EvalResult, FirstOrderProblem as ParallelProblem, ResultSender, Update as ParallelUpdate, UpdateSender,
    UpdateState as ParallelUpdateState,
};
use crate::{Aggregatable, DVector, FirstOrderProblem, Minorant, Real, SimpleEvaluation, Update, UpdateState};
use crate::{DVector, Minorant, Real};

use itertools::izip;
use log::{debug, warn};
use num_traits::Float;
use threadpool::ThreadPool;

use std::f64::INFINITY;
181
182
183
184
185
186
187




188
189
190
191
192
193
194
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198







+
+
+
+







            .map(|elem| elem.val * primal[elem.ind])
            .sum::<Real>();
        rhs - lhs
    }
}

impl MMCFProblem {
    pub fn num_subproblems(&self) -> usize {
        self.subs.len()
    }

    pub fn read_mnetgen(basename: &str) -> std::result::Result<MMCFProblem, MMCFReadError> {
        let mut buffer = String::new();
        {
            let mut f = File::open(&format!("{}.nod", basename))?;
            f.read_to_string(&mut buffer)?;
        }
        let fnod = buffer
358
359
360
361
362
363
364
365

366
367
368

369
370
371
372
373
374
375
376
377

378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
362
363
364
365
366
367
368

369
370
371

372


373
374
375
376
377
378

379





























































































































380
381
382
383
384
385
386







-
+


-
+
-
-






-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-







            }
        }

        aggr
    }
}

impl FirstOrderProblem for MMCFProblem {
impl ParallelProblem for MMCFProblem {
    type Err = Error;

    type Primal = Vec<DVector>;
    type Primal = DVector;

    type EvalResult = SimpleEvaluation<Vec<DVector>>;

    fn num_variables(&self) -> usize {
        self.active_constraints.len()
    }

    fn lower_bounds(&self) -> Option<Vec<Real>> {
        Some(vec![0.0; self.active_constraints.len()])
        Some(vec![0.0; self.num_variables()])
    }

    fn upper_bounds(&self) -> Option<Vec<Real>> {
        None
    }

    fn num_subproblems(&self) -> usize {
        if self.multimodel {
            self.subs.len()
        } else {
            1
        }
    }

    fn evaluate(&mut self, fidx: usize, y: &[Real], _nullstep_bound: Real, _relprec: Real) -> Result<Self::EvalResult> {
        let (objective, subg, sol) = if self.multimodel {
            let (objective, subg, sol) = self.subs[fidx]
                .write()
                .unwrap()
                .evaluate(y, self.active_constraints.iter().cloned())?;
            (objective, subg, vec![sol])
        } else {
            let mut objective = 0.0;
            let mut subg = dvec![0.0; y.len()];
            let mut sols = Vec::with_capacity(self.subs.len());
            for sub in &mut self.subs {
                let (obj, sg, sol) = sub
                    .write()
                    .unwrap()
                    .evaluate(y, self.active_constraints.iter().cloned())?;
                objective += obj;
                subg.add_scaled(1.0, &sg);
                sols.push(sol);
            }
            (objective, subg, sols)
        };
        Ok(SimpleEvaluation {
            objective,
            minorants: vec![(
                Minorant {
                    constant: objective,
                    linear: subg,
                },
                sol,
            )],
        })
    }

    fn update(&mut self, state: &UpdateState<Self::Primal>) -> Result<Vec<Update>> {
        if self.inactive_constraints.is_empty() {
            return Ok(vec![]);
        }

        let nold = self.active_constraints.len();
        let subs = &self.subs;

        // if state.step != Step::Descent && !self.active_constraints.is_empty() {
        //     return Ok(vec![]);
        // }

        let newconstraints = self
            .inactive_constraints
            .iter()
            .map(|&cidx| {
                subs.iter()
                    .enumerate()
                    .map(|(fidx, sub)| {
                        let primals = state.aggregated_primals(fidx);
                        let aggr = Aggregatable::combine(primals.into_iter().map(|(alpha, x)| (alpha, &x[0])));
                        sub.read().unwrap().evaluate_constraint(&aggr, cidx)
                    })
                    .sum::<Real>()
            })
            .enumerate()
            .filter_map(|(i, sg)| if sg < 1e-3 { Some(i) } else { None })
            .collect::<Vec<_>>();

        let inactive = &mut self.inactive_constraints;
        self.active_constraints
            .extend(newconstraints.into_iter().rev().map(|i| inactive.swap_remove(i)));

        // *** The following code needs `drain_filter`, which is experimental as
        // of rust 1.36 ***

        // self.active_constraints
        //     .extend(self.inactive_constraints.drain_filter(|&mut cidx| {
        //         subs.iter()
        //             .enumerate()
        //             .map(|(fidx, sub)| {
        //                 let primals = state.aggregated_primals(fidx);
        //                 let aggr = Aggregatable::combine(primals.into_iter().map(|(alpha, x)| (alpha, &x[0])));
        //                 sub.read().unwrap().evaluate_constraint(&aggr, cidx)
        //             })
        //             .sum::<Real>() < -1e-3
        //     }));

        Ok(vec![
            Update::AddVariable {
                lower: 0.0,
                upper: Real::infinity()
            };
            self.active_constraints.len() - nold
        ])
    }

    fn extend_subgradient(&mut self, fidx: usize, primal: &Self::Primal, inds: &[usize]) -> Result<Vec<Real>> {
        let sub = self.subs[fidx].read().unwrap();
        Ok(inds
            .iter()
            .map(|&i| sub.evaluate_constraint(&primal[0], self.active_constraints[i]))
            .collect())
    }
}

impl ParallelProblem for MMCFProblem {
    type Err = <Self as FirstOrderProblem>::Err;

    type Primal = DVector;

    fn num_variables(&self) -> usize {
        FirstOrderProblem::num_variables(self)
    }

    fn lower_bounds(&self) -> Option<Vec<Real>> {
        FirstOrderProblem::lower_bounds(self)
    }

    fn num_subproblems(&self) -> usize {
        self.subs.len()
    }

    fn start(&mut self) {
Deleted src/minorant.rs.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203











































































































































































































-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
// Copyright (c) 2016, 2017, 2018, 2019 Frank Fischer <frank-fischer@shadow-soft.de>
//
// This program is free software: you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see  <http://www.gnu.org/licenses/>
//

//! A linear minorant.

use crate::{DVector, Real};

use std::borrow::Borrow;
use std::fmt;

/// An aggregatable object.
pub trait Aggregatable: Default {
    /// Return a scaled version of `other`, i.e. `alpha * other`.
    fn new_scaled<A>(alpha: Real, other: A) -> Self
    where
        A: Borrow<Self>;

    /// Add a scaled version of `other` to `self`.
    ///
    /// This sets `self = self + alpha * other`.
    fn add_scaled<A>(&mut self, alpha: Real, other: A)
    where
        A: Borrow<Self>;

    /// Return $\sum\_{i=1}\^n alpha_i m_i$.
    ///
    /// If `aggregates` is empty return the default value.
    fn combine<I, A>(aggregates: I) -> Self
    where
        I: IntoIterator<Item = (Real, A)>,
        A: Borrow<Self>,
    {
        let mut it = aggregates.into_iter();
        let mut x;
        if let Some((alpha, y)) = it.next() {
            x = Self::new_scaled(alpha, y);
        } else {
            return Self::default();
        }

        for (alpha, y) in it {
            x.add_scaled(alpha, y);
        }

        x
    }
}

/// Implement for empty tuples.
impl Aggregatable for () {
    fn new_scaled<A>(_alpha: Real, _other: A) -> Self
    where
        A: Borrow<Self>,
    {
    }

    fn add_scaled<A>(&mut self, _alpha: Real, _other: A)
    where
        A: Borrow<Self>,
    {
    }
}

impl Aggregatable for Real {
    fn new_scaled<A>(alpha: Real, other: A) -> Self
    where
        A: Borrow<Self>,
    {
        alpha * other.borrow()
    }

    fn add_scaled<A>(&mut self, alpha: Real, other: A)
    where
        A: Borrow<Self>,
    {
        *self += alpha * other.borrow()
    }
}

/**
 * A linear minorant of a convex function.
 *
 * A linear minorant of a convex function $f \colon \mathbb{R}\^n \to
 * \mathbb{R}$ is a linear function of the form
 *
 *   \\[ l \colon \mathbb{R}\^n \to \mathbb{R}, x \mapsto \langle g, x
 *   \rangle + c \\]
 *
 * such that $l(x) \le f(x)$ for all $x \in \mathbb{R}\^n$.
 */
#[derive(Clone, Debug)]
pub struct Minorant {
    /// The constant term.
    pub constant: Real,

    /// The linear term.
    pub linear: DVector,
}

impl fmt::Display for Minorant {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{} + y * {}", self.constant, self.linear)?;
        Ok(())
    }
}

impl Default for Minorant {
    fn default() -> Minorant {
        Minorant {
            constant: 0.0,
            linear: dvec![],
        }
    }
}

impl Minorant {
    /// Return a new 0 minorant.
    pub fn new(constant: Real, linear: Vec<Real>) -> Minorant {
        Minorant {
            constant,
            linear: DVector(linear),
        }
    }

    /**
     * Evaluate minorant at some point.
     *
     * This function computes $c + \langle g, x \rangle$ for this minorant
     *   \\[\ell \colon \mathbb{R}\^n \to \mathbb{R}, x \mapsto c + \langle g, x \rangle\\]
     * and the given point $x \in \mathbb{R}\^n$.
     */
    pub fn eval(&self, x: &DVector) -> Real {
        self.constant + self.linear.dot(x)
    }

    /**
     * Move the center of the minorant.
     */
    pub fn move_center(&mut self, alpha: Real, d: &DVector) {
        self.constant += alpha * self.linear.dot(d);
    }
}

impl Aggregatable for Minorant {
    fn new_scaled<A>(alpha: Real, other: A) -> Self
    where
        A: Borrow<Self>,
    {
        let m = other.borrow();
        Minorant {
            constant: alpha * m.constant,
            linear: DVector::scaled(&m.linear, alpha),
        }
    }

    fn add_scaled<A>(&mut self, alpha: Real, other: A)
    where
        A: Borrow<Self>,
    {
        let m = other.borrow();
        self.constant += alpha * m.constant;
        self.linear.add_scaled(alpha, &m.linear);
    }
}

impl<T> Aggregatable for Vec<T>
where
    T: Aggregatable,
{
    fn new_scaled<A>(alpha: Real, other: A) -> Self
    where
        A: std::borrow::Borrow<Self>,
    {
        other
            .borrow()
            .iter()
            .map(|y| Aggregatable::new_scaled(alpha, y))
            .collect()
    }

    fn add_scaled<A>(&mut self, alpha: Real, other: A)
    where
        A: std::borrow::Borrow<Self>,
    {
        debug_assert_eq!(self.len(), other.borrow().len(), "Vectors must have the same size");
        for (ref mut x, y) in self.iter_mut().zip(other.borrow()) {
            x.add_scaled(alpha, y)
        }
    }
}
Deleted src/parallel/masterprocess.rs.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274


















































































































































































































































































-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
/*
 * Copyright (c) 2019 Frank Fischer <frank-fischer@shadow-soft.de>
 *
 * This program is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see  <http://www.gnu.org/licenses/>
 */

//! Asynchronous process solving a master problem.

use crossbeam::channel::{unbounded as channel, Receiver, Sender};
use log::{debug, warn};
use std::sync::Arc;
use threadpool::ThreadPool;

use super::problem::{FirstOrderProblem, SubgradientExtender};
use super::solver::Error;
use crate::master::primalmaster::PrimalMaster;
use crate::master::MasterProblem;
use crate::{DVector, Minorant, Real};

/// Configuration information for setting up a master problem.
pub struct MasterConfig {
    /// The number of subproblems.
    pub num_subproblems: usize,
    /// The number of variables.
    pub num_vars: usize,
    /// The lower bounds on the variables.
    pub lower_bounds: Option<DVector>,
    /// The lower bounds on the variables.
    pub upper_bounds: Option<DVector>,
}

/// A task for the master problem.
enum MasterTask<Pr, PErr, M>
where
    M: MasterProblem,
{
    /// Add new variables to the master problem.
    AddVariables(Vec<(Option<usize>, Real, Real)>, Box<SubgradientExtender<Pr, PErr>>),

    /// Add a new minorant for a subfunction to the master problem.
    AddMinorant(usize, Minorant, Pr),

    /// Move the center of the master problem in the given direction.
    MoveCenter(Real, Arc<DVector>),

    /// Start a new computation of the master problem.
    Solve { center_value: Real },

    /// Compress the bundle.
    Compress,

    /// Set the weight parameter of the master problem.
    SetWeight { weight: Real },

    /// Return the current aggregated primal.
    GetAggregatedPrimal {
        subproblem: usize,
        tx: Sender<Result<Pr, M::Err>>,
    },
}

/// The response send from a master process.
///
/// The response contains the evaluation results of the latest
pub struct MasterResponse {
    pub nxt_d: DVector,
    pub nxt_mod: Real,
    pub sgnorm: Real,
    /// The number of internal iterations.
    pub cnt_updates: usize,
}

type ToMasterSender<P, M> = Sender<MasterTask<<P as FirstOrderProblem>::Primal, <P as FirstOrderProblem>::Err, M>>;

type ToMasterReceiver<P, M> = Receiver<MasterTask<<P as FirstOrderProblem>::Primal, <P as FirstOrderProblem>::Err, M>>;

type MasterSender<E> = Sender<Result<MasterResponse, E>>;

pub type MasterReceiver<E> = Receiver<Result<MasterResponse, E>>;

pub struct MasterProcess<P, M>
where
    P: FirstOrderProblem,
    M: MasterProblem,
{
    /// The channel to transmit new tasks to the master problem.
    tx: ToMasterSender<P, M>,

    /// The channel to receive solutions from the master problem.
    pub rx: MasterReceiver<M::Err>,

    phantom: std::marker::PhantomData<M>,
}

impl<P, M> MasterProcess<P, M>
where
    P: FirstOrderProblem,
    P::Primal: Send + 'static,
    P::Err: Into<Box<dyn std::error::Error + Sync + Send>> + 'static,
    M: MasterProblem + Send + 'static,
    M::MinorantIndex: std::hash::Hash,
    M::Err: Send + 'static,
{
    pub fn start(master: M, master_config: MasterConfig, threadpool: &mut ThreadPool) -> Self {
        // Create a pair of communication channels.
        let (to_master_tx, to_master_rx) = channel();
        let (from_master_tx, from_master_rx) = channel();

        // The the master process thread.
        threadpool.execute(move || {
            debug!("Master process started");
            let mut from_master_tx = from_master_tx;
            if let Err(err) = Self::master_main(master, master_config, &mut from_master_tx, to_master_rx) {
                #[allow(unused_must_use)]
                {
                    from_master_tx.send(Err(err));
                }
            }
            debug!("Master proces stopped");
        });

        MasterProcess {
            tx: to_master_tx,
            rx: from_master_rx,
            phantom: std::marker::PhantomData,
        }
    }

    /// Add new variables to the master problem.
    pub fn add_vars(
        &mut self,
        vars: Vec<(Option<usize>, Real, Real)>,
        sgext: Box<SubgradientExtender<P::Primal, P::Err>>,
    ) -> Result<(), Error<P::Err>>
    where
        P::Err: 'static,
    {
        self.tx
            .send(MasterTask::AddVariables(vars, sgext))
            .map_err(|err| Error::Process(err.into()))
    }

    /// Add a new minorant to the master problem model.
    ///
    /// This adds the specified `minorant` with associated `primal` data to the
    /// model of subproblem `i`.
    pub fn add_minorant(&mut self, i: usize, minorant: Minorant, primal: P::Primal) -> Result<(), Error<P::Err>> {
        self.tx
            .send(MasterTask::AddMinorant(i, minorant, primal))
            .map_err(|err| Error::Process(err.into()))
    }

    /// Move the center of the master problem.
    ///
    /// This moves the master problem's center in direction $\\alpha \\cdot d$.
    pub fn move_center(&mut self, alpha: Real, d: Arc<DVector>) -> Result<(), Error<P::Err>> {
        self.tx
            .send(MasterTask::MoveCenter(alpha, d))
            .map_err(|err| Error::Process(err.into()))
    }

    /// Solve the master problem.
    ///
    /// The current function value in the center `center_value`.
    /// Once the master problem is solved the process will send a
    /// [`MasterResponse`] message to the `tx` channel.
    pub fn solve(&mut self, center_value: Real) -> Result<(), Error<P::Err>> {
        self.tx
            .send(MasterTask::Solve { center_value })
            .map_err(|err| Error::Process(err.into()))
    }

    /// Compresses the model.
    pub fn compress(&mut self) -> Result<(), Error<P::Err>> {
        self.tx
            .send(MasterTask::Compress)
            .map_err(|err| Error::Process(err.into()))
    }

    /// Sets the new weight of the proximal term in the master problem.
    pub fn set_weight(&mut self, weight: Real) -> Result<(), Error<P::Err>> {
        self.tx
            .send(MasterTask::SetWeight { weight })
            .map_err(|err| Error::Process(err.into()))
    }

    /// Get the current aggregated primal for a certain subproblem.
    pub fn get_aggregated_primal(&self, subproblem: usize) -> Result<P::Primal, Error<P::Err>> {
        let (tx, rx) = channel();
        self.tx
            .send(MasterTask::GetAggregatedPrimal { subproblem, tx })
            .map_err(|err| Error::Process(err.into()))?;
        rx.recv()
            .map_err(|err| Error::Process(err.into()))?
            .map_err(|err| Error::Master(err.into()))
    }

    /// The main loop of the master process.
    fn master_main(
        master: M,
        master_config: MasterConfig,
        tx: &mut MasterSender<M::Err>,
        rx: ToMasterReceiver<P, M>,
    ) -> Result<(), M::Err> {
        let mut master = PrimalMaster::<_, P::Primal>::new(master);

        // Initialize the master problem.
        master.set_num_subproblems(master_config.num_subproblems)?;
        master.set_vars(
            master_config.num_vars,
            master_config.lower_bounds,
            master_config.upper_bounds,
        )?;

        // The main iteration: wait for new tasks.
        for m in rx {
            match m {
                MasterTask::AddVariables(vars, sgext) => {
                    debug!("master: add {} variables to the subproblem", vars.len());
                    master.add_vars(vars, sgext)?;
                }
                MasterTask::AddMinorant(i, m, primal) => {
                    debug!("master: add minorant to subproblem {}", i);
                    master.add_minorant(i, m, primal)?;
                }
                MasterTask::MoveCenter(alpha, d) => {
                    debug!("master: move center");
                    master.move_center(alpha, &d);
                }
                MasterTask::Compress => {
                    debug!("Compress bundle");
                    master.compress()?;
                }
                MasterTask::Solve { center_value } => {
                    debug!("master: solve with center_value {}", center_value);
                    master.solve(center_value)?;
                    let master_response = MasterResponse {
                        nxt_d: master.get_primopt(),
                        nxt_mod: master.get_primoptval(),
                        sgnorm: master.get_dualoptnorm2().sqrt(),
                        cnt_updates: master.cnt_updates(),
                    };
                    if let Err(err) = tx.send(Ok(master_response)) {
                        warn!("Master process cancelled because of channel error: {}", err);
                        break;
                    }
                }
                MasterTask::SetWeight { weight } => {
                    debug!("master: set weight {}", weight);
                    master.set_weight(weight)?;
                }
                MasterTask::GetAggregatedPrimal { subproblem, tx } => {
                    debug!("master: get aggregated primal for {}", subproblem);
                    if tx.send(master.aggregated_primal(subproblem)).is_err() {
                        warn!("Sending of aggregated primal for {} failed", subproblem);
                    };
                }
            };
        }

        Ok(())
    }
}
Deleted src/parallel/mod.rs.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36




































-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
/*
 * Copyright (c) 2019 Frank Fischer <frank-fischer@shadow-soft.de>
 *
 * This program is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see  <http://www.gnu.org/licenses/>
 */

//! Implementation of a asynchronous parallel proximal bundle method.

// The user-facing problem description: traits and channel types.
mod problem;
pub use self::problem::{
    EvalResult, FirstOrderProblem, ResultSender, SubgradientExtender, Update, UpdateSender, UpdateState,
};

// The driver implementing the asynchronous bundle iteration.
mod solver;
pub use self::solver::Solver;

/// The default bundle solver with general master problem.
pub type DefaultSolver<P> =
    Solver<P, crate::terminator::StandardTerminator, crate::weighter::HKWeighter, crate::FullMasterBuilder>;

/// A bundle solver with a minimal cutting plane model.
pub type NoBundleSolver<P> =
    Solver<P, crate::terminator::StandardTerminator, crate::weighter::HKWeighter, crate::MinimalMasterBuilder>;

// Solver-internal process managing the master problem (not re-exported).
mod masterprocess;
Deleted src/parallel/problem.rs.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190






























































































































































































-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
/*
 * Copyright (c) 2019 Frank Fischer <frank-fischer@shadow-soft.de>
 *
 * This program is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see  <http://www.gnu.org/licenses/>
 */

//! An asynchronous first-order oracle.

use crate::{Aggregatable, DVector, Minorant, Real};
use crossbeam::channel::Sender;
use std::sync::Arc;

/// Evaluation result.
///
/// The result of an evaluation is new information to be made
/// available to the solver and the master problem. There are
/// essentially two types of information:
///
///    1. The (exact) function value of a sub-function at some point.
///    2. A minorant of some sub-function.
#[derive(Debug)]
pub enum EvalResult<I, P> {
    /// The objective value at some point.
    ObjectiveValue {
        /// Caller-supplied identifier passed through from `evaluate`.
        index: I,
        /// The objective value (or an upper bound on it).
        value: Real,
    },
    /// A minorant with an associated primal.
    Minorant {
        /// Caller-supplied identifier passed through from `evaluate`.
        index: I,
        /// The new minorant (cutting plane) of the sub-function.
        minorant: Minorant,
        /// Primal information associated with the minorant.
        primal: P,
    },
}

/// Channel to send evaluation results to.
pub type ResultSender<I, P, E> = Sender<Result<EvalResult<I, P>, E>>;

/// Problem update information.
///
/// The solver calls the `update` method of the problem regularly.
/// This method can modify the problem by adding (or moving)
/// variables. The possible updates are encoded in this type.
pub enum Update<I, P, E> {
    /// Add new variables with bounds.
    ///
    /// The initial value of each variable will be the feasible value
    /// closest to 0.
    AddVariables {
        /// Caller-supplied identifier passed through from `update`.
        index: I,
        /// Per-variable `(lower, upper)` bounds for the new variables.
        bounds: Vec<(Real, Real)>,
        /// Callback used to extend existing minorants to the new variables.
        sgext: Box<SubgradientExtender<P, E>>,
    },
}

/// The subgradient extender is a callback used to update existing minorants
/// given their associated primal data.
pub type SubgradientExtender<P, E> = dyn FnMut(usize, &P, &[usize]) -> Result<DVector, E> + Send;

/// This trait provides information available in the
/// [`FirstOrderProblem::update`] method.
pub trait UpdateState<P> {
    /// Whether the last step was a descent step.
    fn was_descent(&self) -> bool;

    /// Whether the last step was a null step.
    ///
    /// By definition the exact complement of [`UpdateState::was_descent`].
    fn was_null(&self) -> bool {
        !self.was_descent()
    }

    /// The (old) current center of stability.
    fn center(&self) -> Arc<DVector>;

    /// The candidate point.
    ///
    /// After a descent step, i.e. if [`UpdateState::was_descent`] is `true`,
    /// this is the new center of stability.
    fn candidate(&self) -> Arc<DVector>;

    /// The current aggregated primal information.
    ///
    /// Return the aggregated primal information for the given subproblem.
    fn aggregated_primal(&self, i: usize) -> P;
}

/// Channel to send problem updates to.
pub type UpdateSender<I, P, E> = Sender<Result<Update<I, P, E>, E>>;

/// Trait for implementing a first-order problem description.
///
/// All computations made by an implementation are supposed to
/// be asynchronous. Hence, the interface is slightly different
/// compared with [`crate::FirstOrderProblem`].
pub trait FirstOrderProblem {
    /// Error raised by this oracle.
    type Err;

    /// The primal information associated with a minorant.
    type Primal: Aggregatable + Send + 'static;

    /// Return the number of variables.
    fn num_variables(&self) -> usize;

    /// Return the lower bounds on the variables.
    ///
    /// If no lower bounds are specified, $-\infty$ is assumed.
    ///
    /// The lower bounds must be less than or equal to the upper bounds.
    fn lower_bounds(&self) -> Option<Vec<Real>> {
        None
    }

    /// Return the upper bounds on the variables.
    ///
    /// If no upper bounds are specified, $+\infty$ is assumed.
    ///
    /// The upper bounds must be greater than or equal to the lower bounds.
    fn upper_bounds(&self) -> Option<Vec<Real>> {
        None
    }

    /// Return the number of subproblems.
    fn num_subproblems(&self) -> usize {
        1
    }

    /// Start background processes.
    ///
    /// This method is called right before the solver starts the solution process.
    /// It can be used to set up any background tasks required for the evaluation
    /// of the subfunctions.
    ///
    /// Remember that background processes should be cleaned up when the problem
    /// is deleted (e.g. by implementing the [`Drop`] trait).
    ///
    /// The default implementation does nothing.
    fn start(&mut self) {}

    /// Stop background processes.
    ///
    /// This method is called right after the solver stops the solution process.
    /// It can be used to stop any background tasks required for the evaluation
    /// of the subfunctions.
    ///
    /// A correct implementation should also clean up all processes from its
    /// [`Drop`] implementation.
    ///
    /// The default implementation does nothing.
    fn stop(&mut self) {}

    /// Start the evaluation of the i^th subproblem at the given point.
    ///
    /// The results of the evaluation should be passed to the provided channel.
    /// In order to work correctly, the results must contain (an upper bound on)
    /// the objective value at $y$ as well as at least one subgradient centered
    /// at $y$ eventually.
    fn evaluate<I: Send + Copy + 'static>(
        &mut self,
        i: usize,
        y: Arc<DVector>,
        index: I,
        tx: ResultSender<I, Self::Primal, Self::Err>,
    ) -> Result<(), Self::Err>;

    /// Called to update the problem.
    ///
    /// This method is called regularly by the solver. The problem should send problem update
    /// information (e.g. adding new variables) to the provided channel.
    ///
    /// The updates might be generated asynchronously.
    ///
    /// The default implementation does nothing.
    fn update<I, U>(
        &mut self,
        _state: &U,
        _index: I,
        _tx: UpdateSender<I, Self::Primal, Self::Err>,
    ) -> Result<(), Self::Err>
    where
        U: UpdateState<Self::Primal>,
    {
        Ok(())
    }
}
Deleted src/parallel/solver.rs.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
























































































































































































































































































































































































































































































































































































































































































































































































































































































-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
/*
 * Copyright (c) 2019 Frank Fischer <frank-fischer@shadow-soft.de>
 *
 * This program is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see  <http://www.gnu.org/licenses/>
 */

//! An asynchronous parallel bundle solver.

use crossbeam::channel::{select, unbounded as channel, Receiver, Sender};
use log::{debug, info};
use num_cpus;
use num_traits::Float;
use std::sync::Arc;
use std::time::Instant;
use threadpool::ThreadPool;

use crate::{DVector, Real};

use super::masterprocess::{MasterConfig, MasterProcess, MasterResponse};
use super::problem::{EvalResult, FirstOrderProblem, Update, UpdateState};
use crate::master::{self, MasterProblem};
use crate::solver::{SolverParams, Step};
use crate::terminator::{StandardTerminatable, StandardTerminator, Terminator};
use crate::weighter::{HKWeightable, HKWeighter, Weighter};

/// The default iteration limit.
pub const DEFAULT_ITERATION_LIMIT: usize = 10_000;

/// Error raised by the parallel bundle [`Solver`].
#[derive(Debug)]
pub enum Error<E> {
    /// An error raised when creating a new master problem solver.
    BuildMaster(Box<dyn std::error::Error>),
    /// An error raised by the master problem process.
    Master(Box<dyn std::error::Error>),
    /// The iteration limit has been reached.
    IterationLimit { limit: usize },
    /// An error raised by a subproblem evaluation.
    Evaluation(E),
    /// An error raised by a subproblem update.
    Update(E),
    /// The dimension of some data is wrong.
    Dimension(String),
    /// Invalid bounds for a variable.
    InvalidBounds { lower: Real, upper: Real },
    /// The value of a variable is outside its bounds.
    ViolatedBounds { lower: Real, upper: Real, value: Real },
    /// The variable index is out of bounds.
    InvalidVariable { index: usize, nvars: usize },
    /// An error occurred in a subprocess.
    Process(Box<dyn std::error::Error>),
    /// A method requiring an initialized solver has been called.
    NotInitialized,
    /// The problem has not been solved yet.
    NotSolved,
}

impl<E> std::fmt::Display for Error<E>
where
    E: std::fmt::Display,
{
    /// Format the error for user-facing output.
    ///
    /// All arms use `write!` (no trailing newline): the original mixed
    /// `writeln!` and `write!`, so some variants printed a spurious
    /// newline. `Display` implementations should leave line termination
    /// to the caller (e.g. `println!("{}", err)`).
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::result::Result<(), std::fmt::Error> {
        use Error::*;
        match self {
            BuildMaster(err) => write!(fmt, "Cannot create master problem solver: {}", err),
            Master(err) => write!(fmt, "Error in master problem: {}", err),
            IterationLimit { limit } => write!(fmt, "The iteration limit has been reached: {}", limit),
            Evaluation(err) => write!(fmt, "Error in subproblem evaluation: {}", err),
            Update(err) => write!(fmt, "Error in subproblem update: {}", err),
            Dimension(what) => write!(fmt, "Wrong dimension for {}", what),
            InvalidBounds { lower, upper } => write!(fmt, "Invalid bounds, lower:{}, upper:{}", lower, upper),
            ViolatedBounds { lower, upper, value } => write!(
                fmt,
                "Violated bounds, lower:{}, upper:{}, value:{}",
                lower, upper, value
            ),
            InvalidVariable { index, nvars } => {
                write!(fmt, "Variable index out of bounds, got:{} must be < {}", index, nvars)
            }
            Process(err) => write!(fmt, "Error in subprocess: {}", err),
            NotInitialized => write!(fmt, "The solver must be initialized (called Solver::init()?)"),
            NotSolved => write!(fmt, "The problem has not been solved yet"),
        }
    }
}

impl<E> std::error::Error for Error<E>
where
    E: std::error::Error + 'static,
{
    /// Return the underlying cause of this error, if any.
    ///
    /// Fix: the `Update(E)` variant wraps an inner error exactly like
    /// `Evaluation(E)`, but was previously swallowed by the catch-all
    /// arm, making update errors invisible to error-source chains.
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        use Error::*;
        match self {
            BuildMaster(err) => Some(err.as_ref()),
            Master(err) => Some(err.as_ref()),
            Evaluation(err) => Some(err),
            Update(err) => Some(err),
            Process(err) => Some(err.as_ref()),
            _ => None,
        }
    }
}

/// Sending half of the channel on which subproblem evaluations report
/// their results (or errors) back to the solver.
type ClientSender<P> =
    Sender<std::result::Result<EvalResult<usize, <P as FirstOrderProblem>::Primal>, <P as FirstOrderProblem>::Err>>;

/// Receiving half of the channel on which the solver collects subproblem
/// evaluation results.
type ClientReceiver<P> =
    Receiver<std::result::Result<EvalResult<usize, <P as FirstOrderProblem>::Primal>, <P as FirstOrderProblem>::Err>>;

/// Parameters for tuning the solver.
pub type Parameters = SolverParams;

/// Mutable algorithmic state of the parallel bundle [`Solver`].
///
/// Holds the center of stability and the scalar quantities that the
/// terminator ([`StandardTerminatable`]) and weight-update heuristic
/// ([`HKWeightable`]) read.
pub struct SolverData {
    /// Current center of stability.
    cur_y: DVector,

    /// Function value in the current point.
    cur_val: Real,

    /// Function value at the current candidate.
    nxt_val: Real,

    /// Model value at the current candidate.
    nxt_mod: Real,

    /// The value of the new minorant in the current center.
    new_cutval: Real,

    /// The current expected progress.
    ///
    /// This value is actually `cur_val - nxt_val`. We store it separately only
    /// for debugging purposes because after a descent step `cur_val` will be
    /// changed and we could not see the "old" expected progress anymore that
    /// led to the descent step.
    expected_progress: Real,

    /// Norm of current aggregated subgradient.
    sgnorm: Real,

    /// The currently used master problem weight.
    cur_weight: Real,
}

impl SolverData {
    /// Reset solver data to initial values.
    ///
    /// This means that almost everything is set to +infinity so that
    /// a null-step is forced after the first evaluation.
    fn init(&mut self, y: DVector) {
        self.cur_y = y;
        self.cur_val = Real::infinity();
        self.nxt_val = Real::infinity();
        self.nxt_mod = -Real::infinity();
        self.new_cutval = -Real::infinity();
        self.expected_progress = Real::infinity();
        self.sgnorm = Real::infinity();
        self.cur_weight = 1.0;
    }
}

// Expose the quantities the standard termination criterion needs.
impl StandardTerminatable for SolverData {
    /// Function value at the current center of stability.
    fn center_value(&self) -> Real {
        self.cur_val
    }

    /// Expected progress of the last step (`cur_val - nxt_val` at the
    /// time of the step decision; see the `expected_progress` field).
    fn expected_progress(&self) -> Real {
        self.expected_progress
    }
}

// Expose the quantities the Helmberg/Kiwiel weight-update heuristic needs.
impl HKWeightable for SolverData {
    /// The weight currently used in the master problem.
    fn current_weight(&self) -> Real {
        self.cur_weight
    }

    /// The current center of stability.
    fn center(&self) -> &DVector {
        &self.cur_y
    }

    /// Function value at the center of stability.
    fn center_value(&self) -> Real {
        self.cur_val
    }

    /// Function value at the current candidate.
    fn candidate_value(&self) -> Real {
        self.nxt_val
    }

    /// Model value at the current candidate.
    fn candidate_model(&self) -> Real {
        self.nxt_mod
    }

    /// Value of the new minorant evaluated in the current center.
    fn new_cutvalue(&self) -> Real {
        self.new_cutval
    }

    /// Norm of the current aggregated subgradient.
    fn sgnorm(&self) -> Real {
        self.sgnorm
    }
}

/// Internal data used during the main iteration loop.
struct IterData {
    /// Maximal number of iterations.
    max_iter: usize,
    /// Number of iterations performed so far.
    cnt_iter: usize,
    /// Number of problem updates performed so far.
    // NOTE(review): exact semantics depend on the (not shown) main loop — confirm.
    cnt_updates: usize,
    /// Per-subproblem objective upper bounds for the current candidate
    /// (initialized to +infinity in `new`).
    nxt_ubs: Vec<Real>,
    /// Number of subproblems whose upper bound is still outstanding.
    cnt_remaining_ubs: usize,
    /// Per-subproblem cut values (initialized to -infinity in `new`).
    nxt_cutvals: Vec<Real>,
    /// Number of subproblems whose minorant is still outstanding.
    cnt_remaining_mins: usize,
    /// Current direction vector, one entry per variable.
    // NOTE(review): presumably candidate minus center — confirm against solver loop.
    nxt_d: Arc<DVector>,
    /// The current candidate point.
    nxt_y: Arc<DVector>,
    /// True if the problem has been updated after the last evaluation.
    updated: bool,
}

impl IterData {
    /// Create iteration bookkeeping for `num_subproblems` subproblems of
    /// dimension `num_variables`, limited to `max_iter` iterations.
    ///
    /// Upper bounds start at +infinity and cut values at -infinity, so
    /// every subproblem initially counts as outstanding.
    fn new(num_subproblems: usize, num_variables: usize, max_iter: usize) -> Self {
        let inf = Real::infinity();
        Self {
            cnt_iter: 0,
            cnt_updates: 0,
            max_iter,
            nxt_ubs: vec![inf; num_subproblems],
            nxt_cutvals: vec![-inf; num_subproblems],
            cnt_remaining_ubs: num_subproblems,
            cnt_remaining_mins: num_subproblems,
            nxt_d: Arc::new(dvec![0.0; num_variables]),
            nxt_y: Arc::new(dvec![]),
            updated: true,
        }
    }
}

/// Data providing access for updating the problem.
///
/// Borrows the solver's per-iteration state for the duration of one
/// call to [`FirstOrderProblem::update`].
struct UpdateData<'a, P, M>
where
    P: FirstOrderProblem,
    M: MasterProblem,
{
    /// Type of step.
    step: Step,

    /// Current center of stability.
    cur_y: &'a DVector,

    /// Current candidate.
    nxt_y: &'a Arc<DVector>,

    /// The master process.
    master_proc: &'a MasterProcess<P, M>,
}

impl<'a, P, M> UpdateState<P::Primal> for UpdateData<'a, P, M>
where
    P: FirstOrderProblem,
    P::Err: Into<Box<dyn std::error::Error + Sync + Send>> + 'static,
    M: MasterProblem,
    M::MinorantIndex: std::hash::Hash,
{
    /// Whether the last step was a descent step.
    fn was_descent(&self) -> bool {
        self.step == Step::Descent
    }

    /// The (old) center of stability, cloned into a fresh `Arc`.
    fn center(&self) -> Arc<DVector> {
        Arc::new(self.cur_y.clone())
    }

    /// The current candidate point (cheap `Arc` clone, no data copy).
    fn candidate(&self) -> Arc<DVector> {
        self.nxt_y.clone()
    }

    /// Aggregated primal information for subproblem `i`.
    ///
    /// Panics if the master process cannot deliver the primal; failure
    /// here is treated as an unrecoverable internal error.
    fn aggregated_primal(&self, i: usize) -> P::Primal {
        // `map_err` replaces the channel error with a plain `String` so
        // that `expect` (which requires `E: Debug`) can be used below.
        self.master_proc
            .get_aggregated_primal(i)
            .map_err(|_| "get_aggregated_primal".to_string())
            .expect("Cannot get aggregated primal from master process")
    }
}

/// Implementation of a parallel bundle method.
pub struct Solver<P, T = StandardTerminator, W = HKWeighter, M = crate::FullMasterBuilder>
where
    P: FirstOrderProblem,
    M: master::Builder,
{
    /// Parameters for the solver.
    pub params: Parameters,

    /// Termination predicate.
    pub terminator: T,

    /// Weighter heuristic (chooses the proximal weight after each step).
    pub weighter: W,

    /// The threadpool of the solver.
    pub threadpool: ThreadPool,

    /// The master problem builder.
    pub master: M,

    /// The first order problem.
    problem: P,

    /// The algorithm data.
    data: SolverData,

    /// The master problem process (`None` until [`Solver::init`] ran).
    master_proc: Option<MasterProcess<P, M::MasterProblem>>,

    /// The sending half of the channel on which subproblems deliver
    /// their evaluation results (cloned into each `evaluate` call).
    client_tx: Option<ClientSender<P>>,

    /// The receiving half of the channel carrying the evaluation results
    /// from subproblems.
    client_rx: Option<ClientReceiver<P>>,

    /// Number of descent steps.
    cnt_descent: usize,

    /// Number of null steps.
    cnt_null: usize,

    /// Number of function evaluation.
    cnt_evals: usize,

    /// Time when the solution process started.
    ///
    /// This is actually the time of the last call to `Solver::init`.
    start_time: Instant,
}

impl<P, T, W, M> Solver<P, T, W, M>
where
    P: FirstOrderProblem,
    P::Err: Into<Box<dyn std::error::Error + Sync + Send>> + 'static,
    T: Terminator<SolverData> + Default,
    W: Weighter<SolverData> + Default,
    M: master::Builder,
    M::MasterProblem: MasterProblem,
    <M::MasterProblem as MasterProblem>::MinorantIndex: std::hash::Hash,
{
    /// Create a new parallel bundle solver.
    pub fn new(problem: P) -> Self
    where
        M: Default,
    {
        Solver {
            params: Parameters::default(),
            terminator: Default::default(),
            weighter: Default::default(),
            problem,
            data: SolverData {
                cur_y: dvec![],
                cur_val: 0.0,
                nxt_val: 0.0,
                nxt_mod: 0.0,
                new_cutval: 0.0,
                expected_progress: 0.0,
                sgnorm: 0.0,
                cur_weight: 1.0,
            },

            threadpool: ThreadPool::with_name("Parallel bundle solver".to_string(), num_cpus::get()),
            master: M::default(),
            master_proc: None,
            client_tx: None,
            client_rx: None,

            cnt_descent: 0,
            cnt_null: 0,
            cnt_evals: 0,

            start_time: Instant::now(),
        }
    }

    /// Create a new parallel bundle solver using the given master problem builder.
    pub fn with_master(problem: P, master: M) -> Self {
        // All scalar statistics start at zero; the proximal weight starts at
        // 1.0 and is replaced by the weighter after the first master solve.
        let data = SolverData {
            cur_y: dvec![],
            cur_val: 0.0,
            nxt_val: 0.0,
            nxt_mod: 0.0,
            new_cutval: 0.0,
            expected_progress: 0.0,
            sgnorm: 0.0,
            cur_weight: 1.0,
        };

        Solver {
            params: Parameters::default(),
            terminator: Default::default(),
            weighter: Default::default(),
            problem,
            data,
            threadpool: ThreadPool::with_name("Parallel bundle solver".to_string(), num_cpus::get()),
            master,
            // Channels and the master process are created lazily in `init`.
            master_proc: None,
            client_tx: None,
            client_rx: None,
            cnt_descent: 0,
            cnt_null: 0,
            cnt_evals: 0,
            start_time: Instant::now(),
        }
    }

    /// Return the underlying threadpool.
    ///
    /// In order to use the same threadpool for concurrent processes,
    /// just clone the returned `ThreadPool`.
    pub fn threadpool(&self) -> &ThreadPool {
        &self.threadpool
    }

    /// Set the threadpool.
    ///
    /// This function allows to use a specific threadpool for all processes
    /// spawned by the solver. Note that this does not involve any threads
    /// used by the problem because the solver is not responsible for executing
    /// the evaluation process of the subproblems. However, the problem might
    /// use the same threadpool as the solver.
    ///
    /// Should be called before [`Solver::init`]; the master process is
    /// spawned on whatever pool is set at that time.
    pub fn set_threadpool(&mut self, threadpool: ThreadPool) {
        self.threadpool = threadpool;
    }

    /// Return the current problem associated with the solver (shared borrow).
    pub fn problem(&self) -> &P {
        &self.problem
    }

    /// Initialize the solver.
    ///
    /// This will reset the internal data structures so that a new fresh
    /// solution process can be started.
    ///
    /// It will also setup all worker processes.
    ///
    /// This function is automatically called by [`Solver::solve`].
    pub fn init(&mut self) -> Result<(), Error<P::Err>> {
        debug!("Initialize solver");

        let n = self.problem.num_variables();
        let m = self.problem.num_subproblems();

        // Reset the center to the origin and clear all step counters.
        self.data.init(dvec![0.0; n]);
        self.cnt_descent = 0;
        self.cnt_null = 0;
        self.cnt_evals = 0;

        // Fresh channel over which the subproblem oracles deliver their
        // evaluation results.
        let (tx, rx) = channel();
        self.client_tx = Some(tx);
        self.client_rx = Some(rx);

        let master_config = MasterConfig {
            num_subproblems: m,
            num_vars: n,
            lower_bounds: self.problem.lower_bounds().map(DVector),
            upper_bounds: self.problem.upper_bounds().map(DVector),
        };

        // Bounds, if given, must have exactly one entry per variable.
        if master_config
            .lower_bounds
            .as_ref()
            .map(|lb| lb.len() != n)
            .unwrap_or(false)
        {
            return Err(Error::Dimension("lower bounds".to_string()));
        }
        if master_config
            .upper_bounds
            .as_ref()
            .map(|ub| ub.len() != n)
            .unwrap_or(false)
        {
            return Err(Error::Dimension("upper bounds".to_string()));
        }

        debug!("Start master process");
        self.master_proc = Some(MasterProcess::start(
            self.master.build().map_err(|err| Error::BuildMaster(err.into()))?,
            master_config,
            &mut self.threadpool,
        ));

        debug!("Initial problem evaluation");
        // We need an initial evaluation of all oracles for the first center.
        let y = Arc::new(self.data.cur_y.clone());
        for i in 0..m {
            self.problem
                .evaluate(i, y.clone(), i, self.client_tx.clone().unwrap())
                .map_err(Error::Evaluation)?;
        }

        debug!("Initialization complete");

        self.start_time = Instant::now();

        Ok(())
    }

    /// Solve the problem with the default maximal iteration limit [`DEFAULT_ITERATION_LIMIT`].
    ///
    /// Equivalent to `self.solve_with_limit(DEFAULT_ITERATION_LIMIT)`.
    pub fn solve(&mut self) -> Result<(), Error<P::Err>> {
        self.solve_with_limit(DEFAULT_ITERATION_LIMIT)
    }

    /// Solve the problem with a maximal iteration limit.
    pub fn solve_with_limit(&mut self, limit: usize) -> Result<(), Error<P::Err>> {
        // First initialize the internal data structures.
        self.init()?;

        if self.solve_iter(limit)? {
            Ok(())
        } else {
            Err(Error::IterationLimit { limit })
        }
    }

    /// Solve the problem but stop after at most `niter` iterations.
    ///
    /// The function returns `Ok(true)` if the termination criterion
    /// has been satisfied. Otherwise it returns `Ok(false)` or an
    /// error code.
    ///
    /// If this function is called again, the solution process is
    /// continued from the previous point. Because of this one *must*
    /// call `init()` before the first call to this function.
    pub fn solve_iter(&mut self, niter: usize) -> Result<bool, Error<P::Err>> {
        debug!("Start solving up to {} iterations", niter);

        let mut itdata = IterData::new(self.problem.num_subproblems(), self.problem.num_variables(), niter);

        // Event loop: react to whichever message arrives first — an oracle
        // evaluation result or a master problem solution.
        loop {
            select! {
                recv(self.client_rx.as_ref().ok_or(Error::NotInitialized)?) -> msg => {
                    // An evaluation result (objective value or minorant) from
                    // one of the subproblem oracles. The outer `map_err`
                    // handles a closed channel, the inner one an oracle error.
                    let msg = msg
                        .map_err(|err| Error::Process(err.into()))?
                        .map_err(Error::Evaluation)?;
                    if self.handle_client_response(msg, &mut itdata)? {
                        // Iteration limit reached without termination.
                        return Ok(false);
                    }
                },
                recv(self.master_proc.as_ref().ok_or(Error::NotInitialized)?.rx) -> msg => {
                    debug!("Receive master response");
                    // Receive result (new candidate) from the master
                    let master_res = msg
                        .map_err(|err| Error::Process(err.into()))?
                        .map_err(|err| Error::Master(err.into()))?;

                    if self.handle_master_response(master_res, &mut itdata)? {
                        // Termination criterion satisfied.
                        return Ok(true);
                    }
                },
            }
        }
    }

    /// Handle a response from a subproblem evaluation.
    ///
    /// Records the received objective value or minorant in `itdata`. Once
    /// every subproblem has delivered both an upper bound and at least one
    /// minorant, the descent/null-step decision is made, the weighter is
    /// consulted, the problem may be updated, and a new master solve is
    /// triggered.
    ///
    /// The function returns `Ok(true)` if the final iteration count has been reached.
    fn handle_client_response(
        &mut self,
        msg: EvalResult<usize, <P as FirstOrderProblem>::Primal>,
        itdata: &mut IterData,
    ) -> Result<bool, Error<P::Err>> {
        let master = self.master_proc.as_mut().ok_or(Error::NotInitialized)?;
        match msg {
            EvalResult::ObjectiveValue { index, value } => {
                debug!("Receive objective from subproblem {}: {}", index, value);
                // An infinite entry marks "nothing received yet" for this
                // subproblem in the current iteration.
                if itdata.nxt_ubs[index].is_infinite() {
                    itdata.cnt_remaining_ubs -= 1;
                }
                // Keep the best (smallest) upper bound per subproblem.
                itdata.nxt_ubs[index] = itdata.nxt_ubs[index].min(value);
            }
            EvalResult::Minorant {
                index,
                mut minorant,
                primal,
            } => {
                debug!("Receive minorant from subproblem {}", index);
                if itdata.nxt_cutvals[index].is_infinite() {
                    itdata.cnt_remaining_mins -= 1;
                }
                // Move the center of the minorant from the candidate back to
                // cur_y (shift by -nxt_d).
                minorant.move_center(-1.0, &itdata.nxt_d);
                // Track the best (largest) cut value per subproblem.
                itdata.nxt_cutvals[index] = itdata.nxt_cutvals[index].max(minorant.constant);
                // add minorant to master problem
                master.add_minorant(index, minorant, primal)?;
            }
        }

        if itdata.cnt_remaining_ubs > 0 || itdata.cnt_remaining_mins > 0 {
            // Haven't received data from all subproblems, yet.
            return Ok(false);
        }

        // All subproblems have been evaluated, do a step.
        let nxt_ub = itdata.nxt_ubs.iter().sum::<Real>();
        let descent_bnd = Self::get_descent_bound(self.params.acceptance_factor, &self.data);

        self.data.nxt_val = nxt_ub;
        self.data.new_cutval = itdata.nxt_cutvals.iter().sum::<Real>();

        debug!("Step");
        debug!("  cur_val    ={}", self.data.cur_val);
        debug!("  nxt_mod    ={}", self.data.nxt_mod);
        debug!("  nxt_ub     ={}", nxt_ub);
        debug!("  descent_bnd={}", descent_bnd);

        itdata.updated = false;
        let step;
        if self.data.cur_val.is_infinite() {
            // This is the first evaluation. We effectively get
            // the function value at the current center but
            // we do not have a model estimate yet. Hence, we do not know
            // a good guess for the weight.
            step = Step::Descent;
            self.data.cur_val = nxt_ub;
            // An infinite weight signals handle_master_response to pick the
            // initial weight from the first master solution.
            self.data.cur_weight = Real::infinity();
            master.set_weight(1.0)?;

            itdata.updated = true;

            debug!("First Step");
            debug!("  cur_val={}", self.data.cur_val);
            debug!("  cur_y={}", self.data.cur_y);
        } else if nxt_ub <= descent_bnd {
            // Sufficient decrease: accept the candidate as the new center.
            step = Step::Descent;
            self.cnt_descent += 1;

            // Note that we must update the weight *before* we
            // change the internal data, so the old information
            // that caused the descent step is still available.
            self.data.cur_weight = self.weighter.descent_weight(&self.data);
            self.data.cur_y = itdata.nxt_y.as_ref().clone();
            self.data.cur_val = nxt_ub;

            master.move_center(1.0, itdata.nxt_d.clone())?;
            master.set_weight(self.data.cur_weight)?;

            debug!("Descent Step");
            debug!("  dir ={}", itdata.nxt_d);
            debug!("  newy={}", self.data.cur_y);
        } else {
            // Not enough decrease: keep the center, only the model improved.
            step = Step::Null;
            self.cnt_null += 1;
            self.data.cur_weight = self.weighter.null_weight(&self.data);
            master.set_weight(self.data.cur_weight)?;
        }

        Self::show_info(
            &self.start_time,
            step,
            &self.data,
            self.cnt_descent,
            self.cnt_null,
            itdata.cnt_updates,
        );
        itdata.cnt_iter += 1;

        // Update problem.
        if Self::update_problem(&mut self.problem, step, &mut self.data, itdata, master)? {
            itdata.updated = true;
        }

        // Compute the new candidate. The main loop will wait for the result of
        // this solution process of the master problem.
        master.solve(self.data.cur_val)?;

        Ok(itdata.cnt_iter >= itdata.max_iter)
    }

    /// Handle a solution of the master problem.
    ///
    /// Stores the new model value and subgradient norm, checks termination,
    /// compresses the bundle, computes the new candidate point and restarts
    /// the evaluation of all subproblems at it.
    ///
    /// Returns `Ok(true)` iff the termination criterion is satisfied.
    fn handle_master_response(
        &mut self,
        master_res: MasterResponse,
        itdata: &mut IterData,
    ) -> Result<bool, Error<P::Err>> {
        let master = self.master_proc.as_mut().ok_or(Error::NotInitialized)?;

        self.data.nxt_mod = master_res.nxt_mod;
        self.data.sgnorm = master_res.sgnorm;
        self.data.expected_progress = self.data.cur_val - self.data.nxt_mod;
        itdata.cnt_updates = master_res.cnt_updates;

        // If this is the very first solution of the model,
        // we use its result to make a good guess for the initial weight
        // of the proximal term and resolve.
        if self.data.cur_weight.is_infinite() {
            self.data.cur_weight = self.weighter.initial_weight(&self.data);
            master.set_weight(self.data.cur_weight)?;
            master.solve(self.data.cur_val)?;
            return Ok(false);
        }

        // Only terminate when the problem has not just been updated: an
        // update invalidates the termination measures.
        if self.terminator.terminate(&self.data) && !itdata.updated {
            Self::show_info(
                &self.start_time,
                Step::Term,
                &self.data,
                self.cnt_descent,
                self.cnt_null,
                itdata.cnt_updates,
            );
            info!("Termination criterion satisfied");
            return Ok(true);
        }

        // Compress bundle
        master.compress()?;

        // Compute new candidate: nxt_y = cur_y + nxt_d.
        let mut next_y = dvec![];
        itdata.nxt_d = Arc::new(master_res.nxt_d);
        next_y.add(&self.data.cur_y, &itdata.nxt_d);
        itdata.nxt_y = Arc::new(next_y);

        // Reset evaluation data (back to the "nothing received" sentinels).
        itdata.nxt_ubs.clear();
        itdata.nxt_ubs.resize(self.problem.num_subproblems(), Real::infinity());
        itdata.cnt_remaining_ubs = self.problem.num_subproblems();
        itdata.nxt_cutvals.clear();
        itdata
            .nxt_cutvals
            .resize(self.problem.num_subproblems(), -Real::infinity());
        itdata.cnt_remaining_mins = self.problem.num_subproblems();

        // Start evaluation of all subproblems at the new candidate.
        let client_tx = self.client_tx.as_ref().ok_or(Error::NotInitialized)?;
        for i in 0..self.problem.num_subproblems() {
            self.problem
                .evaluate(i, itdata.nxt_y.clone(), i, client_tx.clone())
                .map_err(Error::Evaluation)?;
        }
        Ok(false)
    }

    /// Query the problem for updates (e.g. new variables) and apply them.
    ///
    /// Returns `Ok(true)` iff at least one update has been received.
    fn update_problem(
        problem: &mut P,
        step: Step,
        data: &mut SolverData,
        itdata: &mut IterData,
        master_proc: &mut MasterProcess<P, M::MasterProblem>,
    ) -> Result<bool, Error<P::Err>> {
        let (update_tx, update_rx) = channel();
        problem
            .update(
                &UpdateData {
                    cur_y: &data.cur_y,
                    nxt_y: &itdata.nxt_y,
                    step,
                    master_proc,
                },
                itdata.cnt_iter,
                update_tx,
            )
            .map_err(Error::Update)?;

        // `update_tx` was moved into `update`; the loop below ends once all
        // senders have been dropped.
        let mut have_update = false;
        for update in update_rx {
            let update = update.map_err(Error::Update)?;
            have_update = true;
            match update {
                Update::AddVariables { bounds, sgext, .. } => {
                    let mut newvars = Vec::with_capacity(bounds.len());
                    for (lower, upper) in bounds {
                        if lower > upper {
                            return Err(Error::InvalidBounds { lower, upper });
                        }
                        // Initial value: the feasible point closest to 0.
                        let value = if lower > 0.0 {
                            lower
                        } else if upper < 0.0 {
                            upper
                        } else {
                            0.0
                        };
                        //self.bounds.push((lower, upper));
                        // NOTE(review): the first tuple element (an optional
                        // index of an existing variable) is always `None` here,
                        // so the "moved variables" branch below never runs —
                        // confirm whether moving variables is still intended.
                        newvars.push((None, lower - value, upper - value, value));
                    }
                    if !newvars.is_empty() {
                        // modify moved variables
                        for (index, val) in newvars.iter().filter_map(|v| v.0.map(|i| (i, v.3))) {
                            data.cur_y[index] = val;
                        }

                        // add new variables
                        data.cur_y.extend(newvars.iter().filter(|v| v.0.is_none()).map(|v| v.3));

                        master_proc.add_vars(newvars.iter().map(|v| (v.0, v.1, v.2)).collect(), sgext)?;
                    }
                }
            }
        }

        Ok(have_update)
    }

    /// Return the bound below which the function value enforces a descent step.
    ///
    /// If the oracle guarantees that $f(\bar{y}) \le$ this bound, the
    /// bundle method will perform a descent step.
    ///
    /// This value is $f(\hat{y}) - \varrho \cdot \Delta$ where
    /// $\Delta = f(\hat{y}) - \hat{f}(\bar{y})$ is the expected
    /// progress and $\varrho$ is the `acceptance_factor`.
    fn get_descent_bound(acceptance_factor: Real, data: &SolverData) -> Real {
        // Expected progress predicted by the model: f(center) - model(candidate).
        let expected = data.cur_val - data.nxt_mod;
        data.cur_val - acceptance_factor * expected
    }

    /// Print a one-line progress summary at `info` level.
    ///
    /// Columns: elapsed time (hh:mm:ss.cc), number of descent steps, total
    /// number of steps, number of updates, a `*` marker for descent steps,
    /// current weight, expected progress, model value (with new function
    /// value in parentheses) and current center value. Termination lines are
    /// prefixed with `_endit` instead of `endit `.
    fn show_info(
        start_time: &Instant,
        step: Step,
        data: &SolverData,
        cnt_descent: usize,
        cnt_null: usize,
        cnt_updates: usize,
    ) {
        let time = start_time.elapsed();
        info!(
            "{} {:0>2}:{:0>2}:{:0>2}.{:0>2} {:4} {:4} {:4}{:1}  {:9.4} {:9.4} \
             {:12.6e}({:12.6e}) {:12.6e}",
            if step == Step::Term { "_endit" } else { "endit " },
            time.as_secs() / 3600,
            (time.as_secs() / 60) % 60,
            time.as_secs() % 60,
            // Centiseconds (nanoseconds / 10^7).
            time.subsec_nanos() / 10_000_000,
            cnt_descent,
            cnt_descent + cnt_null,
            cnt_updates,
            if step == Step::Descent { "*" } else { " " },
            data.cur_weight,
            data.expected_progress(),
            data.nxt_mod,
            data.nxt_val,
            data.cur_val
        );
    }

    /// Return the aggregated primal of the given subproblem.
    pub fn aggregated_primal(&self, subproblem: usize) -> Result<P::Primal, Error<P::Err>> {
        Ok(self
            .master_proc
            .as_ref()
            .ok_or(Error::NotSolved)?
            .get_aggregated_primal(subproblem)?)
    }
}
Added src/problem.rs.






























































































































































































1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
/*
 * Copyright (c) 2019 Frank Fischer <frank-fischer@shadow-soft.de>
 *
 * This program is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see  <http://www.gnu.org/licenses/>
 */

//! An asynchronous first-order oracle.

use crate::{Aggregatable, DVector, Minorant, Real};
use crossbeam::channel::Sender;
use std::sync::Arc;

/// Evaluation result.
///
/// The result of an evaluation is new information to be made
/// available to the solver and the master problem. There are
/// essentially two types of information:
///
///    1. The (exact) function value of a sub-function at some point.
///    2. A minorant of some sub-function.
#[derive(Debug)]
pub enum EvalResult<I, P> {
    /// The objective value at some point.
    ObjectiveValue { index: I, value: Real },
    /// A minorant with an associated primal.
    Minorant { index: I, minorant: Minorant, primal: P },
}

/// Channel to send evaluation results to.
///
/// `I` is the caller-supplied index type, `P` the primal type and `E` the
/// oracle's error type.
pub type ResultSender<I, P, E> = Sender<Result<EvalResult<I, P>, E>>;

/// Problem update information.
///
/// The solver calls the `update` method of the problem regularly.
/// This method can modify the problem by adding (or moving)
/// variables. The possible updates are encoded in this type.
pub enum Update<I, P, E> {
    /// Add new variables with bounds.
    ///
    /// The initial value of each variable will be the feasible value
    /// closest to 0.
    AddVariables {
        // Index associated with this update — presumably echoes the index
        // passed to `FirstOrderProblem::update`; confirm against the solver.
        index: I,
        // `(lower, upper)` bound pairs, one per new variable.
        bounds: Vec<(Real, Real)>,
        // Callback used to extend existing minorants to the new variables.
        sgext: Box<SubgradientExtender<P, E>>,
    },
}

/// The subgradient extender is a callback used to update existing minorants
/// given their associated primal data.
///
/// NOTE(review): the exact semantics of the three arguments (index, primal
/// data, slice of variable indices) are not visible here — confirm against
/// the master problem implementation that invokes the callback.
pub type SubgradientExtender<P, E> = dyn FnMut(usize, &P, &[usize]) -> Result<DVector, E> + Send;

/// This trait provides information available in the
/// [`FirstOrderProblem::update`] method.
pub trait UpdateState<P> {
    /// Whether the last step was a descent step.
    fn was_descent(&self) -> bool;

    /// Whether the last step was a null step.
    ///
    /// Default: the negation of [`UpdateState::was_descent`].
    fn was_null(&self) -> bool {
        !self.was_descent()
    }

    /// The (old) current center of stability.
    fn center(&self) -> Arc<DVector>;

    /// The candidate point.
    ///
    /// After a descent step, i.e. if [`UpdateState::was_descent`] is `true`,
    /// this is the new center of stability.
    fn candidate(&self) -> Arc<DVector>;

    /// The current aggregated primal information.
    ///
    /// Return the aggregated primal information for the given subproblem.
    fn aggregated_primal(&self, i: usize) -> P;
}

/// Channel to send problem updates to.
///
/// Note that the error type `E` appears both as the channel's `Err` payload
/// and as the error parameter of each [`Update`].
pub type UpdateSender<I, P, E> = Sender<Result<Update<I, P, E>, E>>;

/// Trait for implementing a first-order problem description.
///
/// All computations made by an implementation are supposed to
/// be asynchronous. Hence, the interface is slightly different
/// compared with [`crate::FirstOrderProblem`].
pub trait FirstOrderProblem {
    /// Error raised by this oracle.
    type Err;

    /// The primal information associated with a minorant.
    type Primal: Aggregatable + Send + 'static;

    /// Return the number of variables.
    fn num_variables(&self) -> usize;

    /// Return the lower bounds on the variables.
    ///
    /// If no lower bounds are specified, $-\infty$ is assumed.
    ///
    /// The lower bounds must be less than or equal to the upper bounds.
    fn lower_bounds(&self) -> Option<Vec<Real>> {
        None
    }

    /// Return the upper bounds on the variables.
    ///
    /// If no upper bounds are specified, $+\infty$ is assumed.
    ///
    /// The upper bounds must be greater than or equal to the lower bounds.
    fn upper_bounds(&self) -> Option<Vec<Real>> {
        None
    }

    /// Return the number of subproblems.
    fn num_subproblems(&self) -> usize {
        1
    }

    /// Start background processes.
    ///
    /// This method is called right before the solver starts the solution process.
    /// It can be used to setup any background tasks required for the evaluation
    /// of the subfunctions.
    ///
    /// Remember that background processes should be cleaned up when the problem
    /// is deleted (e.g. by implementing the [`Drop`] trait).
    ///
    /// The default implementation does nothing.
    fn start(&mut self) {}

    /// Stop background processes.
    ///
    /// This method is called right after the solver stops the solution process.
    /// It can be used to stop any background tasks required for the evaluation
    /// of the subfunctions.
    ///
    /// A correct implementation should also clean up all processes from its
    /// [`Drop`] implementation.
    ///
    /// The default implementation does nothing.
    fn stop(&mut self) {}

    /// Start the evaluation of the i^th subproblem at the given point.
    ///
    /// The results of the evaluation should be passed to the provided channel.
    /// In order to work correctly, the results must contain (an upper bound on)
    /// the objective value at $y$ as well as at least one subgradient centered
    /// at $y$ eventually.
    fn evaluate<I: Send + Copy + 'static>(
        &mut self,
        i: usize,
        y: Arc<DVector>,
        index: I,
        tx: ResultSender<I, Self::Primal, Self::Err>,
    ) -> Result<(), Self::Err>;

    /// Called to update the problem.
    ///
    /// This method is called regularly by the solver. The problem should send problem update
    /// information (e.g. adding new variables) to the provided channel.
    ///
    /// The updates might be generated asynchronously.
    ///
    /// The default implementation does nothing.
    fn update<I, U>(
        &mut self,
        _state: &U,
        _index: I,
        _tx: UpdateSender<I, Self::Primal, Self::Err>,
    ) -> Result<(), Self::Err>
    where
        U: UpdateState<Self::Primal>,
    {
        Ok(())
    }
}
Changes to src/solver.rs.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112

113
114
115
116
117
118
119
120
121
122
123

124
125
126

127
128
129

130
131
132
133
134
135

136
137
138

139
140
141

142
143
144

145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278

279
280
281
282
283

284
285
286
287
288
289

290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403

404
405
406

407
408
409

410
411
412

413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438

439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540


541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824

825
826

827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937


938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
















































































































1











2



3



4






5



6



7



8






































































































































9





10






11


















































































































12



13



14



15


























16






































































































17
18









































































































































































































































































19


















20


21















































































































22
23



































































































































-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
-
-
-
-
-
-
-
-
-
-
-
+
-
-
-
+
-
-
-
+
-
-
-
-
-
-
+
-
-
-
+
-
-
-
+
-
-
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
-
-
-
-
-
+
-
-
-
-
-
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
-
-
-
+
-
-
-
+
-
-
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
-
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
// Copyright (c) 2016, 2017, 2018, 2019 Frank Fischer <frank-fischer@shadow-soft.de>
//
// This program is free software: you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see  <http://www.gnu.org/licenses/>
//

//! The main bundle method solver.

use crate::{Aggregatable, DVector, Real};
use crate::{Evaluation, FirstOrderProblem, Update};

use crate::master::{self, MasterProblem};
use crate::terminator::{StandardTerminatable, StandardTerminator, Terminator};
use crate::weighter::{HKWeightable, HKWeighter, Weighter};

use log::{debug, info, warn};

use std::error::Error;
use std::f64::{INFINITY, NEG_INFINITY};
use std::fmt;
use std::mem::swap;
use std::time::Instant;

/// A solver error.
///
/// `E` is the error type of the first order oracle, `MErr` the error
/// type of the master problem.
#[derive(Debug)]
pub enum SolverError<E, MErr> {
    /// An error occurred during oracle evaluation.
    Evaluation(E),
    /// An error occurred during oracle update.
    Update(E),
    /// Creation of the master problem failed.
    BuildMaster(Box<dyn Error>),
    /// An error has been raised by the master problem.
    Master(MErr),
    /// The oracle did not return a minorant.
    NoMinorant,
    /// The dimension of some data is wrong.
    Dimension,
    /// Some parameter has an invalid value.
    Parameter(ParameterError),
    /// The lower bound of a variable is larger than the upper bound.
    InvalidBounds { lower: Real, upper: Real },
    /// The value of a variable is outside its bounds.
    ViolatedBounds { lower: Real, upper: Real, value: Real },
    /// The variable index is out of bounds.
    InvalidVariable { index: usize, nvars: usize },
    /// Iteration limit has been reached.
    IterationLimit { limit: usize },
}

impl<E, MErr> fmt::Display for SolverError<E, MErr>
where
    E: fmt::Display,
    MErr: fmt::Display,
{
    /// Render a human readable description of the error.
    fn fmt(&self, f: &mut fmt::Formatter) -> std::result::Result<(), fmt::Error> {
        use self::SolverError::*;
        match self {
            Evaluation(e) => write!(f, "Oracle evaluation failed: {}", e),
            Update(e) => write!(f, "Oracle update failed: {}", e),
            BuildMaster(e) => write!(f, "Creation of master problem failed: {}", e),
            Master(e) => write!(f, "Master problem failed: {}", e),
            NoMinorant => write!(f, "The oracle did not return a minorant"),
            Dimension => write!(f, "Dimension of lower bounds does not match number of variables"),
            Parameter(p) => write!(f, "Parameter error: {}", p),
            InvalidBounds { lower, upper } => {
                write!(f, "Invalid bounds, lower:{}, upper:{}", lower, upper)
            }
            ViolatedBounds { lower, upper, value } => write!(
                f,
                "Violated bounds, lower:{}, upper:{}, value:{}",
                lower, upper, value
            ),
            InvalidVariable { index, nvars } => {
                write!(f, "Variable index out of bounds, got:{} must be < {}", index, nvars)
            }
            IterationLimit { limit } => {
                write!(f, "The iteration limit of {} has been reached.", limit)
            }
        }
    }
}

impl<E, MErr> Error for SolverError<E, MErr>
where
    E: Error + 'static,
    MErr: Error + 'static,
{
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        match self {
            SolverError::Evaluation(err) => Some(err),
            SolverError::Update(err) => Some(err),
            SolverError::Master(err) => Some(err),
            SolverError::BuildMaster(err) => Some(err.as_ref()),
            _ => None,
        }
    }
}

impl<E, MErr> From<MErr> for SolverError<E, MErr> {
    fn from(err: MErr) -> SolverError<E, MErr> {
        SolverError::Master(err)
    }
}

/**
/*
 * The current state of the bundle method.
 *
 * Captures the current state of the bundle method during the run of
 * the algorithm. This state is passed to certain callbacks like
 * Terminator or Weighter so that they can compute their result
 * depending on the state.
 */
pub struct BundleState<'a> {
    /// Current center of stability.
    pub cur_y: &'a DVector,

 * Copyright (c) 2019 Frank Fischer <frank-fischer@shadow-soft.de>
    /// Function value in current center.
    pub cur_val: Real,

 *
    /// Current candidate, point of last evaluation.
    pub nxt_y: &'a DVector,

 * This program is free software: you can redistribute it and/or
    /// Function value in candidate.
    pub nxt_val: Real,

    /// Model value in candidate.
    pub nxt_mod: Real,

 * modify it under the terms of the GNU General Public License as
    /// Cut value of new subgradient in current center.
    pub new_cutval: Real,

 * published by the Free Software Foundation, either version 3 of the
    /// The current aggregated subgradient norm.
    pub sgnorm: Real,

 * License, or (at your option) any later version.
    /// The expected progress of the current model.
    pub expected_progress: Real,

 *
    /// Currently used weight of quadratic term.
    pub weight: Real,

    /**
     * The type of the current step.
     *
     * If the current step is Step::Term, the weighter should be reset.
     */
    pub step: Step,
}

/// Build a `BundleState` snapshot from the solver `$slf` for the given
/// step type `$step`, borrowing the center and candidate vectors.
macro_rules! current_state {
    ($slf:ident, $step:expr) => {
        BundleState {
            cur_y: &$slf.cur_y,
            cur_val: $slf.cur_val,
            nxt_y: &$slf.nxt_y,
            nxt_mod: $slf.nxt_mod,
            nxt_val: $slf.nxt_val,
            new_cutval: $slf.new_cutval,
            sgnorm: $slf.sgnorm,
            // The weight is owned by the master problem, not the solver.
            weight: $slf.master.weight(),
            step: $step,
            expected_progress: $slf.expected_progress,
        }
    };
}

/// Adapter exposing the state fields consumed by the `HKWeighter`
/// weight update heuristic. All methods are trivial field accessors.
impl<'a> HKWeightable for BundleState<'a> {
    fn current_weight(&self) -> Real {
        self.weight
    }

    fn center(&self) -> &DVector {
        self.cur_y
    }

    fn center_value(&self) -> Real {
        self.cur_val
    }

    fn candidate_value(&self) -> Real {
        self.nxt_val
    }

    fn candidate_model(&self) -> Real {
        self.nxt_mod
    }

    fn new_cutvalue(&self) -> Real {
        self.new_cutval
    }

    fn sgnorm(&self) -> Real {
        self.sgnorm
    }
}

/// Adapter exposing the state fields consumed by the
/// `StandardTerminator` termination criterion.
impl<'a> StandardTerminatable for BundleState<'a> {
    fn expected_progress(&self) -> Real {
        self.expected_progress
    }

    fn center_value(&self) -> Real {
        self.cur_val
    }
}

/// An invalid value has been passed for some parameter.
///
/// The wrapped string is the human readable error message.
#[derive(Debug)]
pub struct ParameterError(String);

impl fmt::Display for ParameterError {
    fn fmt(&self, f: &mut fmt::Formatter) -> std::result::Result<(), fmt::Error> {
        // The display form is exactly the stored message.
        f.write_str(&self.0)
    }
}

impl Error for ParameterError {}

/// Parameters for tuning the solver.
///
/// Use `SolverParams::default()` for reasonable defaults. The values
/// are validated by the solver during initialization.
#[derive(Clone, Debug)]
pub struct SolverParams {
    /// Maximal individual bundle size.
    pub max_bundle_size: usize,

    /**
     * Factor for doing a descent step.
     *
     * If the proportion of actual decrease to predicted decrease is
     * at least that high, a descent step will be done.
     *
     * Must be in (0,1).
     */
    pub acceptance_factor: Real,

    /**
     * Factor for doing a null step.
     *
     * Factor that guarantees a null step. This factor is used to
     * compute a bound for the function oracle, that guarantees a null
     * step. If the function is evaluated by some iterative method that ensures
     * an objective value that is at least as large as this bound, the
     * oracle can stop returning an appropriate $\varepsilon$-subgradient.
     *
     * Must be in (0, acceptance_factor).
     */
    pub nullstep_factor: Real,
}

impl SolverParams {
    /// Verify that all parameters are valid.
    ///
    /// Returns a `ParameterError` describing the first violated
    /// requirement, or `Ok(())` when everything is in range.
    fn check(&self) -> std::result::Result<(), ParameterError> {
        if self.max_bundle_size < 2 {
            return Err(ParameterError(format!(
                "max_bundle_size must be >= 2 (got: {})",
                self.max_bundle_size
            )));
        }
        if self.acceptance_factor <= 0.0 || self.acceptance_factor >= 1.0 {
            return Err(ParameterError(format!(
                "acceptance_factor must be in (0,1) (got: {})",
                self.acceptance_factor
            )));
        }
        if self.nullstep_factor <= 0.0 || self.nullstep_factor > self.acceptance_factor {
            return Err(ParameterError(format!(
                "nullstep_factor must be in (0,acceptance_factor] (got: {}, acceptance_factor:{})",
                self.nullstep_factor, self.acceptance_factor
            )));
        }
        Ok(())
    }
}

 * This program is distributed in the hope that it will be useful, but
impl Default for SolverParams {
    fn default() -> SolverParams {
        SolverParams {
            max_bundle_size: 50,

 * WITHOUT ANY WARRANTY; without even the implied warranty of
            nullstep_factor: 0.1,
            acceptance_factor: 0.1,
        }
    }
}

 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
/// The step type that has been performed in the last iteration.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum Step {
    /// A null step has been performed (center unchanged).
    Null,
    /// A descent step has been performed (center moved to candidate).
    Descent,
    /// No step but the algorithm has been terminated.
    Term,
}

/// Information about a minorant currently contained in the model.
#[derive(Debug, Clone)]
struct MinorantInfo {
    /// The minorant's index in the master problem
    /// (also used to index the solver's primal storage).
    index: usize,
    /// Current multiplier.
    multiplier: Real,
}

/// Information about the last iteration.
///
/// NOTE(review): the exact conditions under which each variant is
/// recorded are not visible in this chunk — confirm at the sites that
/// push these values.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum IterationInfo {
    /// The new minorant's value was higher than expected
    /// (presumably `new` vs. `old` cut value — verify at the producer).
    NewMinorantTooHigh { new: Real, old: Real },
    /// A null step caused by the upper bound.
    UpperBoundNullStep,
    /// The returned cut was shallow.
    ShallowCut,
}

/// State information for the update callback.
///
/// `Pr` is the primal type produced by the oracle (presumably
/// `FirstOrderProblem::Primal` — confirm at the construction site).
pub struct UpdateState<'a, Pr: 'a> {
    /// Current model minorants, one list per subproblem.
    minorants: &'a [Vec<MinorantInfo>],
    /// The primals, indexed by global minorant index.
    primals: &'a Vec<Option<Pr>>,
    /// The last step type.
    pub step: Step,
    /// Iteration information.
    pub iteration_info: &'a [IterationInfo],
    /// The current candidate. If the step was a descent step, this is
    /// the new center.
    pub nxt_y: &'a DVector,
    /// The center. If the step was a descent step, this is the old
    /// center.
    pub cur_y: &'a DVector,
}

impl<'a, Pr: 'a> UpdateState<'a, Pr> {
    /// Collect the primals of the given subproblem's active minorants
    /// together with their current multipliers.
    ///
    /// Panics if a minorant of this subproblem has no stored primal.
    pub fn aggregated_primals(&self, subproblem: usize) -> Vec<(Real, &Pr)> {
        let infos = &self.minorants[subproblem];
        let mut result = Vec::with_capacity(infos.len());
        for info in infos {
            result.push((info.multiplier, self.primals[info.index].as_ref().unwrap()));
        }
        result
    }

    /// Return the last primal for a given subproblem.
    ///
    /// This is the last primal generated by the oracle.
    pub fn last_primal(&self, fidx: usize) -> Option<&Pr> {
        match self.minorants[fidx].last() {
            Some(info) => self.primals[info.index].as_ref(),
            None => None,
        }
    }
}

/// The default builder: a boxed master problem builder wrapping the
/// `master::cpx` implementation.
pub type FullMasterBuilder = master::boxed::Builder<master::cpx::Builder>;

/**
 * Implementation of a bundle method.
 */
pub struct Solver<P, T = StandardTerminator, W = HKWeighter, M = FullMasterBuilder>
where
    P: FirstOrderProblem,
    M: master::Builder,
{
    /// The first order problem description.
    problem: P,

    /// The solver parameter.
    pub params: SolverParams,

    /// Termination predicate.
    pub terminator: T,

    /// Weighter heuristic.
    pub weighter: W,

    /// Lower and upper bounds of all variables.
    bounds: Vec<(Real, Real)>,

    /// Current center of stability.
    cur_y: DVector,

    /// Function value in current point.
    cur_val: Real,

    /// Model value in current point.
    cur_mod: Real,

    /// Vector of subproblem function values in current point.
    cur_vals: DVector,

    /// Vector of model values in current point.
    cur_mods: DVector,

    /**
     * Whether the data of the current center is valid.
     *
     * This variable is set to false of the problem data changes so
     * the function is re-evaluated at the center.
     */
    cur_valid: bool,

    /// Direction from current center to candidate.
    nxt_d: DVector,

 * General Public License for more details.
    /// Current candidate point.
    nxt_y: DVector,

 *
    /// (Upper bound on) function value in candidate.
    nxt_val: Real,

 * You should have received a copy of the GNU General Public License
    /// Model value in candidate.
    nxt_mod: Real,

 * along with this program.  If not, see  <http://www.gnu.org/licenses/>
    /// DVector of subproblem function values in candidate.
    nxt_vals: DVector,

    /// Vector of model values in candidate point.
    nxt_mods: DVector,

    /// Cut value of new subgradient in current center.
    new_cutval: Real,

    /// Norm of current aggregated subgradient.
    sgnorm: Real,

    /// Expected progress.
    expected_progress: Real,

    /// Number of descent steps.
    cnt_descent: usize,

    /// Number of null steps.
    cnt_null: usize,

    /**
     * Time when the solution process started.
     *
     * This is actually the time of the last call to `Solver::init`.
     */
 */
    start_time: Instant,

    /// The master problem.
    master: M::MasterProblem,

    /// The active minorant indices for each subproblem.
    minorants: Vec<Vec<MinorantInfo>>,

    /// The primals associated with each global minorant index.
    primals: Vec<Option<P::Primal>>,

    /// Accumulated information about the last iteration.
    iterinfos: Vec<IterationInfo>,
}

/// Convenience result type combining the oracle error of problem `P`
/// with the error type of the master problem produced by builder `M`.
pub type Result<T, P, M> = std::result::Result<
    T,
    SolverError<<P as FirstOrderProblem>::Err, <<M as master::Builder>::MasterProblem as MasterProblem>::Err>,
>;

impl<P, T, W, M> Solver<P, T, W, M>
where
    P: FirstOrderProblem,
    P::Err: Into<Box<dyn std::error::Error + Sync + Send>>,
    T: for<'a> Terminator<BundleState<'a>> + Default,
    W: for<'a> Weighter<BundleState<'a>> + Default,
    M: master::Builder + Default,
    M::MasterProblem: MasterProblem<MinorantIndex = usize>,
{
    /**
     * Create a new solver for the given problem.
     *
     * Note that the solver owns the problem, so you cannot use the
     * same problem description elsewhere as long as it is assigned to
     * the solver. However, it is possible to get a reference to the
     * internally stored problem using `Solver::problem()`.
     */
    #[allow(clippy::type_complexity)]
    pub fn new_params(problem: P, params: SolverParams) -> Result<Solver<P, T, W, M>, P, M> {
        // Build the master problem first; it is the only fallible part
        // of construction.
        let master = M::default().build().map_err(|e| SolverError::BuildMaster(e.into()))?;
        Ok(Solver {
            problem,
            params,
            master,
            terminator: T::default(),
            weighter: W::default(),
            bounds: Vec::new(),
            cur_y: dvec![],
            cur_val: 0.0,
            cur_mod: 0.0,
            cur_vals: dvec![],
            cur_mods: dvec![],
            cur_valid: false,
            nxt_d: dvec![],
            nxt_y: dvec![],
            nxt_val: 0.0,
            nxt_mod: 0.0,
            nxt_vals: dvec![],
            nxt_mods: dvec![],
            new_cutval: 0.0,
            sgnorm: 0.0,
            expected_progress: 0.0,
            cnt_descent: 0,
            cnt_null: 0,
            start_time: Instant::now(),
            minorants: Vec::new(),
            primals: Vec::new(),
            iterinfos: Vec::new(),
        })
    }

    /// A new solver with default parameter.
    #[allow(clippy::type_complexity)]
    pub fn new(problem: P) -> Result<Solver<P, T, W, M>, P, M> {
        Solver::new_params(problem, SolverParams::default())
    }

    /**
     * Set the first order problem description associated with this
     * solver.
     *
     * Note that the solver owns the problem, so you cannot use the
     * same problem description elsewhere as long as it is assigned to
     * the solver. However, it is possible to get a reference to the
     * internally stored problem using `Solver::problem()`.
     */
    pub fn set_problem(&mut self, problem: P) {
        self.problem = problem;
    }

    /// Returns a reference to the solver's current problem.
    pub fn problem(&self) -> &P {
        &self.problem
    }

    /// Initialize the solver.
    pub fn init(&mut self) -> Result<(), P, M> {
        self.params.check().map_err(SolverError::Parameter)?;
        if self.cur_y.len() != self.problem.num_variables() {
            self.cur_valid = false;
            self.cur_y.init0(self.problem.num_variables());
        }


//! The basic solver implementation.
        let lb = self.problem.lower_bounds();
        let ub = self.problem.upper_bounds();
        self.bounds.clear();
        self.bounds.reserve(self.cur_y.len());
        for i in 0..self.cur_y.len() {
            let lb_i = lb.as_ref().map(|x| x[i]).unwrap_or(NEG_INFINITY);
            let ub_i = ub.as_ref().map(|x| x[i]).unwrap_or(INFINITY);
            if lb_i > ub_i {
                return Err(SolverError::InvalidBounds {
                    lower: lb_i,
                    upper: ub_i,
                });
            }
            if self.cur_y[i] < lb_i {
                self.cur_valid = false;
                self.cur_y[i] = lb_i;
            } else if self.cur_y[i] > ub_i {
                self.cur_valid = false;
                self.cur_y[i] = ub_i;
            }
            self.bounds.push((lb_i, ub_i));
        }

        let m = self.problem.num_subproblems();
        self.cur_vals.init0(m);
        self.cur_mods.init0(m);
        self.nxt_vals.init0(m);
        self.nxt_mods.init0(m);

        self.start_time = Instant::now();

        Ok(())
    }

    /// Solve the problem, allowing at most 10_000 iterations.
    ///
    /// Call `solve_with_limit` to choose the iteration limit
    /// explicitly.
    pub fn solve(&mut self) -> Result<(), P, M> {
        self.solve_with_limit(10_000)
    }

    /// Solve the problem, giving up after `iter_limit` iterations.
    ///
    /// Returns `SolverError::IterationLimit` if the termination
    /// criterion was not reached within the limit.
    pub fn solve_with_limit(&mut self, iter_limit: usize) -> Result<(), P, M> {
        // Set up the internal data structures before iterating.
        self.init()?;

        match self.solve_iter(iter_limit)? {
            true => Ok(()),
            false => Err(SolverError::IterationLimit { limit: iter_limit }),
        }
    }

    /// Solve the problem but stop after `niter` iterations.
    ///
    /// Returns `Ok(true)` once the termination criterion is
    /// satisfied, `Ok(false)` if the iteration budget ran out, or an
    /// error.
    ///
    /// Calling this again continues from the previous point, so
    /// `init()` must have been called before the first invocation.
    pub fn solve_iter(&mut self, niter: usize) -> Result<bool, P, M> {
        for _ in 0..niter {
            let step = self.step()?;
            let changed = self.update_problem(step)?;
            // A changed problem must not terminate the solve:
            // demote the step to a null step in that case.
            let step = if changed && step == Step::Term {
                Step::Null
            } else {
                step
            };
            self.show_info(step);
            if step == Step::Term {
                return Ok(true);
            }
        }
        Ok(false)
    }

    /// Called to update the problem.
    ///
    /// Calling this function typically triggers the problem to
    /// separate new constraints depending on the current solution.
    ///
    /// `term` is the step type of the iteration that just finished.
    /// Returns `Ok(true)` iff variables were added or moved, i.e. the
    /// master problem and the iterates had to be adjusted.
    fn update_problem(&mut self, term: Step) -> Result<bool, P, M> {
        let updates = {
            let state = UpdateState {
                minorants: &self.minorants,
                primals: &self.primals,
                step: term,
                iteration_info: &self.iterinfos,
                // this is a dirty trick: when updating the center, we
                // simply swapped the `cur_*` fields with the `nxt_*`
                // fields
                cur_y: if term == Step::Descent {
                    &self.nxt_y
                } else {
                    &self.cur_y
                },
                nxt_y: if term == Step::Descent {
                    &self.cur_y
                } else {
                    &self.nxt_y
                },
            };
            self.problem.update(&state).map_err(SolverError::Update)?
        };

        // Each entry is (existing index or None, shifted lower bound,
        // shifted upper bound, variable value); the bounds are shifted
        // by the variable's value before being handed to the master.
        let mut newvars = Vec::with_capacity(updates.len());
        for u in updates {
            match u {
                Update::AddVariable { lower, upper } => {
                    if lower > upper {
                        return Err(SolverError::InvalidBounds { lower, upper });
                    }
                    // Pick the feasible value closest to zero.
                    let value = if lower > 0.0 {
                        lower
                    } else if upper < 0.0 {
                        upper
                    } else {
                        0.0
                    };
                    self.bounds.push((lower, upper));
                    newvars.push((None, lower - value, upper - value, value));
                }
                Update::AddVariableValue { lower, upper, value } => {
                    if lower > upper {
                        return Err(SolverError::InvalidBounds { lower, upper });
                    }
                    if value < lower || value > upper {
                        return Err(SolverError::ViolatedBounds { lower, upper, value });
                    }
                    self.bounds.push((lower, upper));
                    newvars.push((None, lower - value, upper - value, value));
                }
                Update::MoveVariable { index, value } => {
                    if index >= self.bounds.len() {
                        return Err(SolverError::InvalidVariable {
                            index,
                            nvars: self.bounds.len(),
                        });
                    }
                    let (lower, upper) = self.bounds[index];
                    if value < lower || value > upper {
                        return Err(SolverError::ViolatedBounds { lower, upper, value });
                    }
                    newvars.push((Some(index), lower - value, upper - value, value));
                }
            }
        }

        if !newvars.is_empty() {
            // Disjoint field borrows: the closure below needs the
            // problem mutably and the primals immutably while
            // `self.master` is borrowed by `add_vars`.
            let problem = &mut self.problem;
            let primals = &self.primals;
            self.master.add_vars(
                &newvars.iter().map(|v| (v.0, v.1, v.2)).collect::<Vec<_>>(),
                &mut |fidx, minidx, vars| {
                    // Extend the stored subgradient of minorant
                    // `minidx` of subproblem `fidx` to the new vars.
                    problem
                        .extend_subgradient(fidx, primals[minidx].as_ref().unwrap(), vars)
                        .map(DVector)
                        .map_err(|e| e.into())
                },
            )?;
            // modify moved variables
            for (index, val) in newvars.iter().filter_map(|v| v.0.map(|i| (i, v.3))) {
                self.cur_y[index] = val;
                self.nxt_y[index] = val;
                self.nxt_d[index] = 0.0;
            }
            // add new variables
            self.cur_y.extend(newvars.iter().filter(|v| v.0.is_none()).map(|v| v.3));
            self.nxt_y.extend(newvars.iter().filter(|v| v.0.is_none()).map(|v| v.3));
            self.nxt_d.resize(self.nxt_y.len(), 0.0);
            Ok(true)
        } else {
            Ok(false)
        }
    }

    /// Return the current aggregated primal information for a subproblem.
    ///
    /// This function returns all currently used minorants $x_i$ along
    /// with their coefficients $\alpha_i$. The aggregated primal can
    /// be computed by combining the minorants $\bar{x} =
    /// \sum_{i=1}\^m \alpha_i x_i$.
    pub fn aggregated_primals(&self, subproblem: usize) -> P::Primal {
        let weighted = self.minorants[subproblem]
            .iter()
            .map(|info| (info.multiplier, self.primals[info.index].as_ref().unwrap()));
        Aggregatable::combine(weighted)
    }

    /// Log a one-line progress summary for the finished iteration.
    ///
    /// Columns: marker (`_endit` for the terminating iteration),
    /// elapsed time (h:m:s.cc), descent-step count, total step count,
    /// master update count (followed by `*` on descent steps), master
    /// weight, expected progress, model value, candidate value (in
    /// parentheses) and center value.
    fn show_info(&self, step: Step) {
        let time = self.start_time.elapsed();
        info!(
            "{} {:0>2}:{:0>2}:{:0>2}.{:0>2} {:4} {:4} {:4}{:1}  {:9.4} {:9.4} \
             {:12.6e}({:12.6e}) {:12.6e}",
            if step == Step::Term { "_endit" } else { "endit " },
            time.as_secs() / 3600,
            (time.as_secs() / 60) % 60,
            time.as_secs() % 60,
            // hundredths of a second
            time.subsec_nanos() / 10_000_000,
            self.cnt_descent,
            self.cnt_descent + self.cnt_null,
            self.master.cnt_updates(),
            if step == Step::Descent { "*" } else { " " },
            self.master.weight(),
            self.expected_progress,
            self.nxt_mod,
            self.nxt_val,
            self.cur_val
        );
    }

    /// The point the bundle method currently considers best
    /// (center of stability).
    pub fn center(&self) -> &[Real] {
        &self.cur_y
    }

    /// The candidate point produced by the most recent model solve.
    pub fn candidate(&self) -> &[Real] {
        &self.nxt_y
    }

    /**
     * Initializes the master problem.
     *
     * The oracle is evaluated once at the initial center and the
     * master problem is initialized with the returned subgradient
     * information.
     *
     * Note: two stray `pub mod`/`pub use` lines left over from a
     * merge have been removed here; item declarations like these are
     * invalid inside a function body.
     */
    fn init_master(&mut self) -> Result<(), P, M> {
        let m = self.problem.num_subproblems();

        let lb = self.problem.lower_bounds().map(DVector);
        let ub = self.problem.upper_bounds().map(DVector);

        // Bound vectors, when present, must match the variable count.
        if lb
            .as_ref()
            .map(|lb| lb.len() != self.problem.num_variables())
            .unwrap_or(false)
        {
            return Err(SolverError::Dimension);
        }
        if ub
            .as_ref()
            .map(|ub| ub.len() != self.problem.num_variables())
            .unwrap_or(false)
        {
            return Err(SolverError::Dimension);
        }

        self.master.set_num_subproblems(m)?;
        self.master.set_vars(self.problem.num_variables(), lb, ub)?;

        self.minorants = (0..m).map(|_| vec![]).collect();

        // Evaluate every subproblem exactly (no bound, zero relative
        // precision) at the current center and seed the bundle with
        // the first minorant of each result.
        self.cur_val = 0.0;
        for i in 0..m {
            let result = self
                .problem
                .evaluate(i, &self.cur_y, INFINITY, 0.0)
                .map_err(SolverError::Evaluation)?;
            self.cur_vals[i] = result.objective();
            self.cur_val += self.cur_vals[i];

            let mut minorants = result.into_iter();
            if let Some((minorant, primal)) = minorants.next() {
                self.cur_mods[i] = minorant.constant;
                self.cur_mod += self.cur_mods[i];
                let minidx = self.master.add_minorant(i, minorant)?;
                self.minorants[i].push(MinorantInfo {
                    index: minidx,
                    multiplier: 0.0,
                });
                if minidx >= self.primals.len() {
                    self.primals.resize_with(minidx + 1, || None);
                }
                self.primals[minidx] = Some(primal);
            } else {
                return Err(SolverError::NoMinorant);
            }
        }

        self.cur_valid = true;

        // Solve the master problem once to compute the initial
        // subgradient.
        //
        // We could compute that subgradient directly by
        // adding up the initial minorants, but this would not include
        // the eta terms. However, this is a heuristic anyway because
        // we assume an initial weight of 1.0, which, in general, will
        // *not* be the initial weight for the first iteration.
        self.master.set_weight(1.0)?;
        self.master.solve(self.cur_val)?;
        self.sgnorm = self.master.get_dualoptnorm2().sqrt();

        // Compute the real initial weight.
        let state = current_state!(self, Step::Term);
        let new_weight = self.weighter.initial_weight(&state);
        self.master.set_weight(new_weight)?;

        debug!("Init master completed");

        Ok(())
    }

    /// Solve the model (i.e. master problem) to compute the next candidate.
    ///
    /// Afterwards `nxt_d`, `nxt_y`, `nxt_mod`, `sgnorm` and
    /// `expected_progress` reflect the master solution and the
    /// minorant multipliers are refreshed.
    fn solve_model(&mut self) -> Result<(), P, M> {
        self.master.solve(self.cur_val)?;

        // Extract the optimal direction and derived quantities.
        self.nxt_d = self.master.get_primopt();
        self.nxt_y.add(&self.cur_y, &self.nxt_d);
        self.nxt_mod = self.master.get_primoptval();
        self.sgnorm = self.master.get_dualoptnorm2().sqrt();
        self.expected_progress = self.cur_val - self.nxt_mod;

        // Refresh the multipliers of all minorants from the master
        // solution.
        for info in self.minorants.iter_mut().flatten() {
            info.multiplier = self.master.multiplier(info.index);
        }

        debug!("Model result");
        debug!("  cur_val ={}", self.cur_val);
        debug!("  nxt_mod ={}", self.nxt_mod);
        debug!("  expected={}", self.expected_progress);
        Ok(())
    }

    /// Reduce size of bundle.
    ///
    /// For every subproblem whose bundle reached
    /// `max_bundle_size`, the `max_bundle_size - 2` minorants with
    /// the largest multipliers are kept and the remaining ones are
    /// replaced by a single aggregated minorant.
    fn compress_bundle(&mut self) -> Result<(), P, M> {
        for i in 0..self.problem.num_subproblems() {
            let n = self.master.num_minorants(i);
            if n >= self.params.max_bundle_size {
                // aggregate minorants with smallest coefficients
                // (sort descending by multiplier; scaling by 1e6 turns
                // the float into a usable integer sort key)
                self.minorants[i].sort_by_key(|m| -((1e6 * m.multiplier) as isize));
                let aggr = self.minorants[i].split_off(self.params.max_bundle_size - 2);
                // Total weight of the aggregated minorants.
                let aggr_sum = aggr.iter().map(|m| m.multiplier).sum();
                // Take the primals out of the table; their slots are
                // freed for reuse by the master problem.
                let (aggr_mins, aggr_primals): (Vec<_>, Vec<_>) = aggr
                    .into_iter()
                    .map(|m| (m.index, self.primals[m.index].take().unwrap()))
                    .unzip();
                let (aggr_min, aggr_coeffs) = self.master.aggregate(i, &aggr_mins)?;
                // append aggregated minorant
                self.minorants[i].push(MinorantInfo {
                    index: aggr_min,
                    multiplier: aggr_sum,
                });
                // Combine the removed primals with the coefficients
                // returned by the master into the aggregate's primal.
                self.primals[aggr_min] = Some(Aggregatable::combine(aggr_coeffs.into_iter().zip(&aggr_primals)));
            }
        }
        Ok(())
    }

    /// Perform a descent step.
    ///
    /// The candidate becomes the new center by *swapping* the `cur_*`
    /// and `nxt_*` fields (so the old center data ends up in the
    /// `nxt_*` fields); `update_problem` relies on this swap to
    /// reconstruct both points after a descent step.
    fn descent_step(&mut self) -> Result<(), P, M> {
        let new_weight = self.weighter.descent_weight(&current_state!(self, Step::Descent));
        self.master.set_weight(new_weight)?;
        self.cnt_descent += 1;
        // Move the candidate data into the center slots.
        swap(&mut self.cur_y, &mut self.nxt_y);
        swap(&mut self.cur_val, &mut self.nxt_val);
        swap(&mut self.cur_mod, &mut self.nxt_mod);
        swap(&mut self.cur_vals, &mut self.nxt_vals);
        swap(&mut self.cur_mods, &mut self.nxt_mods);
        // Tell the master problem about the new center.
        self.master.move_center(1.0, &self.nxt_d);
        debug!("Descent Step");
        debug!("  dir ={}", self.nxt_d);
        debug!("  newy={}", self.cur_y);
        Ok(())
    }

    /// Perform a null step: keep the center, only adjust the weight.
    fn null_step(&mut self) -> Result<(), P, M> {
        let new_weight = {
            let state = current_state!(self, Step::Null);
            self.weighter.null_weight(&state)
        };
        self.master.set_weight(new_weight)?;
        self.cnt_null += 1;
        debug!("Null Step");
        Ok(())
    }

    /// Perform one bundle iteration.
    ///
    /// Solves the model, checks the termination criterion, compresses
    /// the bundle, evaluates the oracle at the new candidate, adds
    /// the returned minorants to the master and finally decides
    /// between a descent and a null step.
    ///
    /// Fixes in this revision: a stray `mod masterprocess;` merge
    /// artifact was removed (item declarations are invalid in a
    /// function body), and `IterationInfo::NewMinorantTooHigh` now
    /// records the *old* center value instead of the already
    /// overwritten one.
    #[allow(clippy::collapsible_if)]
    pub fn step(&mut self) -> Result<Step, P, M> {
        self.iterinfos.clear();

        if !self.cur_valid {
            // current point needs new evaluation
            self.init_master()?;
        }

        self.solve_model()?;
        if self.terminator.terminate(&current_state!(self, Step::Term)) {
            return Ok(Step::Term);
        }

        // Acceptance thresholds for the oracle. Relaxed (inexact)
        // evaluation is only allowed when there is a single
        // subproblem.
        let m = self.problem.num_subproblems();
        let descent_bnd = self.get_descent_bound();
        let nullstep_bnd = if m == 1 { self.get_nullstep_bound() } else { INFINITY };
        let relprec = if m == 1 { self.get_relative_precision() } else { 0.0 };

        self.compress_bundle()?;

        let mut nxt_lb = 0.0;
        let mut nxt_ub = 0.0;
        self.new_cutval = 0.0;
        for fidx in 0..self.problem.num_subproblems() {
            let result = self
                .problem
                .evaluate(fidx, &self.nxt_y, nullstep_bnd, relprec)
                .map_err(SolverError::Evaluation)?;
            let fun_ub = result.objective();

            // The first returned minorant is mandatory.
            let mut minorants = result.into_iter();
            let mut nxt_minorant;
            let nxt_primal;
            match minorants.next() {
                Some((m, p)) => {
                    nxt_minorant = m;
                    nxt_primal = p;
                }
                None => return Err(SolverError::NoMinorant),
            }
            let fun_lb = nxt_minorant.constant;

            nxt_lb += fun_lb;
            nxt_ub += fun_ub;
            self.nxt_vals[fidx] = fun_ub;

            // move center of minorant to cur_y
            nxt_minorant.move_center(-1.0, &self.nxt_d);
            self.new_cutval += nxt_minorant.constant;
            let minidx = self.master.add_minorant(fidx, nxt_minorant)?;
            self.minorants[fidx].push(MinorantInfo {
                index: minidx,
                multiplier: 0.0,
            });
            if minidx >= self.primals.len() {
                self.primals.resize_with(minidx + 1, || None);
            }
            self.primals[minidx] = Some(nxt_primal);
        }

        if self.new_cutval > self.cur_val + 1e-3 {
            // Capture the old center value *before* overwriting it so
            // both the log message and the iteration info report it.
            let old_val = self.cur_val;
            warn!(
                "New minorant has higher value in center new:{} old:{}",
                self.new_cutval, old_val
            );
            self.cur_val = self.new_cutval;
            self.iterinfos.push(IterationInfo::NewMinorantTooHigh {
                new: self.new_cutval,
                old: old_val,
            });
        }

        self.nxt_val = nxt_ub;

        // check for potential problems with relative precision of all kinds
        if nxt_lb <= descent_bnd {
            // lower bound gives descent step
            if nxt_ub > descent_bnd {
                // upper bound will produce null-step
                if self.cur_val - nxt_lb > (self.cur_val - self.nxt_mod) * self.params.nullstep_factor.max(0.5) {
                    warn!("Relative precision of returned objective interval enforces null-step.");
                    self.iterinfos.push(IterationInfo::UpperBoundNullStep);
                }
            }
        } else if self.cur_val - nxt_lb > 0.8 * (self.cur_val - self.nxt_mod) {
            // TODO: double check with ConicBundle if this test makes sense.
            // lower bound gives already a null step
            // subgradient won't yield much improvement
            warn!("Shallow cut (subgradient won't yield much improvement)");
            self.iterinfos.push(IterationInfo::ShallowCut);
        }

        debug!("Step");
        debug!("  cur_val    ={}", self.cur_val);
        debug!("  nxt_mod    ={}", self.nxt_mod);
        debug!("  nxt_ub     ={}", self.nxt_val);
        debug!("  descent_bnd={}", descent_bnd);

        // do a descent step or null step
        if nxt_ub <= descent_bnd {
            self.descent_step()?;
            Ok(Step::Descent)
        } else {
            self.null_step()?;
            Ok(Step::Null)
        }
    }

    /**
     * Return the bound on the function value that enforces a
     * nullstep.
     *
     * If the oracle guarantees that $f(\bar{y}) \ge$ this bound, the
     * bundle method will perform a nullstep.
     *
     * This value is $f(\hat{y}) - \varrho' \cdot \Delta$ where
     * $\Delta = f(\hat{y}) - \hat{f}(\bar{y})$ is the expected
     * progress and $\varrho'$ is the `nullstep_factor`.
     */
    fn get_nullstep_bound(&self) -> Real {
        let progress = self.cur_val - self.nxt_mod;
        self.cur_val - self.params.nullstep_factor * progress
    }

    /**
     * Return the bound the function value must be below of to enforce a descent step.
     *
     * If the oracle guarantees that $f(\bar{y}) \le$ this bound, the
     * bundle method will perform a descent step.
     *
     * This value is $f(\hat{y}) - \varrho \cdot \Delta$ where
     * $\Delta = f(\hat{y}) - \hat{f}(\bar{y})$ is the expected
     * progress and $\varrho$ is the `acceptance_factor`.
     */
    fn get_descent_bound(&self) -> Real {
        let progress = self.cur_val - self.nxt_mod;
        self.cur_val - self.params.acceptance_factor * progress
    }

    /**
     * Return the required relative precision for the computation.
     *
     * One tenth of the gap to the nullstep bound, normalized by the
     * magnitude of the center value, capped at `1e-3`.
     */
    fn get_relative_precision(&self) -> Real {
        let gap = self.cur_val - self.get_nullstep_bound();
        let scale = self.cur_val.abs() + 1.0;
        (0.1 * gap / scale).min(1e-3)
    }
}
Added src/solver/masterprocess.rs.


















































































































































































































































































1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
/*
 * Copyright (c) 2019 Frank Fischer <frank-fischer@shadow-soft.de>
 *
 * This program is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see  <http://www.gnu.org/licenses/>
 */

//! Asynchronous process solving a master problem.

use crossbeam::channel::{unbounded as channel, Receiver, Sender};
use log::{debug, warn};
use std::sync::Arc;
use threadpool::ThreadPool;

use super::sync::Error;
use crate::master::primalmaster::PrimalMaster;
use crate::master::MasterProblem;
use crate::problem::{FirstOrderProblem, SubgradientExtender};
use crate::{DVector, Minorant, Real};

/// Configuration information for setting up a master problem.
pub struct MasterConfig {
    /// The number of subproblems.
    pub num_subproblems: usize,
    /// The number of variables.
    pub num_vars: usize,
    /// The lower bounds on the variables.
    pub lower_bounds: Option<DVector>,
    /// The upper bounds on the variables.
    pub upper_bounds: Option<DVector>,
}

/// A task for the master problem.
///
/// Messages of this type are sent to the asynchronously running
/// master process. `Pr` is the primal type and `PErr` the error type
/// of the associated first order problem.
enum MasterTask<Pr, PErr, M>
where
    M: MasterProblem,
{
    /// Add new variables to the master problem.
    ///
    /// Each tuple holds the optional index of an existing (moved)
    /// variable — `None` for a genuinely new one — and two bound
    /// values (presumably lower/upper; cf. the solver's variable
    /// updates — TODO confirm). The boxed callback extends stored
    /// subgradients to the new variables.
    AddVariables(Vec<(Option<usize>, Real, Real)>, Box<SubgradientExtender<Pr, PErr>>),

    /// Add a new minorant for a subfunction to the master problem,
    /// together with its associated primal information.
    AddMinorant(usize, Minorant, Pr),

    /// Move the center of the master problem in the given direction.
    ///
    /// The `Real` appears to be a scaling factor for the direction
    /// vector — verify against the task handler.
    MoveCenter(Real, Arc<DVector>),

    /// Start a new computation of the master problem.
    Solve { center_value: Real },

    /// Compress the bundle.
    Compress,

    /// Set the weight parameter of the master problem.
    SetWeight { weight: Real },

    /// Return the current aggregated primal.
    ///
    /// The result is sent back through `tx`.
    GetAggregatedPrimal {
        subproblem: usize,
        tx: Sender<Result<Pr, M::Err>>,
    },
}

/// The response sent from a master process.
///
/// The response contains the evaluation results of the latest
/// solve of the master problem.
pub struct MasterResponse {
    /// The computed direction (presumably the master's primal
    /// optimum, mirroring the solver's `nxt_d` — TODO confirm).
    pub nxt_d: DVector,
    /// The model value at the new candidate (mirrors the solver's
    /// `nxt_mod`).
    pub nxt_mod: Real,
    /// Subgradient norm (mirrors the solver's `sgnorm`).
    pub sgnorm: Real,
    /// The number of internal iterations.
    pub cnt_updates: usize,
}

/// Sending end of the solver → master-process task channel.
type ToMasterSender<P, M> = Sender<MasterTask<<P as FirstOrderProblem>::Primal, <P as FirstOrderProblem>::Err, M>>;

/// Receiving end of the solver → master-process task channel.
type ToMasterReceiver<P, M> = Receiver<MasterTask<<P as FirstOrderProblem>::Primal, <P as FirstOrderProblem>::Err, M>>;

/// Sending end of the master-process → solver response channel.
type MasterSender<E> = Sender<Result<MasterResponse, E>>;

/// Receiving end of the master-process → solver response channel.
pub type MasterReceiver<E> = Receiver<Result<MasterResponse, E>>;

/// Handle to a master problem running on its own thread.
///
/// Tasks are submitted through `tx`; solve results (or a fatal error)
/// come back through `rx`.
pub struct MasterProcess<P, M>
where
    P: FirstOrderProblem,
    M: MasterProblem,
{
    /// The channel to transmit new tasks to the master problem.
    tx: ToMasterSender<P, M>,

    /// The channel to receive solutions from the master problem.
    pub rx: MasterReceiver<M::Err>,

    // NOTE(review): `M` already appears in the types of `tx` and `rx`, so
    // this marker looks redundant -- confirm before removing.
    phantom: std::marker::PhantomData<M>,
}

impl<P, M> MasterProcess<P, M>
where
    P: FirstOrderProblem,
    P::Primal: Send + 'static,
    P::Err: Into<Box<dyn std::error::Error + Sync + Send>> + 'static,
    M: MasterProblem + Send + 'static,
    M::MinorantIndex: std::hash::Hash,
    M::Err: Send + 'static,
{
    /// Start a new master process on a thread of `threadpool`.
    ///
    /// The spawned thread runs the master's main loop until the task channel
    /// is closed or an error occurs; a fatal error is forwarded to the
    /// response channel (best effort) before the thread stops.
    pub fn start(master: M, master_config: MasterConfig, threadpool: &mut ThreadPool) -> Self {
        // Create a pair of communication channels.
        let (to_master_tx, to_master_rx) = channel();
        let (from_master_tx, from_master_rx) = channel();

        // Spawn the master process thread.
        threadpool.execute(move || {
            debug!("Master process started");
            let mut from_master_tx = from_master_tx;
            if let Err(err) = Self::master_main(master, master_config, &mut from_master_tx, to_master_rx) {
                // Best effort: if the receiver is already gone there is
                // nobody left to report the error to, so ignore send errors.
                #[allow(unused_must_use)]
                {
                    from_master_tx.send(Err(err));
                }
            }
            debug!("Master process stopped");
        });

        MasterProcess {
            tx: to_master_tx,
            rx: from_master_rx,
            phantom: std::marker::PhantomData,
        }
    }

    /// Add new variables to the master problem.
    pub fn add_vars(
        &mut self,
        vars: Vec<(Option<usize>, Real, Real)>,
        sgext: Box<SubgradientExtender<P::Primal, P::Err>>,
    ) -> Result<(), Error<P::Err>>
    where
        P::Err: 'static,
    {
        self.tx
            .send(MasterTask::AddVariables(vars, sgext))
            .map_err(|err| Error::Process(err.into()))
    }

    /// Add a new minorant to the master problem model.
    ///
    /// This adds the specified `minorant` with associated `primal` data to the
    /// model of subproblem `i`.
    pub fn add_minorant(&mut self, i: usize, minorant: Minorant, primal: P::Primal) -> Result<(), Error<P::Err>> {
        self.tx
            .send(MasterTask::AddMinorant(i, minorant, primal))
            .map_err(|err| Error::Process(err.into()))
    }

    /// Move the center of the master problem.
    ///
    /// This moves the master problem's center in direction $\\alpha \\cdot d$.
    pub fn move_center(&mut self, alpha: Real, d: Arc<DVector>) -> Result<(), Error<P::Err>> {
        self.tx
            .send(MasterTask::MoveCenter(alpha, d))
            .map_err(|err| Error::Process(err.into()))
    }

    /// Solve the master problem.
    ///
    /// `center_value` is the current function value in the center.
    /// Once the master problem is solved the process will send a
    /// [`MasterResponse`] message to the `tx` channel.
    pub fn solve(&mut self, center_value: Real) -> Result<(), Error<P::Err>> {
        self.tx
            .send(MasterTask::Solve { center_value })
            .map_err(|err| Error::Process(err.into()))
    }

    /// Compresses the model.
    pub fn compress(&mut self) -> Result<(), Error<P::Err>> {
        self.tx
            .send(MasterTask::Compress)
            .map_err(|err| Error::Process(err.into()))
    }

    /// Sets the new weight of the proximal term in the master problem.
    pub fn set_weight(&mut self, weight: Real) -> Result<(), Error<P::Err>> {
        self.tx
            .send(MasterTask::SetWeight { weight })
            .map_err(|err| Error::Process(err.into()))
    }

    /// Get the current aggregated primal for a certain subproblem.
    ///
    /// This call blocks until the master process answers on a freshly
    /// created one-shot channel.
    pub fn get_aggregated_primal(&self, subproblem: usize) -> Result<P::Primal, Error<P::Err>> {
        let (tx, rx) = channel();
        self.tx
            .send(MasterTask::GetAggregatedPrimal { subproblem, tx })
            .map_err(|err| Error::Process(err.into()))?;
        rx.recv()
            .map_err(|err| Error::Process(err.into()))?
            .map_err(|err| Error::Master(err.into()))
    }

    /// The main loop of the master process.
    ///
    /// Initializes the master with `master_config`, then processes tasks from
    /// `rx` until the channel is closed. Solve results are reported through
    /// `tx`; a closed response channel terminates the loop early.
    fn master_main(
        master: M,
        master_config: MasterConfig,
        tx: &mut MasterSender<M::Err>,
        rx: ToMasterReceiver<P, M>,
    ) -> Result<(), M::Err> {
        let mut master = PrimalMaster::<_, P::Primal>::new(master);

        // Initialize the master problem.
        master.set_num_subproblems(master_config.num_subproblems)?;
        master.set_vars(
            master_config.num_vars,
            master_config.lower_bounds,
            master_config.upper_bounds,
        )?;

        // The main iteration: wait for new tasks.
        for m in rx {
            match m {
                MasterTask::AddVariables(vars, sgext) => {
                    debug!("master: add {} variables to the subproblem", vars.len());
                    master.add_vars(vars, sgext)?;
                }
                MasterTask::AddMinorant(i, m, primal) => {
                    debug!("master: add minorant to subproblem {}", i);
                    master.add_minorant(i, m, primal)?;
                }
                MasterTask::MoveCenter(alpha, d) => {
                    debug!("master: move center");
                    master.move_center(alpha, &d);
                }
                MasterTask::Compress => {
                    debug!("Compress bundle");
                    master.compress()?;
                }
                MasterTask::Solve { center_value } => {
                    debug!("master: solve with center_value {}", center_value);
                    master.solve(center_value)?;
                    let master_response = MasterResponse {
                        nxt_d: master.get_primopt(),
                        nxt_mod: master.get_primoptval(),
                        sgnorm: master.get_dualoptnorm2().sqrt(),
                        cnt_updates: master.cnt_updates(),
                    };
                    if let Err(err) = tx.send(Ok(master_response)) {
                        warn!("Master process cancelled because of channel error: {}", err);
                        break;
                    }
                }
                MasterTask::SetWeight { weight } => {
                    debug!("master: set weight {}", weight);
                    master.set_weight(weight)?;
                }
                MasterTask::GetAggregatedPrimal { subproblem, tx } => {
                    debug!("master: get aggregated primal for {}", subproblem);
                    if tx.send(master.aggregated_primal(subproblem)).is_err() {
                        warn!("Sending of aggregated primal for {} failed", subproblem);
                    };
                }
            };
        }

        Ok(())
    }
}
Added src/solver/sync.rs.


































































































































































































































































































































































































































































































































































































































































































































































































































































































































1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
/*
 * Copyright (c) 2019 Frank Fischer <frank-fischer@shadow-soft.de>
 *
 * This program is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see  <http://www.gnu.org/licenses/>
 */

//! An asynchronous parallel bundle solver.

use crossbeam::channel::{select, unbounded as channel, Receiver, Sender};
use log::{debug, info};
use num_cpus;
use num_traits::Float;
use std::sync::Arc;
use std::time::Instant;
use threadpool::ThreadPool;

use crate::{DVector, Real};

use super::masterprocess::{MasterConfig, MasterProcess, MasterResponse};
use crate::master::{self, MasterProblem};
use crate::problem::{EvalResult, FirstOrderProblem, Update, UpdateState};
use crate::terminator::{StandardTerminatable, StandardTerminator, Terminator};
use crate::weighter::{HKWeightable, HKWeighter, Weighter};

/// The default iteration limit.
pub const DEFAULT_ITERATION_LIMIT: usize = 10_000;

/// The default solver.
///
/// Uses the standard terminator, the HK weight heuristic and the full
/// master problem builder.
pub type DefaultSolver<P> = Solver<P, StandardTerminator, HKWeighter, crate::master::FullMasterBuilder>;

/// The minimal bundle solver.
///
/// Same configuration as [`DefaultSolver`], but with the minimal master
/// problem builder.
pub type NoBundleSolver<P> = Solver<P, StandardTerminator, HKWeighter, crate::master::MinimalMasterBuilder>;

/// Error raised by the parallel bundle [`Solver`].
#[derive(Debug)]
pub enum Error<E> {
    /// An error raised when creating a new master problem solver.
    BuildMaster(Box<dyn std::error::Error>),
    /// An error raised by the master problem process.
    Master(Box<dyn std::error::Error>),
    /// The iteration limit has been reached.
    IterationLimit { limit: usize },
    /// An error raised by a subproblem evaluation.
    Evaluation(E),
    /// An error raised during a subproblem update.
    Update(E),
    /// The dimension of some data is wrong.
    Dimension(String),
    /// Invalid bounds for a variable.
    InvalidBounds { lower: Real, upper: Real },
    /// The value of a variable is outside its bounds.
    ViolatedBounds { lower: Real, upper: Real, value: Real },
    /// The variable index is out of bounds.
    InvalidVariable { index: usize, nvars: usize },
    /// An error occurred in a subprocess.
    Process(Box<dyn std::error::Error>),
    /// A method requiring an initialized solver has been called.
    NotInitialized,
    /// The problem has not been solved yet.
    NotSolved,
}

impl<E> std::fmt::Display for Error<E>
where
    E: std::fmt::Display,
{
    /// Formats the error as a human-readable message.
    ///
    /// All arms use `write!` consistently: a `Display` implementation
    /// should not emit a trailing newline, since callers typically embed
    /// the message in their own output.
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::result::Result<(), std::fmt::Error> {
        use Error::*;
        match self {
            BuildMaster(err) => write!(fmt, "Cannot create master problem solver: {}", err),
            Master(err) => write!(fmt, "Error in master problem: {}", err),
            IterationLimit { limit } => write!(fmt, "The iteration limit has been reached: {}", limit),
            Evaluation(err) => write!(fmt, "Error in subproblem evaluation: {}", err),
            Update(err) => write!(fmt, "Error in subproblem update: {}", err),
            Dimension(what) => write!(fmt, "Wrong dimension for {}", what),
            InvalidBounds { lower, upper } => write!(fmt, "Invalid bounds, lower:{}, upper:{}", lower, upper),
            ViolatedBounds { lower, upper, value } => write!(
                fmt,
                "Violated bounds, lower:{}, upper:{}, value:{}",
                lower, upper, value
            ),
            InvalidVariable { index, nvars } => {
                write!(fmt, "Variable index out of bounds, got:{} must be < {}", index, nvars)
            }
            Process(err) => write!(fmt, "Error in subprocess: {}", err),
            NotInitialized => write!(fmt, "The solver must be initialized (called Solver::init()?)"),
            NotSolved => write!(fmt, "The problem has not been solved yet"),
        }
    }
}

impl<E> std::error::Error for Error<E>
where
    E: std::error::Error + 'static,
{
    /// Returns the underlying cause of this error, if any.
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        use Error::*;
        match self {
            BuildMaster(err) => Some(err.as_ref()),
            Master(err) => Some(err.as_ref()),
            Evaluation(err) => Some(err),
            // Bug fix: `Update` wraps a subproblem error just like
            // `Evaluation` and must expose it as the source as well.
            Update(err) => Some(err),
            Process(err) => Some(err.as_ref()),
            _ => None,
        }
    }
}

/// Sending half of the channel carrying subproblem evaluation results.
type ClientSender<P> =
    Sender<std::result::Result<EvalResult<usize, <P as FirstOrderProblem>::Primal>, <P as FirstOrderProblem>::Err>>;

/// Receiving half of the channel carrying subproblem evaluation results.
type ClientReceiver<P> =
    Receiver<std::result::Result<EvalResult<usize, <P as FirstOrderProblem>::Primal>, <P as FirstOrderProblem>::Err>>;

/// Parameters for tuning the solver.
#[derive(Debug, Clone)]
pub struct Parameters {
    /// The descent step acceptance factor, must be in (0,1).
    ///
    /// The default value is 0.1.
    acceptance_factor: Real,
}

impl Default for Parameters {
    fn default() -> Self {
        Parameters { acceptance_factor: 0.1 }
    }
}

impl Parameters {
    /// Change the descent step acceptance factor.
    ///
    /// The default value is 0.1.
    ///
    /// # Panics
    ///
    /// Panics unless `acceptance_factor` lies strictly between 0 and 1.
    pub fn set_acceptance_factor(&mut self, acceptance_factor: Real) {
        if !(acceptance_factor > 0.0 && acceptance_factor < 1.0) {
            panic!(
                "Descent step acceptance factors must be in (0,1), got: {}",
                acceptance_factor
            );
        }
        self.acceptance_factor = acceptance_factor;
    }
}

/// The step type that has been performed in one bundle iteration.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum Step {
    /// A null step has been performed (center unchanged).
    Null,
    /// A descent step has been performed (center moved to the candidate).
    Descent,
    /// No step but the algorithm has been terminated.
    Term,
}

/// State of the bundle algorithm shared with the terminator and weighter.
pub struct SolverData {
    /// Current center of stability.
    cur_y: DVector,

    /// Function value in the current point.
    cur_val: Real,

    /// Function value at the current candidate.
    nxt_val: Real,

    /// Model value at the current candidate.
    nxt_mod: Real,

    /// The value of the new minorant in the current center.
    new_cutval: Real,

    /// The current expected progress.
    ///
    /// This value is actually `cur_val - nxt_val`. We store it separately only
    /// for debugging purposes because after a descent step `cur_val` will be
    /// changed and we could not see the "old" expected progress anymore that
    /// led to the descent step.
    expected_progress: Real,

    /// Norm of current aggregated subgradient.
    sgnorm: Real,

    /// The currently used master problem weight.
    cur_weight: Real,
}

impl SolverData {
    /// Reset solver data to initial values for a run starting at `y`.
    ///
    /// Almost every quantity is set to an (appropriately signed) infinity so
    /// that the first evaluation necessarily triggers a null-step.
    fn init(&mut self, y: DVector) {
        let inf = Real::infinity();
        self.cur_y = y;
        self.cur_val = inf;
        self.nxt_val = inf;
        self.nxt_mod = -inf;
        self.new_cutval = -inf;
        self.expected_progress = inf;
        self.sgnorm = inf;
        self.cur_weight = 1.0;
    }
}

impl StandardTerminatable for SolverData {
    /// The function value at the current center of stability.
    fn center_value(&self) -> Real {
        self.cur_val
    }

    /// The currently expected progress (`cur_val - nxt_val`).
    fn expected_progress(&self) -> Real {
        self.expected_progress
    }
}

impl HKWeightable for SolverData {
    /// The currently used master problem weight.
    fn current_weight(&self) -> Real {
        self.cur_weight
    }

    /// The current center of stability.
    fn center(&self) -> &DVector {
        &self.cur_y
    }

    /// The function value at the center.
    fn center_value(&self) -> Real {
        self.cur_val
    }

    /// The function value at the current candidate.
    fn candidate_value(&self) -> Real {
        self.nxt_val
    }

    /// The model value at the current candidate.
    fn candidate_model(&self) -> Real {
        self.nxt_mod
    }

    /// The value of the new minorant in the current center.
    fn new_cutvalue(&self) -> Real {
        self.new_cutval
    }

    /// The norm of the current aggregated subgradient.
    fn sgnorm(&self) -> Real {
        self.sgnorm
    }
}

/// Internal data used during the main iteration loop.
struct IterData {
    /// Maximal number of iterations.
    max_iter: usize,
    /// Number of iterations performed so far.
    cnt_iter: usize,
    /// Number of problem updates performed so far.
    cnt_updates: usize,
    /// Latest upper bound per subproblem.
    nxt_ubs: Vec<Real>,
    /// Number of subproblems whose upper bound is still outstanding.
    cnt_remaining_ubs: usize,
    /// Latest cut value per subproblem.
    nxt_cutvals: Vec<Real>,
    /// Number of subproblems whose minorant is still outstanding.
    cnt_remaining_mins: usize,
    /// The current candidate direction.
    nxt_d: Arc<DVector>,
    /// The current candidate point.
    nxt_y: Arc<DVector>,
    /// True if the problem has been updated after the last evaluation.
    updated: bool,
}

impl IterData {
    /// Create fresh iteration state for a solve run.
    ///
    /// Upper bounds start at +infinity and cut values at -infinity so the
    /// first evaluation of every subproblem overwrites them.
    fn new(num_subproblems: usize, num_variables: usize, max_iter: usize) -> Self {
        let nxt_ubs = vec![Real::infinity(); num_subproblems];
        let nxt_cutvals = vec![-Real::infinity(); num_subproblems];
        IterData {
            max_iter,
            cnt_iter: 0,
            cnt_updates: 0,
            nxt_ubs,
            cnt_remaining_ubs: num_subproblems,
            nxt_cutvals,
            cnt_remaining_mins: num_subproblems,
            nxt_d: Arc::new(dvec![0.0; num_variables]),
            nxt_y: Arc::new(dvec![]),
            updated: true,
        }
    }
}

/// Data providing access for updating the problem.
///
/// Borrowed snapshot of the solver state handed to the problem's update hook.
struct UpdateData<'a, P, M>
where
    P: FirstOrderProblem,
    M: MasterProblem,
{
    /// Type of step.
    step: Step,

    /// Current center of stability.
    cur_y: &'a DVector,

    /// Current candidate.
    nxt_y: &'a Arc<DVector>,

    /// The master process.
    master_proc: &'a MasterProcess<P, M>,
}

impl<'a, P, M> UpdateState<P::Primal> for UpdateData<'a, P, M>
where
    P: FirstOrderProblem,
    P::Err: Into<Box<dyn std::error::Error + Sync + Send>> + 'static,
    M: MasterProblem,
    M::MinorantIndex: std::hash::Hash,
{
    /// Returns `true` iff the last step was a descent step.
    fn was_descent(&self) -> bool {
        self.step == Step::Descent
    }

    /// Returns the current center of stability (cloned into a new `Arc`).
    fn center(&self) -> Arc<DVector> {
        Arc::new(self.cur_y.clone())
    }

    /// Returns the current candidate point (cheap `Arc` clone, no copy).
    fn candidate(&self) -> Arc<DVector> {
        self.nxt_y.clone()
    }

    /// Fetches the aggregated primal of subproblem `i` from the master process.
    ///
    /// The error is first mapped to a plain `String`, presumably because the
    /// process error type does not satisfy the `Debug` bound required by
    /// `expect` -- TODO confirm.
    ///
    /// # Panics
    ///
    /// Panics if the master process cannot deliver the aggregated primal.
    fn aggregated_primal(&self, i: usize) -> P::Primal {
        self.master_proc
            .get_aggregated_primal(i)
            .map_err(|_| "get_aggregated_primal".to_string())
            .expect("Cannot get aggregated primal from master process")
    }
}

/// Implementation of a parallel bundle method.
pub struct Solver<P, T = StandardTerminator, W = HKWeighter, M = crate::master::FullMasterBuilder>
where
    P: FirstOrderProblem,
    M: master::Builder,
{
    /// Parameters for the solver.
    pub params: Parameters,

    /// Termination predicate.
    pub terminator: T,

    /// Weighter heuristic.
    pub weighter: W,

    /// The threadpool of the solver.
    pub threadpool: ThreadPool,

    /// The master problem builder.
    pub master: M,

    /// The first order problem.
    problem: P,

    /// The algorithm data.
    data: SolverData,

    /// The master problem process.
    master_proc: Option<MasterProcess<P, M::MasterProblem>>,

    /// The sending half of the evaluation channel; cloned and handed to
    /// subproblem evaluations so they can report their results back.
    client_tx: Option<ClientSender<P>>,

    /// The channel to receive the evaluation results from subproblems.
    client_rx: Option<ClientReceiver<P>>,

    /// Number of descent steps.
    cnt_descent: usize,

    /// Number of null steps.
    cnt_null: usize,

    /// Number of function evaluation.
    cnt_evals: usize,

    /// Time when the solution process started.
    ///
    /// This is actually the time of the last call to `Solver::init`.
    start_time: Instant,
}

impl<P, T, W, M> Solver<P, T, W, M>
where
    P: FirstOrderProblem,
    P::Err: Into<Box<dyn std::error::Error + Sync + Send>> + 'static,
    T: Terminator<SolverData> + Default,
    W: Weighter<SolverData> + Default,
    M: master::Builder,
    M::MasterProblem: MasterProblem,
    <M::MasterProblem as MasterProblem>::MinorantIndex: std::hash::Hash,
{
    /// Create a new parallel bundle solver.
    pub fn new(problem: P) -> Self
    where
        M: Default,
    {
        Solver {
            params: Parameters::default(),
            terminator: Default::default(),
            weighter: Default::default(),
            problem,
            data: SolverData {
                cur_y: dvec![],
                cur_val: 0.0,
                nxt_val: 0.0,
                nxt_mod: 0.0,
                new_cutval: 0.0,
                expected_progress: 0.0,
                sgnorm: 0.0,
                cur_weight: 1.0,
            },

            threadpool: ThreadPool::with_name("Parallel bundle solver".to_string(), num_cpus::get()),
            master: M::default(),
            master_proc: None,
            client_tx: None,
            client_rx: None,

            cnt_descent: 0,
            cnt_null: 0,
            cnt_evals: 0,

            start_time: Instant::now(),
        }
    }

    /// Create a new parallel bundle solver.
    pub fn with_master(problem: P, master: M) -> Self {
        Solver {
            params: Parameters::default(),
            terminator: Default::default(),
            weighter: Default::default(),
            problem,
            data: SolverData {
                cur_y: dvec![],
                cur_val: 0.0,
                nxt_val: 0.0,
                nxt_mod: 0.0,
                new_cutval: 0.0,
                expected_progress: 0.0,
                sgnorm: 0.0,
                cur_weight: 1.0,
            },

            threadpool: ThreadPool::with_name("Parallel bundle solver".to_string(), num_cpus::get()),
            master,
            master_proc: None,
            client_tx: None,
            client_rx: None,

            cnt_descent: 0,
            cnt_null: 0,
            cnt_evals: 0,

            start_time: Instant::now(),
        }
    }

    /// Return the underlying threadpool.
    ///
    /// In order to use the same threadpool for concurrent processes,
    /// just clone the returned `ThreadPool`.
    pub fn threadpool(&self) -> &ThreadPool {
        &self.threadpool
    }

    /// Set the threadpool.
    ///
    /// This function allows to use a specific threadpool for all processes
    /// spawned by the solver. Note that this does not involve any threads
    /// used by the problem because the solver is not responsible for executing
    /// the evaluation process of the subproblems. However, the problem might
    /// use the same threadpool as the solver.
    pub fn set_threadpool(&mut self, threadpool: ThreadPool) {
        self.threadpool = threadpool;
    }

    /// Return the current problem associated with the solver.
    pub fn problem(&self) -> &P {
        &self.problem
    }

    /// Initialize the solver.
    ///
    /// This will reset the internal data structures so that a new fresh
    /// solution process can be started.
    ///
    /// It will also setup all worker processes.
    ///
    /// This function is automatically called by [`Solver::solve`].
    pub fn init(&mut self) -> Result<(), Error<P::Err>> {
        debug!("Initialize solver");

        let n = self.problem.num_variables();
        let m = self.problem.num_subproblems();

        self.data.init(dvec![0.0; n]);
        self.cnt_descent = 0;
        self.cnt_null = 0;
        self.cnt_evals = 0;

        let (tx, rx) = channel();
        self.client_tx = Some(tx);
        self.client_rx = Some(rx);

        let master_config = MasterConfig {
            num_subproblems: m,
            num_vars: n,
            lower_bounds: self.problem.lower_bounds().map(DVector),
            upper_bounds: self.problem.upper_bounds().map(DVector),
        };

        if master_config
            .lower_bounds
            .as_ref()
            .map(|lb| lb.len() != n)
            .unwrap_or(false)
        {
            return Err(Error::Dimension("lower bounds".to_string()));
        }
        if master_config
            .upper_bounds
            .as_ref()
            .map(|ub| ub.len() != n)
            .unwrap_or(false)
        {
            return Err(Error::Dimension("upper bounds".to_string()));
        }

        debug!("Start master process");
        self.master_proc = Some(MasterProcess::start(
            self.master.build().map_err(|err| Error::BuildMaster(err.into()))?,
            master_config,
            &mut self.threadpool,
        ));

        debug!("Initial problem evaluation");
        // We need an initial evaluation of all oracles for the first center.
        let y = Arc::new(self.data.cur_y.clone());
        for i in 0..m {
            self.problem
                .evaluate(i, y.clone(), i, self.client_tx.clone().unwrap())
                .map_err(Error::Evaluation)?;
        }

        debug!("Initialization complete");

        self.start_time = Instant::now();

        Ok(())
    }

    /// Solve the problem with the default maximal iteration limit [`DEFAULT_ITERATION_LIMIT`].
    pub fn solve(&mut self) -> Result<(), Error<P::Err>> {
        self.solve_with_limit(DEFAULT_ITERATION_LIMIT)
    }

    /// Solve the problem with a maximal iteration limit.
    pub fn solve_with_limit(&mut self, limit: usize) -> Result<(), Error<P::Err>> {
        // First initialize the internal data structures.
        self.init()?;

        if self.solve_iter(limit)? {
            Ok(())
        } else {
            Err(Error::IterationLimit { limit })
        }
    }

    /// Solve the problem but stop after at most `niter` iterations.
    ///
    /// The function returns `Ok(true)` if the termination criterion
    /// has been satisfied. Otherwise it returns `Ok(false)` or an
    /// error code.
    ///
    /// If this function is called again, the solution process is
    /// continued from the previous point. Because of this one *must*
    /// call `init()` before the first call to this function.
    pub fn solve_iter(&mut self, niter: usize) -> Result<bool, Error<P::Err>> {
        debug!("Start solving up to {} iterations", niter);

        let mut itdata = IterData::new(self.problem.num_subproblems(), self.problem.num_variables(), niter);

        loop {
            select! {
                recv(self.client_rx.as_ref().ok_or(Error::NotInitialized)?) -> msg => {
                    let msg = msg
                        .map_err(|err| Error::Process(err.into()))?
                        .map_err(Error::Evaluation)?;
                    if self.handle_client_response(msg, &mut itdata)? {
                        return Ok(false);
                    }
                },
                recv(self.master_proc.as_ref().ok_or(Error::NotInitialized)?.rx) -> msg => {
                    debug!("Receive master response");
                    // Receive result (new candidate) from the master
                    let master_res = msg
                        .map_err(|err| Error::Process(err.into()))?
                        .map_err(|err| Error::Master(err.into()))?;

                    if self.handle_master_response(master_res, &mut itdata)? {
                        return Ok(true);
                    }
                },
            }
        }
    }

    /// Handle a response from a subproblem evaluation.
    ///
    /// The function returns `Ok(true)` if the final iteration count has been reached.
    fn handle_client_response(
        &mut self,
        msg: EvalResult<usize, <P as FirstOrderProblem>::Primal>,
        itdata: &mut IterData,
    ) -> Result<bool, Error<P::Err>> {
        let master = self.master_proc.as_mut().ok_or(Error::NotInitialized)?;
        match msg {
            EvalResult::ObjectiveValue { index, value } => {
                debug!("Receive objective from subproblem {}: {}", index, value);
                if itdata.nxt_ubs[index].is_infinite() {
                    itdata.cnt_remaining_ubs -= 1;
                }
                itdata.nxt_ubs[index] = itdata.nxt_ubs[index].min(value);
            }
            EvalResult::Minorant {
                index,
                mut minorant,
                primal,
            } => {
                debug!("Receive minorant from subproblem {}", index);
                if itdata.nxt_cutvals[index].is_infinite() {
                    itdata.cnt_remaining_mins -= 1;
                }
                // move center of minorant to cur_y
                minorant.move_center(-1.0, &itdata.nxt_d);
                itdata.nxt_cutvals[index] = itdata.nxt_cutvals[index].max(minorant.constant);
                // add minorant to master problem
                master.add_minorant(index, minorant, primal)?;
            }
        }

        if itdata.cnt_remaining_ubs > 0 || itdata.cnt_remaining_mins > 0 {
            // Haven't received data from all subproblems, yet.
            return Ok(false);
        }

        // All subproblems have been evaluated, do a step.
        let nxt_ub = itdata.nxt_ubs.iter().sum::<Real>();
        let descent_bnd = Self::get_descent_bound(self.params.acceptance_factor, &self.data);

        self.data.nxt_val = nxt_ub;
        self.data.new_cutval = itdata.nxt_cutvals.iter().sum::<Real>();

        debug!("Step");
        debug!("  cur_val    ={}", self.data.cur_val);
        debug!("  nxt_mod    ={}", self.data.nxt_mod);
        debug!("  nxt_ub     ={}", nxt_ub);
        debug!("  descent_bnd={}", descent_bnd);

        itdata.updated = false;
        let step;
        if self.data.cur_val.is_infinite() {
            // This is the first evaluation. We effectively get
            // the function value at the current center but
            // we do not have a model estimate yet. Hence, we do not know
            // a good guess for the weight.
            step = Step::Descent;
            self.data.cur_val = nxt_ub;
            self.data.cur_weight = Real::infinity();
            master.set_weight(1.0)?;

            itdata.updated = true;

            debug!("First Step");
            debug!("  cur_val={}", self.data.cur_val);
            debug!("  cur_y={}", self.data.cur_y);
        } else if nxt_ub <= descent_bnd {
            step = Step::Descent;
            self.cnt_descent += 1;

            // Note that we must update the weight *before* we
            // change the internal data, so the old information
            // that caused the descent step is still available.
            self.data.cur_weight = self.weighter.descent_weight(&self.data);
            self.data.cur_y = itdata.nxt_y.as_ref().clone();
            self.data.cur_val = nxt_ub;

            master.move_center(1.0, itdata.nxt_d.clone())?;
            master.set_weight(self.data.cur_weight)?;

            debug!("Descent Step");
            debug!("  dir ={}", itdata.nxt_d);
            debug!("  newy={}", self.data.cur_y);
        } else {
            step = Step::Null;
            self.cnt_null += 1;
            self.data.cur_weight = self.weighter.null_weight(&self.data);
            master.set_weight(self.data.cur_weight)?;
        }

        Self::show_info(
            &self.start_time,
            step,
            &self.data,
            self.cnt_descent,
            self.cnt_null,
            itdata.cnt_updates,
        );
        itdata.cnt_iter += 1;

        // Update problem.
        if Self::update_problem(&mut self.problem, step, &mut self.data, itdata, master)? {
            itdata.updated = true;
        }

        // Compute the new candidate. The main loop will wait for the result of
        // this solution process of the master problem.
        master.solve(self.data.cur_val)?;

        Ok(itdata.cnt_iter >= itdata.max_iter)
    }

    fn handle_master_response(
        &mut self,
        master_res: MasterResponse,
        itdata: &mut IterData,
    ) -> Result<bool, Error<P::Err>> {
        let master = self.master_proc.as_mut().ok_or(Error::NotInitialized)?;

        self.data.nxt_mod = master_res.nxt_mod;
        self.data.sgnorm = master_res.sgnorm;
        self.data.expected_progress = self.data.cur_val - self.data.nxt_mod;
        itdata.cnt_updates = master_res.cnt_updates;

        // If this is the very first solution of the model,
        // we use its result as to make a good guess for the initial weight
        // of the proximal term and resolve.
        if self.data.cur_weight.is_infinite() {
            self.data.cur_weight = self.weighter.initial_weight(&self.data);
            master.set_weight(self.data.cur_weight)?;
            master.solve(self.data.cur_val)?;
            return Ok(false);
        }

        if self.terminator.terminate(&self.data) && !itdata.updated {
            Self::show_info(
                &self.start_time,
                Step::Term,
                &self.data,
                self.cnt_descent,
                self.cnt_null,
                itdata.cnt_updates,
            );
            info!("Termination criterion satisfied");
            return Ok(true);
        }

        // Compress bundle
        master.compress()?;

        // Compute new candidate.
        let mut next_y = dvec![];
        itdata.nxt_d = Arc::new(master_res.nxt_d);
        next_y.add(&self.data.cur_y, &itdata.nxt_d);
        itdata.nxt_y = Arc::new(next_y);

        // Reset evaluation data.
        itdata.nxt_ubs.clear();
        itdata.nxt_ubs.resize(self.problem.num_subproblems(), Real::infinity());
        itdata.cnt_remaining_ubs = self.problem.num_subproblems();
        itdata.nxt_cutvals.clear();
        itdata
            .nxt_cutvals
            .resize(self.problem.num_subproblems(), -Real::infinity());
        itdata.cnt_remaining_mins = self.problem.num_subproblems();

        // Start evaluation of all subproblems at the new candidate.
        let client_tx = self.client_tx.as_ref().ok_or(Error::NotInitialized)?;
        for i in 0..self.problem.num_subproblems() {
            self.problem
                .evaluate(i, itdata.nxt_y.clone(), i, client_tx.clone())
                .map_err(Error::Evaluation)?;
        }
        Ok(false)
    }

    fn update_problem(
        problem: &mut P,
        step: Step,
        data: &mut SolverData,
        itdata: &mut IterData,
        master_proc: &mut MasterProcess<P, M::MasterProblem>,
    ) -> Result<bool, Error<P::Err>> {
        let (update_tx, update_rx) = channel();
        problem
            .update(
                &UpdateData {
                    cur_y: &data.cur_y,
                    nxt_y: &itdata.nxt_y,
                    step,
                    master_proc,
                },
                itdata.cnt_iter,
                update_tx,
            )
            .map_err(Error::Update)?;

        let mut have_update = false;
        for update in update_rx {
            let update = update.map_err(Error::Update)?;
            have_update = true;
            match update {
                Update::AddVariables { bounds, sgext, .. } => {
                    let mut newvars = Vec::with_capacity(bounds.len());
                    for (lower, upper) in bounds {
                        if lower > upper {
                            return Err(Error::InvalidBounds { lower, upper });
                        }
                        let value = if lower > 0.0 {
                            lower
                        } else if upper < 0.0 {
                            upper
                        } else {
                            0.0
                        };
                        //self.bounds.push((lower, upper));
                        newvars.push((None, lower - value, upper - value, value));
                    }
                    if !newvars.is_empty() {
                        // modify moved variables
                        for (index, val) in newvars.iter().filter_map(|v| v.0.map(|i| (i, v.3))) {
                            data.cur_y[index] = val;
                        }

                        // add new variables
                        data.cur_y.extend(newvars.iter().filter(|v| v.0.is_none()).map(|v| v.3));

                        master_proc.add_vars(newvars.iter().map(|v| (v.0, v.1, v.2)).collect(), sgext)?;
                    }
                }
            }
        }

        Ok(have_update)
    }

    /// Return the bound the function value must be below of to enforce a descent step.
    ///
    /// If the oracle guarantees that $f(\bar{y}) \le$ this bound, the
    /// bundle method will perform a descent step.
    ///
    /// This value is $f(\hat{y}) + \varrho \cdot \Delta$ where
    /// $\Delta = f(\hat{y}) - \hat{f}(\bar{y})$ is the expected
    /// progress and $\varrho$ is the `acceptance_factor`.
    fn get_descent_bound(acceptance_factor: Real, data: &SolverData) -> Real {
        data.cur_val - acceptance_factor * (data.cur_val - data.nxt_mod)
    }

    fn show_info(
        start_time: &Instant,
        step: Step,
        data: &SolverData,
        cnt_descent: usize,
        cnt_null: usize,
        cnt_updates: usize,
    ) {
        let time = start_time.elapsed();
        info!(
            "{} {:0>2}:{:0>2}:{:0>2}.{:0>2} {:4} {:4} {:4}{:1}  {:9.4} {:9.4} \
             {:12.6e}({:12.6e}) {:12.6e}",
            if step == Step::Term { "_endit" } else { "endit " },
            time.as_secs() / 3600,
            (time.as_secs() / 60) % 60,
            time.as_secs() % 60,
            time.subsec_nanos() / 10_000_000,
            cnt_descent,
            cnt_descent + cnt_null,
            cnt_updates,
            if step == Step::Descent { "*" } else { " " },
            data.cur_weight,
            data.expected_progress(),
            data.nxt_mod,
            data.nxt_val,
            data.cur_val
        );
    }

    /// Return the aggregated primal of the given subproblem.
    pub fn aggregated_primal(&self, subproblem: usize) -> Result<P::Primal, Error<P::Err>> {
        Ok(self
            .master_proc
            .as_ref()
            .ok_or(Error::NotSolved)?
            .get_aggregated_primal(subproblem)?)
    }
}
Deleted src/vector.rs.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287































































































































































































































































































-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
// Copyright (c) 2016, 2017, 2018, 2019 Frank Fischer <frank-fischer@shadow-soft.de>
//
// This program is free software: you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see  <http://www.gnu.org/licenses/>
//

//! Finite-dimensional sparse and dense vectors.

use crate::{Aggregatable, Real};
use std::fmt;
use std::ops::{Deref, DerefMut};
// use std::cmp::min;
use std::borrow::Borrow;
use std::iter::FromIterator;
use std::vec::IntoIter;

#[cfg(feature = "blas")]
use {openblas_src as _, rs_blas as blas, std::os::raw::c_int};

/// Type of dense vectors.
///
/// A thin newtype around `Vec<Real>`; the `Deref`/`DerefMut` impls below
/// expose the full `Vec` API directly on a `DVector`.
#[derive(Debug, Clone, PartialEq, Default)]
pub struct DVector(pub Vec<Real>);

impl Deref for DVector {
    type Target = Vec<Real>;

    /// Allow `Vec` methods (`len`, `iter`, `extend`, …) to be called
    /// directly on a `DVector`.
    fn deref(&self) -> &Vec<Real> {
        &self.0
    }
}

impl DerefMut for DVector {
    /// Mutable counterpart of `Deref`, enabling in-place `Vec` operations.
    fn deref_mut(&mut self) -> &mut Vec<Real> {
        &mut self.0
    }
}

impl fmt::Display for DVector {
    /// Format the vector as `(x0, x1, …)`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("(")?;
        let mut first = true;
        for value in self.iter() {
            if !first {
                f.write_str(", ")?;
            }
            write!(f, "{}", value)?;
            first = false;
        }
        f.write_str(")")?;
        Ok(())
    }
}

impl FromIterator<Real> for DVector {
    /// Collect an iterator of scalars into a dense vector.
    fn from_iter<I: IntoIterator<Item = Real>>(iter: I) -> Self {
        DVector(Vec::from_iter(iter))
    }
}

impl IntoIterator for DVector {
    type Item = Real;
    type IntoIter = IntoIter<Real>;

    /// Consume the vector, yielding its elements by value.
    fn into_iter(self) -> IntoIter<Real> {
        self.0.into_iter()
    }
}

/// Type of dense or sparse vectors.
#[derive(Debug, Clone)]
pub enum Vector {
    /// A vector with dense storage.
    Dense(DVector),

    /**
     * A vector with sparse storage.
     *
     * For each non-zero element this vector stores an index and the
     * value of the element in addition to the size of the vector.
     */
    Sparse { size: usize, elems: Vec<(usize, Real)> },
}

impl fmt::Display for Vector {
    /// Format as `(x0, x1, …)` for dense vectors and as
    /// `size:(i0:x0, i1:x1, …)` for sparse ones.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Vector::Dense(v) => write!(f, "{}", v),
            Vector::Sparse { size, elems } => {
                write!(f, "{}:(", size)?;
                for (k, &(i, x)) in elems.iter().enumerate() {
                    if k > 0 {
                        write!(f, ", ")?;
                    }
                    write!(f, "{}:{}", i, x)?;
                }
                write!(f, ")")
            }
        }
    }
}

impl DVector {
    /// Reset this vector to `size` zero elements.
    pub fn init0(&mut self, size: usize) {
        self.clear();
        self.extend((0..size).map(|_| 0.0));
    }

    /// Set `self = factor * y`, overwriting any previous content.
    pub fn scal(&mut self, factor: Real, y: &DVector) {
        self.clear();
        self.extend(y.iter().map(|y| factor * y));
    }

    /// Return `factor * self` as a new vector.
    pub fn scaled(&self, factor: Real) -> DVector {
        let mut x = DVector::default();
        x.scal(factor, self);
        x
    }

    /// Return the inner product with another vector.
    ///
    /// Panics if the two vectors differ in length.
    pub fn dot(&self, other: &DVector) -> Real {
        assert_eq!(self.len(), other.len());
        self.dot_begin(other)
    }

    /// Return the inner product with another vector.
    ///
    /// The inner product is computed on the smaller of the two
    /// dimensions. All other elements are assumed to be zero.
    pub fn dot_begin(&self, other: &DVector) -> Real {
        #[cfg(feature = "blas")]
        unsafe {
            // SAFETY: the length passed to BLAS is the minimum of both
            // lengths, so `ddot` only reads within both slices.
            blas::ddot(self.len().min(other.len()) as c_int, &self, 1, &other, 1)
        }
        #[cfg(not(feature = "blas"))]
        {
            // `zip` stops at the shorter vector, matching the BLAS branch.
            self.iter().zip(other.iter()).map(|(x, y)| x * y).sum::<Real>()
        }
    }

    /// Set `self = x + y`, overwriting any previous content.
    ///
    /// Panics if `x` and `y` differ in length.
    pub fn add(&mut self, x: &DVector, y: &DVector) {
        assert_eq!(x.len(), y.len());
        self.clear();
        self.extend(x.iter().zip(y.iter()).map(|(a, b)| a + b));
    }

    /// Compute `self += alpha * y` in place.
    ///
    /// Panics if `self` and `y` differ in length.
    pub fn add_scaled(&mut self, alpha: Real, y: &DVector) {
        assert_eq!(self.len(), y.len());
        #[cfg(feature = "blas")]
        unsafe {
            // SAFETY: both slices have exactly `self.len()` elements
            // (asserted above), so `daxpy` stays in bounds.
            blas::daxpy(self.len() as c_int, alpha, &y, 1, &mut self[..], 1)
        }
        #[cfg(not(feature = "blas"))]
        {
            for (x, y) in self.iter_mut().zip(y.iter()) {
                *x += alpha * y;
            }
        }
    }

    /// Compute `self += alpha * y` for vectors of possibly different sizes.
    ///
    /// In contrast to `add_scaled`, the two vectors might have
    /// different sizes. The size of the resulting vector is the
    /// larger of the two vector sizes and the remaining entries of
    /// the smaller vector are assumed to be 0.0.
    pub fn add_scaled_begin(&mut self, alpha: Real, y: &DVector) {
        #[cfg(feature = "blas")]
        unsafe {
            // SAFETY: the length passed to BLAS is the minimum of both
            // lengths, so `daxpy` stays within both slices.
            let n = self.len();
            blas::daxpy(n.min(y.len()) as c_int, alpha, &y, 1, &mut self[..], 1);
        }
        #[cfg(not(feature = "blas"))]
        {
            for (x, y) in self.iter_mut().zip(y.iter()) {
                *x += alpha * y;
            }
        }
        // If `y` is longer, append its scaled tail (the missing entries of
        // `self` are treated as 0.0).
        let n = self.len();
        if n < y.len() {
            self.extend(y[n..].iter().map(|y| alpha * y));
        }
    }

    /// Return the 2-norm of this vector.
    pub fn norm2(&self) -> Real {
        #[cfg(feature = "blas")]
        unsafe {
            // SAFETY: `dnrm2` reads exactly `self.len()` elements.
            blas::dnrm2(self.len() as c_int, &self, 1)
        }
        #[cfg(not(feature = "blas"))]
        {
            self.iter().map(|x| x * x).sum::<Real>().sqrt()
        }
    }
}

impl Aggregatable for DVector {
    /// Build a new vector equal to `alpha * other`.
    fn new_scaled<A>(alpha: Real, other: A) -> Self
    where
        A: Borrow<Self>,
    {
        other.borrow().scaled(alpha)
    }

    /// In-place update `self += alpha * other`.
    fn add_scaled<A>(&mut self, alpha: Real, other: A)
    where
        A: Borrow<Self>,
    {
        // Qualified call to disambiguate the inherent method from this
        // trait method of the same name.
        DVector::add_scaled(self, alpha, other.borrow())
    }
}

impl Vector {
    /**
     * Return a sparse vector of dimension `n` with the given non-zeros.
     *
     * `indices` and `values` must have the same length and every index
     * must be smaller than `n`. Duplicate indices are summed up; entries
     * that are (or sum up to) zero are dropped.
     *
     * Panics if `indices` and `values` differ in length or if an index is
     * out of range.
     */
    pub fn new_sparse(n: usize, indices: &[usize], values: &[Real]) -> Vector {
        assert_eq!(indices.len(), values.len());
        // Check *all* indices up front (the old code only checked the last
        // element of the unsorted slice). This also makes the plain
        // indexing below provably in-bounds.
        assert!(indices.iter().all(|&i| i < n));

        if indices.is_empty() {
            Vector::Sparse { size: n, elems: vec![] }
        } else {
            // Sort positions into `indices`/`values` by ascending index so
            // duplicate indices become adjacent and can be merged.
            //
            // BUG FIX: the original iterated `0..n` (the vector dimension)
            // instead of `0..indices.len()` (the number of non-zeros),
            // which panicked for n > indices.len() and silently dropped
            // entries for n < indices.len().
            let mut ordered: Vec<_> = (0..indices.len()).collect();
            ordered.sort_by_key(|&i| indices[i]);
            let mut elems = Vec::with_capacity(indices.len());
            let mut last_idx = n; // sentinel: no valid index equals `n`
            for i in ordered {
                let val = values[i];
                if val != 0.0 {
                    let idx = indices[i];
                    if idx != last_idx {
                        elems.push((idx, val));
                        last_idx = idx;
                    } else {
                        // Duplicate index: accumulate; drop the entry again
                        // if the values cancel to zero.
                        let last = elems.last_mut().unwrap();
                        last.1 += val;
                        if last.1 == 0.0 {
                            elems.pop();
                            last_idx = n;
                        }
                    }
                }
            }
            Vector::Sparse { size: n, elems }
        }
    }

    /**
     * Convert vector to a dense vector.
     *
     * This function always returns a copy of the vector.
     */
    pub fn to_dense(&self) -> DVector {
        match *self {
            Vector::Dense(ref x) => x.clone(),
            Vector::Sparse { size: n, elems: ref xs } => {
                let mut v = vec![0.0; n];
                for &(i, x) in xs {
                    // Safe indexing: panics (instead of UB) if a Sparse
                    // vector was constructed with an out-of-range index.
                    v[i] = x;
                }
                DVector(v)
            }
        }
    }
}

#[test]
fn test_add_scaled_begin() {
    // `x` (len 5) gains the scaled tail of `y` (len 7): the overlapping
    // prefix becomes 1 + 3*2 = 7, the two appended entries become 3*2 = 6.
    let mut x = dvec![1.0; 5];
    let y = dvec![2.0; 7];
    x.add_scaled_begin(3.0, &y);
    assert_eq!(x, dvec![7.0, 7.0, 7.0, 7.0, 7.0, 6.0, 6.0]);
}