RsBundle  Check-in [4dad0cad83]

Many hyperlinks are disabled.
Use anonymous login to enable hyperlinks.

Overview
Comment: Reformat
Downloads: Tarball | ZIP archive
Timelines: family | ancestors | descendants | both | error-handling
Files: files | file ages | folders
SHA1: 4dad0cad83dce61ecd178254049ecadcbc2bf1a6
User & Date: fifr 2018-08-18 11:15:25.187
Context
2018-08-30
13:31
Merge error-handling check-in: a304098147 user: fifr tags: trunk
09:04
Update version to 0.6.0-dev check-in: a4c479bbc1 user: fifr tags: modifyprimals
2018-08-18
11:15
Reformat Closed-Leaf check-in: 4dad0cad83 user: fifr tags: error-handling
2018-07-09
09:32
Add .editorconfig check-in: 7557d9d11d user: fifr tags: error-handling
Changes
Unified Diff Ignore Whitespace Patch
Changes to examples/mmcf.rs.
50
51
52
53
54
55
56
57
58
59
60
61
62
63
        solver.solve().unwrap();

        let costs: f64 = (0..solver.problem().num_subproblems())
            .map(|i| {
                let primals = solver.aggregated_primals(i);
                let aggr_primals = solver.problem().aggregate_primals_ref(&primals);
                solver.problem().get_primal_costs(i, &aggr_primals)
            })
            .sum();
        info!("Primal costs: {}", costs);
    } else {
        panic!("Usage: {} FILENAME", program);
    }
}







<
|





50
51
52
53
54
55
56

57
58
59
60
61
62
        solver.solve().unwrap();

        let costs: f64 = (0..solver.problem().num_subproblems())
            .map(|i| {
                let primals = solver.aggregated_primals(i);
                let aggr_primals = solver.problem().aggregate_primals_ref(&primals);
                solver.problem().get_primal_costs(i, &aggr_primals)

            }).sum();
        info!("Primal costs: {}", costs);
    } else {
        panic!("Usage: {} FILENAME", program);
    }
}
Changes to src/hkweighter.rs.
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
            self.eps_weight = 1e30;
            self.iter = 0;
            return if state.cur_y.len() == 0 || state.sgnorm < state.cur_y.len() as Real * 1e-10 {
                1.0
            } else {
                state.sgnorm.max(1e-4)
            }.max(params.min_weight)
                .min(params.max_weight);
        }

        let cur_nxt = state.cur_val - state.nxt_val;
        let cur_mod = state.cur_val - state.nxt_mod;
        let w = 2.0 * state.weight * (1.0 - cur_nxt / cur_mod);

        debug!("  cur_nxt={} cur_mod={} w={}", cur_nxt, cur_mod, w);

        if state.step == Step::Null {
            let sgnorm = state.sgnorm;
            let lin_err = state.cur_val - state.new_cutval;
            self.eps_weight = self.eps_weight.min(sgnorm + cur_mod - sgnorm * sgnorm / state.weight);
            let new_weight = if self.iter < -3 && lin_err > self.eps_weight.max(FACTOR * cur_mod) {
                w
            } else {
                state.weight
            }.min(FACTOR * state.weight)
                .min(params.max_weight);
            if new_weight > state.weight {
                self.iter = -1
            } else {
                self.iter = min(self.iter - 1, -1);
            }

            debug!(







|

















|







80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
            self.eps_weight = 1e30;
            self.iter = 0;
            return if state.cur_y.len() == 0 || state.sgnorm < state.cur_y.len() as Real * 1e-10 {
                1.0
            } else {
                state.sgnorm.max(1e-4)
            }.max(params.min_weight)
            .min(params.max_weight);
        }

        let cur_nxt = state.cur_val - state.nxt_val;
        let cur_mod = state.cur_val - state.nxt_mod;
        let w = 2.0 * state.weight * (1.0 - cur_nxt / cur_mod);

        debug!("  cur_nxt={} cur_mod={} w={}", cur_nxt, cur_mod, w);

        if state.step == Step::Null {
            let sgnorm = state.sgnorm;
            let lin_err = state.cur_val - state.new_cutval;
            self.eps_weight = self.eps_weight.min(sgnorm + cur_mod - sgnorm * sgnorm / state.weight);
            let new_weight = if self.iter < -3 && lin_err > self.eps_weight.max(FACTOR * cur_mod) {
                w
            } else {
                state.weight
            }.min(FACTOR * state.weight)
            .min(params.max_weight);
            if new_weight > state.weight {
                self.iter = -1
            } else {
                self.iter = min(self.iter - 1, -1);
            }

            debug!(
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
            let new_weight = if self.iter > 0 && cur_nxt > self.m_r * cur_mod {
                w
            } else if self.iter > 3 || state.nxt_val < self.model_max {
                state.weight / 2.0
            } else {
                state.weight
            }.max(state.weight / FACTOR)
                .max(params.min_weight);
            self.eps_weight = self.eps_weight.max(2.0 * cur_mod);
            if new_weight < state.weight {
                self.iter = 1;
                self.model_max = NEG_INFINITY;
            } else {
                self.iter = max(self.iter + 1, 1);
            }







|







121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
            let new_weight = if self.iter > 0 && cur_nxt > self.m_r * cur_mod {
                w
            } else if self.iter > 3 || state.nxt_val < self.model_max {
                state.weight / 2.0
            } else {
                state.weight
            }.max(state.weight / FACTOR)
            .max(params.min_weight);
            self.eps_weight = self.eps_weight.max(2.0 * cur_mod);
            if new_weight < state.weight {
                self.iter = 1;
                self.model_max = NEG_INFINITY;
            } else {
                self.iter = max(self.iter + 1, 1);
            }
Changes to src/master/boxed.rs.
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
                        0.0
                    }
                } else if ub < INFINITY {
                    ub * eta
                } else {
                    0.0
                }
            })
            .sum()
    }

    /**
     * Return $\\|G \alpha - \eta\\|_2\^2$.
     *
     * This is the norm-square of the dual optimal solution including
     * the current box-multipliers $\eta$.







<
|







157
158
159
160
161
162
163

164
165
166
167
168
169
170
171
                        0.0
                    }
                } else if ub < INFINITY {
                    ub * eta
                } else {
                    0.0
                }

            }).sum()
    }

    /**
     * Return $\\|G \alpha - \eta\\|_2\^2$.
     *
     * This is the norm-square of the dual optimal solution including
     * the current box-multipliers $\eta$.
Changes to src/mcf/problem.rs.
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
        let mut aggr = primals[0]
            .1
            .iter()
            .map(|x| {
                let mut r = dvec![];
                r.scal(primals[0].0, x);
                r
            })
            .collect::<Vec<_>>();

        for &(alpha, primal) in &primals[1..] {
            for (j, x) in primal.iter().enumerate() {
                aggr[j].add_scaled(alpha, x);
            }
        }








<
|







204
205
206
207
208
209
210

211
212
213
214
215
216
217
218
        let mut aggr = primals[0]
            .1
            .iter()
            .map(|x| {
                let mut r = dvec![];
                r.scal(primals[0].0, x);
                r

            }).collect::<Vec<_>>();

        for &(alpha, primal) in &primals[1..] {
            for (j, x) in primal.iter().enumerate() {
                aggr[j].add_scaled(alpha, x);
            }
        }

Changes to src/solver.rs.
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
                    &newvars.iter().map(|v| (v.0, v.1, v.2)).collect::<Vec<_>>(),
                    &mut |fidx, minidx, vars| {
                        problem
                            .extend_subgradient(minorants[fidx][minidx].primal.as_ref().unwrap(), vars)
                            .map(DVector)
                            .map_err(|e| e.into())
                    },
                )
                .map_err(SolverError::Master)?;
            // modify moved variables
            for (index, val) in newvars.iter().filter_map(|v| v.0.map(|i| (i, v.3))) {
                self.cur_y[index] = val;
                self.nxt_y[index] = val;
                self.nxt_d[index] = 0.0;
            }
            // add new variables







<
|







690
691
692
693
694
695
696

697
698
699
700
701
702
703
704
                    &newvars.iter().map(|v| (v.0, v.1, v.2)).collect::<Vec<_>>(),
                    &mut |fidx, minidx, vars| {
                        problem
                            .extend_subgradient(minorants[fidx][minidx].primal.as_ref().unwrap(), vars)
                            .map(DVector)
                            .map_err(|e| e.into())
                    },

                ).map_err(SolverError::Master)?;
            // modify moved variables
            for (index, val) in newvars.iter().filter_map(|v| v.0.map(|i| (i, v.3))) {
                self.cur_y[index] = val;
                self.nxt_y[index] = val;
                self.nxt_d[index] = 0.0;
            }
            // add new variables