RsBundle Diff

Differences From Artifact [471f4c10be]:

  • File src/mpi/problem.rs — part of check-in [cc34a43d31] at 2023-07-08 10:58:54 on branch mpi — mpi: single worker, multiple evaluations (user: fifr size: 11789)

To Artifact [013b9bf6a9]:

  • File src/mpi/problem.rs — part of check-in [a3c43e66ab] at 2023-07-08 17:44:09 on branch mpi — mpi::problem: support multiple processes per worker (user: fifr size: 13049)

--- src/mpi/problem.rs
+++ src/mpi/problem.rs
@@ -17,30 +17,27 @@
 
 use super::msg::{ResultMsg, WorkerMsg};
 use crate::problem::{
     FirstOrderProblem, ResultSender, SubgradientExtender, UpdateSendError, UpdateSender, UpdateState,
 };
 use crate::{DVector, Minorant, Real};
 
-use log::{debug, error, info};
+use log::{error, info};
 use mpi::environment::Universe;
 use mpi::topology::{Communicator, SystemCommunicator};
 use mpi::Rank;
-use num_traits::ToPrimitive;
 use serde::{Deserialize, Serialize};
 use std::sync::mpsc::{channel, Sender};
 use std::sync::{RwLock, RwLockReadGuard};
 use thiserror::Error;
-use threadpool::ThreadPool;
 
 use crate::mpi::msg::{recv_msg, send_msg};
 use serde::de::DeserializeOwned;
-use std::collections::VecDeque;
 use std::sync::Arc;
-use std::time::Instant;
+use std::thread;
 
 /// Error raised by the MPI [`Problem`].
 #[derive(Debug, Error, Serialize, Deserialize)]
 #[allow(clippy::upper_case_acronyms)]
 pub enum Error<E> {
     /// MPI error.
     #[error("MPI error")]
@@ -110,53 +107,47 @@
 }
 
 type ClientMessage<P> = (
     WorkerMsg<<P as DistributedFirstOrderProblem>::Update>,
     Option<Box<dyn ResultSender<Problem<P>> + 'static>>,
 );
 
-struct MPIData<P: DistributedFirstOrderProblem> {
-    nclients: Rank,
-    free_clients: VecDeque<Rank>,
-    next_client: Rank,
-    client_txs: Vec<Sender<ClientMessage<P>>>,
-}
-
 /// The first order problem for the mpi end-point on the main node.
 ///
 /// This is the problem called from the bundle algorithm. It does not
 /// solve the subproblems directly but transfers the requests to MPI
 /// worker nodes. Use [`Problem`] to wrap a regular (non-MPI) first
 /// order problem.
 pub struct Problem<P: DistributedFirstOrderProblem> {
+    #[allow(dead_code)]
     universe: Universe,
     problem: Arc<RwLock<P>>,
 
-    mpidata: Option<MPIData<P>>,
+    compute_tx: Option<Sender<ClientMessage<P>>>,
 }
 
 impl<P: DistributedFirstOrderProblem> Problem<P> {
     /// Return a reference to the underlying problem.
     pub fn problem(&self) -> RwLockReadGuard<P> {
         self.problem.read().unwrap()
     }
 }
 
 impl<P: DistributedFirstOrderProblem> Drop for Problem<P> {
     fn drop(&mut self) {
-        self.mpidata.take();
+        self.compute_tx.take();
     }
 }
 
 impl<P: DistributedFirstOrderProblem> Problem<P> {
     pub fn new(universe: Universe, problem: P) -> Self {
         Problem {
             universe,
             problem: Arc::new(RwLock::new(problem)),
-            mpidata: None,
+            compute_tx: None,
         }
     }
 }
 
 impl<P: DistributedFirstOrderProblem + 'static> FirstOrderProblem for Problem<P>
 where
     P::Minorant: for<'a> Deserialize<'a>,
@@ -183,141 +174,165 @@
     }
 
     fn num_subproblems(&self) -> usize {
         self.problem.read().unwrap().num_subproblems()
     }
 
     fn start(&mut self) {
-        self.problem.write().unwrap().start();
-        if self.mpidata.is_none() {
-            let world = self.universe.world();
-            let free_clients = (1..world.size()).collect();
-            let pool = ThreadPool::new(world.size().to_usize().unwrap());
-
-            let client_txs = (1..world.size())
-                .map(|rank| {
-                    let (tx, rx) = channel::<ClientMessage<P>>();
+        let nsubs = {
+            let mut p = self.problem.write().unwrap();
+            p.start();
+            p.num_subproblems()
+        };
 
-                    pool.execute(move || {
-                        let world = SystemCommunicator::world();
-                        let client = world.process_at_rank(rank);
+        if self.compute_tx.is_none() {
+            let world = SystemCommunicator::world();
+            let nworkers = world.size() - 1;
 
-                        while let Ok((msg, result_tx)) = rx.recv() {
-                            let start_time = Instant::now();
-                            // send evaluation point
-                            send_msg(&client, &msg);
-
-                            if let WorkerMsg::ApplyUpdate(_) = msg {
-                                // no response expected
-                                // TODO: this might be bad because `ApplyUpdate` could fail, hence
-                                // an error-message is sent
-                                continue;
-                            }
-
-                            let result_tx = result_tx.unwrap();
-
-                            // wait for response
-                            loop {
-                                let msg = recv_msg(&client);
+            let (compute_tx, compute_rx) = channel::<ClientMessage<P>>();
+
+            // First start one receiver thread for each worker. Each thread gets the result-channels
+            // (for the algorithm) via another channel.
+            let result_txs = (0..nworkers)
+                .map(|client_idx| {
+                    let rank = client_idx as Rank + 1;
+                    let (req_tx, req_rx) = channel::<(usize, Box<dyn ResultSender<Self>>)>();
+                    thread::spawn(move || {
+                        let world = SystemCommunicator::world();
+                        let worker = world.process_at_rank(rank);
+                        let mut result_txs = (0..nsubs).map(|_| None).collect::<Vec<_>>();
+                        loop {
+                            let msg = recv_msg(&worker);
 
-                                match msg {
-                                    ResultMsg::ObjectiveValue { value, .. } => result_tx.objective(value).unwrap(),
-                                    ResultMsg::Minorant { minorant, .. } => result_tx.minorant(minorant).unwrap(),
-                                    ResultMsg::Done { index, .. } => {
-                                        debug!(
-                                            "Worker index:{} time:{}",
-                                            index,
-                                            start_time.elapsed().as_millis() as f64 / 1000.0
-                                        );
-                                        break;
-                                    }
-                                    ResultMsg::Error { error, .. } => {
-                                        result_tx.error(Error::OracleError(error)).unwrap()
-                                    }
-                                    ResultMsg::Terminate => {
-                                        error!("Unexpected termination message");
-                                    }
-                                }
-                            }
-                        }
-
-                        info!("Terminate worker thread {}", rank);
-
-                        send_msg(&client, &WorkerMsg::<P::Update>::Terminate);
-                    });
+                            let index = match msg {
+                                ResultMsg::ObjectiveValue { index, .. } => index,
+                                ResultMsg::Minorant { index, .. } => index,
+                                ResultMsg::Done { index, .. } => index,
+                                ResultMsg::Error { index, .. } => index,
+                                ResultMsg::Terminate => break,
+                            };
+
+                            while result_txs[index].is_none() {
+                                let (i, tx) = req_rx.recv().expect("Result receiver failed");
+                                result_txs[i] = Some(tx);
+                            }
+
+                            let tx = result_txs[index].as_mut().unwrap();
+
+                            match msg {
+                                ResultMsg::ObjectiveValue { value, .. } => tx.objective(value).unwrap(),
+                                ResultMsg::Minorant { minorant, .. } => tx.minorant(minorant).unwrap(),
+                                ResultMsg::Done { index, .. } => {
+                                    // debug!(
+                                    //     "Worker index:{} time:{}",
+                                    //     index,
+                                    //     start_time.elapsed().as_millis() as f64 / 1000.0
+                                    // );
+                                    // Remove (i.e. close) the result sender.
+                                    result_txs[index].take();
+                                }
+                                ResultMsg::Error { index, error } => {
+                                    tx.error(Error::OracleError(error)).unwrap();
+                                    // we also close the result sender on error
+                                    result_txs[index].take();
+                                }
+                                ResultMsg::Terminate => unreachable!(),
+                            }
+                        }
+                    });
+                    req_tx
+                })
+                .collect::<Vec<_>>();
+
+            // This thread forwards computation requests to the workers.
+            thread::spawn(move || {
+                let world = SystemCommunicator::world();
+                let mut next_rank = 1;
+                while let Ok((msg, result_tx)) = compute_rx.recv() {
+                    match msg {
+                        WorkerMsg::Terminate | WorkerMsg::ApplyUpdate(..) => {
+                            // send message to all workers
+                            for rank in 1..=nworkers {
+                                let worker = world.process_at_rank(rank);
+                                send_msg(&worker, &msg);
+                            }
+                            // no response expected
+                            // TODO: this might be bad because `ApplyUpdate` could fail, hence
+                            // an error-message is sent
+                        }
+                        WorkerMsg::Evaluate { i, .. } => {
+                            let rank = next_rank;
+                            next_rank = (next_rank % nworkers) + 1;
+
+                            //let start_time = Instant::now();
 
-                    tx
-                })
-                .collect();
-
-            self.mpidata = Some(MPIData {
-                nclients: world.size() - 1,
-                free_clients,
-                next_client: 1,
-                client_txs,
+                            // update result_tx
+                            result_txs[rank as usize - 1].send((i, result_tx.unwrap())).unwrap();
+                            let worker = world.process_at_rank(rank);
+                            send_msg(&worker, &msg);
+                        }
+                    }
+                }
+
+                // stop all workers
+                for rank in 1..=nworkers {
+                    let worker = world.process_at_rank(rank);
+                    send_msg(&worker, &WorkerMsg::<P::Update>::Terminate);
+                }
+
+                info!("Computation relay terminated");
             });
+
+            self.compute_tx = Some(compute_tx);
         }
     }
 
     fn stop(&mut self) {
-        self.mpidata.take();
         self.problem.write().unwrap().stop();
+        self.compute_tx.take();
     }
 
     fn evaluate<S>(&mut self, i: usize, y: Arc<DVector>, tx: S) -> Result<(), Self::Err>
     where
         S: ResultSender<Self> + 'static,
         Self: Sized,
     {
-        if self.mpidata.is_none() {
+        if self.compute_tx.is_none() {
            self.start()
        }
 
-        let mpidata = self.mpidata.as_mut().unwrap();
+        let compute_tx = self.compute_tx.as_mut().unwrap();
 
         // get client
-        let rank = mpidata.free_clients.pop_front().unwrap_or_else(|| {
-            let r = mpidata.next_client;
-            let n = mpidata.nclients; // rank 0 is this node
-            mpidata.next_client = (mpidata.next_client % n) + 1;
-            r
-        }) as usize;
-
-        mpidata.client_txs[rank - 1]
+        compute_tx
             .send((WorkerMsg::Evaluate { i, y }, Some(Box::new(tx))))
             .unwrap();
 
         Ok(())
     }
 
     fn update<U, S>(&mut self, state: U, tx: S) -> Result<(), Self::Err>
     where
         U: UpdateState<<Self::Minorant as Minorant>::Primal>,
         S: UpdateSender<Self> + 'static,
         Self: Sized,
     {
-        if self.mpidata.is_none() {
+        if self.compute_tx.is_none() {
             self.start()
         }
 
-        let mpidata = self.mpidata.as_mut().unwrap();
+        let compute_tx = self.compute_tx.clone().unwrap();
 
         let problem = self.problem.clone();
-        let client_txs = mpidata.client_txs.clone();
         self.problem
             .write()
             .unwrap()
             .compute_update(state, move |update| {
                 let update = Arc::new(update);
-                let world = SystemCommunicator::world();
-                for rank in 1..world.size() as usize {
-                    client_txs[rank - 1]
-                        .send((WorkerMsg::ApplyUpdate(update.clone()), None))
-                        .unwrap()
-                }
+                compute_tx.send((WorkerMsg::ApplyUpdate(update.clone()), None)).unwrap();
                 let mut problem = problem.write().unwrap();
                 problem.apply_update(&update)?;
                 problem.send_update(&update, WorkerUpdateSender(tx))?;
 
                 Ok(())
             })
             .map_err(Error::OracleError)?;
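
Editor's note: the reworked start()/evaluate()/update() above amounts to a channel topology: the algorithm hands (index, result-sender) pairs to a relay, the relay round-robins Evaluate requests over the worker ranks (and broadcasts ApplyUpdate/Terminate to all of them), and one receiver thread per worker routes index-tagged results back to the sender registered for that index. The following self-contained sketch reproduces just that routing pattern with plain std threads and channels standing in for the MPI send_msg/recv_msg calls; every name in it (Request, Tagged, slots, reg_tx, the squaring "evaluation") is invented for illustration and is not part of the RsBundle API.

    // Sketch only: std channels stand in for MPI messaging.
    use std::sync::mpsc::{channel, Sender};
    use std::thread;

    type Request = (usize, f64); // (subproblem index, evaluation point)
    type Tagged = (usize, f64);  // (subproblem index, result value)

    fn main() {
        let (nworkers, nsubs) = (2usize, 4usize);

        // "Workers": evaluate requests and send back index-tagged results.
        let mut req_txs = Vec::new();
        let mut res_rxs = Vec::new();
        for _ in 0..nworkers {
            let (req_tx, req_rx) = channel::<Request>();
            let (res_tx, res_rx) = channel::<Tagged>();
            thread::spawn(move || {
                for (i, y) in req_rx {
                    res_tx.send((i, y * y)).unwrap(); // stand-in "evaluation"
                }
            });
            req_txs.push(req_tx);
            res_rxs.push(res_rx);
        }

        // One receiver per worker: route each tagged result to the sender
        // registered for that subproblem index (cf. req_tx/req_rx in the diff).
        let mut reg_txs = Vec::new();
        for res_rx in res_rxs {
            let (reg_tx, reg_rx) = channel::<(usize, Sender<f64>)>();
            thread::spawn(move || {
                let mut slots: Vec<Option<Sender<f64>>> = vec![None; nsubs];
                for (index, value) in res_rx {
                    // Lazily pull registrations until the needed one arrives;
                    // it was sent before the request, so this cannot deadlock.
                    while slots[index].is_none() {
                        let (i, tx) = reg_rx.recv().expect("registration closed");
                        slots[i] = Some(tx);
                    }
                    // One result per request here, so close the slot after use.
                    slots[index].take().unwrap().send(value).unwrap();
                }
            });
            reg_txs.push(reg_tx);
        }

        // "Relay": register the result channel, then dispatch round-robin.
        let mut next = 0;
        let answers: Vec<_> = (0..nsubs)
            .map(|i| {
                let (tx, rx) = channel::<f64>();
                reg_txs[next].send((i, tx)).unwrap();
                req_txs[next].send((i, i as f64)).unwrap();
                next = (next + 1) % nworkers;
                rx
            })
            .collect();

        for (i, rx) in answers.into_iter().enumerate() {
            println!("subproblem {} -> {}", i, rx.recv().unwrap());
        }
    }

The per-worker registration channel is what lets one worker serve several in-flight subproblem evaluations without a shared table: each receiver owns its own slot vector, mirroring the per-rank req_tx returned from the map in the new start().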