
I'm trying to run a DoE in parallel on a distributed code, but it doesn't seem to work. Below is a simplified example that raises the same error as the real code.

import numpy as np

from openmdao.api import IndepVarComp, Group, Problem, Component
from openmdao.core.mpi_wrap import MPI
from openmdao.drivers.latinhypercube_driver import LatinHypercubeDriver

if MPI: 
    from openmdao.core.petsc_impl import PetscImpl as impl 
    rank = MPI.COMM_WORLD.rank
else:
    from openmdao.api import BasicImpl as impl 
    rank = 0



class DistribCompSimple(Component):
    """Uses 2 procs but takes full input vars"""

    def __init__(self, arr_size=2):
        super(DistribCompSimple, self).__init__()

        self._arr_size = arr_size
        self.add_param('invar', 0.)
        self.add_output('outvec', np.ones(arr_size, float))

    def solve_nonlinear(self, params, unknowns, resids):
        if rank == 0:
            unknowns['outvec'] = params['invar'] * np.ones(self._arr_size) * 0.25 
        elif rank == 1:
            unknowns['outvec'] = params['invar'] * np.ones(self._arr_size) * 0.5

        print 'hello from rank', rank, unknowns['outvec']

    def get_req_procs(self):
        # this component needs exactly 2 processes (min, max)
        return (2, 2)


if __name__ == '__main__':

    N_PROCS = 4

    prob = Problem(impl=impl)
    root = prob.root = Group()

    root.add('p1', IndepVarComp('invar', 0.), promotes=['*'])
    root.add('comp', DistribCompSimple(2), promotes=['*'])

    prob.driver = LatinHypercubeDriver(4, num_par_doe=N_PROCS/2)  # 4 samples, run as 2 parallel DOE points

    prob.driver.add_desvar('invar', lower=-5.0, upper=5.0)

    prob.driver.add_objective('outvec')

    prob.setup(check=False)
    prob.run()

I run this with

mpirun -np 4 python lhc_driver.py

and get this error:

Traceback (most recent call last):
  File "lhc_driver.py", line 60, in <module>
    prob.run()
  File "/Users/frza/git/OpenMDAO/openmdao/core/problem.py", line 1064, in run
    self.driver.run(self)
  File "/Users/frza/git/OpenMDAO/openmdao/drivers/predeterminedruns_driver.py", line 157, in run
    self._run_par_doe(problem.root)
  File "/Users/frza/git/OpenMDAO/openmdao/drivers/predeterminedruns_driver.py", line 221, in _run_par_doe
    for case in self._get_case_w_nones(self._distrib_build_runlist()):
  File "/Users/frza/git/OpenMDAO/openmdao/drivers/predeterminedruns_driver.py", line 283, in _get_case_w_nones
    case = next(it)
  File "/Users/frza/git/OpenMDAO/openmdao/drivers/latinhypercube_driver.py", line 119, in _distrib_build_runlist
    run_list = comm.scatter(job_list, root=0)
  File "MPI/Comm.pyx", line 1286, in mpi4py.MPI.Comm.scatter (src/mpi4py.MPI.c:109079)
  File "MPI/msgpickle.pxi", line 707, in mpi4py.MPI.PyMPI_scatter (src/mpi4py.MPI.c:48114)
  File "MPI/msgpickle.pxi", line 161, in mpi4py.MPI.Pickle.dumpv (src/mpi4py.MPI.c:41605)
ValueError: expecting 4 items, got 2
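
For what it's worth, here is a minimal mpi4py-only sketch (no OpenMDAO) that raises the same ValueError. It assumes, as the traceback suggests, that comm.scatter() on the root rank wants exactly one item per process in the communicator, while only num_par_doe = 2 job lists get built:

from mpi4py import MPI

comm = MPI.COMM_WORLD

if comm.rank == 0:
    # only 2 items for a 4-process communicator -> "expecting 4 items, got 2"
    job_list = [['case0'], ['case1']]
else:
    job_list = None

run_list = comm.scatter(job_list, root=0)
print('rank %d got %s' % (comm.rank, run_list))

Run with mpirun -np 4 python scatter_repro.py (the filename is just for the sketch), this fails in the same Pickle.dumpv call, so my guess is that the scatter in _distrib_build_runlist is handed a list sized for the DOE chunks rather than for the full communicator, but I may be misreading it.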

I don't see a test for this use case in the latest master, so does that mean you don't yet support it or is it a bug?


1 Answer


Thanks for putting together a simple test case for this. I recently added the parallel DOE stuff and forgot to test it with distributed components. I'll add a story to our bug tracker for this and hopefully we'll get it fixed soon.

answered 2016-04-12T15:11:34.337