
A nearly identical question has been asked before and was solved in OpenMDAO by implementing the sub-problem class, but that approach does not seem to work in my case.

I am trying to solve the Sellar problem with the CO architecture, starting from the 1.7.3 release and the sub-problem example with the Sellar classes. It runs but does not converge. My guess is that this comes from the initial values of each optimization (it always restarts from a "cold" start).

I would be grateful if anyone could help! Here is the code (I suppose I could make it more compact by promoting variables, but I am a little afraid of getting lost while debugging :-))

import numpy as np

from openmdao.api import Problem, Group, IndepVarComp, ExecComp, ScipyOptimizer, SubProblem
# Sellar discipline components from the OpenMDAO 1.x test files
# (import path assumed; adjust to your installation if needed)
from openmdao.test.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives


class CO1(Group):
    """
    Sub-problem group for discipline 1 of the CO decomposition.
    """
    def __init__(self):
        super(CO1, self).__init__()
        self.add('indep1x', IndepVarComp('x', 0.0))
        self.add('indep1z', IndepVarComp('z', np.array([5.0, 2.0])))

        self.add('indep1y2', IndepVarComp('y2', 10.0))
        self.add('indep1zt', IndepVarComp('zt', np.array([5.0, 2.0])))
        self.add('indep1xt', IndepVarComp('xt', 0.0))
        self.add('indep1y2t', IndepVarComp('y2t', 10.0))
        self.add('indep1y1t', IndepVarComp('y1t', 10.0))

        self.add('d1', SellarDis1withDerivatives())
        self.connect('indep1z.z', 'd1.z')
        self.connect('indep1x.x', 'd1.x')
        self.connect('indep1y2.y2', 'd1.y2')

        self.add('obj1', ExecComp('obj1 = (zt[0] - z[0])**2 + (zt[1] - z[1])**2 + (xt - x)**2 + (y1t - y1)**2 + (y2t - y2)**2',
                                  z=np.array([5.0, 2.0]), zt=np.array([0.0, 0.0]), x=0.0, y1=10.0, y2=10.0, xt=0.0, y1t=10.0, y2t=10.0),
                 promotes=['obj1'])
        self.connect('d1.z', 'obj1.z')
        self.connect('d1.x', 'obj1.x')
        self.connect('d1.y2', 'obj1.y2')
        self.connect('d1.y1', 'obj1.y1')

        self.connect('indep1zt.zt', 'obj1.zt')
        self.connect('indep1xt.xt', 'obj1.xt')
        self.connect('indep1y2t.y2t', 'obj1.y2t')
        self.connect('indep1y1t.y1t', 'obj1.y1t')

        self.add('con_cmp1', ExecComp('con1 = 3.16 - y1'), promotes=['con1'])
        self.connect('d1.y1', 'con_cmp1.y1')


class CO2(Group):
    """
    Sub-problem group for discipline 2 of the CO decomposition.
    """
    def __init__(self):
        super(CO2, self).__init__()

        self.add('indep2z', IndepVarComp('z', np.array([5.0, 2.0])))
        self.add('indep2y1', IndepVarComp('y1', 10.0))
        self.add('indep2zt', IndepVarComp('zt', np.array([5.0, 2.0])))
        self.add('indep2y2t', IndepVarComp('y2t', 10.0))
        self.add('indep2y1t', IndepVarComp('y1t', 10.0))

        self.add('d2', SellarDis2withDerivatives())
        self.connect('indep2z.z', 'd2.z')
        self.connect('indep2y1.y1', 'd2.y1')

        self.add('obj2', ExecComp('obj2 = (zt[0] - z[0])**2 + (zt[1] - z[1])**2 + (y1t - y1)**2 + (y2t - y2)**2',
                                  z=np.array([5.0, 2.0]), zt=np.array([0.0, 0.0]), y1=10.0, y2=10.0, y1t=10.0, y2t=10.0),
                 promotes=['obj2'])
        self.connect('d2.z', 'obj2.z')
        self.connect('d2.y2', 'obj2.y2')
        self.connect('d2.y1', 'obj2.y1')

        self.connect('indep2zt.zt', 'obj2.zt')
        self.connect('indep2y2t.y2t', 'obj2.y2t')
        self.connect('indep2y1t.y1t', 'obj2.y1t')

        self.add('con_cmp2', ExecComp('con2 = y2 - 24.0'), promotes=['con2'])
        self.connect('d2.y2', 'con_cmp2.y2')

################################################
# CO test case
################################################
# First, define a Problem to be able to optimize Discipline 1.
################################################


sub1 = Problem(root=CO1())

# set up our SLSQP optimizer
sub1.driver = ScipyOptimizer()
sub1.driver.options['optimizer'] = 'SLSQP'
#sub1.driver.options['disp'] = False  # disable optimizer output


sub1.driver.add_desvar("indep1z.z",  lower=np.array([-10.0,0.0]), upper=np.array([10.0,10.0]))
sub1.driver.add_desvar("indep1x.x", lower=0.0, upper=10.0)
sub1.driver.add_desvar("indep1y2.y2", lower=-100.0, upper=100.0)

# We are minimizing obj1, so that's our objective.
sub1.driver.add_objective("obj1")
sub1.driver.add_constraint('con1', upper=0.0)

################################################
# Second, define a Problem to be able to optimize Discipline 2.
################################################
sub2 = Problem(root=CO2())

# set up our SLSQP optimizer
sub2.driver = ScipyOptimizer()
sub2.driver.options['optimizer'] = 'SLSQP'
#sub2.driver.options['disp'] = False  # disable optimizer output

sub2.driver.add_desvar("indep2z.z",  lower=np.array([-10.0,0.0]), upper=np.array([10.0,10.0]))
sub2.driver.add_desvar("indep2y1.y1", lower=-100.0, upper=100.0)

# We are minimizing obj2, so that's our objective.
sub2.driver.add_objective("obj2")
sub2.driver.add_constraint('con2', upper=0.0)

################################################
# Third, create our top level problem
################################################
prob = Problem(root=Group())

prob.root.add("top_indepxt", IndepVarComp('xt', 0.0))
prob.root.add("top_indepzt", IndepVarComp('zt',  np.array([5.0, 2.0])))
prob.root.add("top_indepy1t", IndepVarComp('y1t', 10.0))
prob.root.add("top_indepy2t", IndepVarComp('y2t',  10.0))


prob.root.add("subprob1", SubProblem(sub1, params=['indep1z.z','indep1x.x','indep1y2.y2','indep1xt.xt','indep1zt.zt','indep1y1t.y1t','indep1y2t.y2t'],
                                    unknowns=['obj1','con1' ,'d1.y1']))
prob.root.add("subprob2", SubProblem(sub2, params=['indep2z.z','indep2y1.y1','indep2zt.zt','indep2y1t.y1t','indep2y2t.y2t'],
                                    unknowns=['obj2','con2','d2.y2' ]))

prob.root.connect("top_indepxt.xt", "subprob1.indep1xt.xt")
prob.root.connect("top_indepzt.zt", "subprob1.indep1zt.zt")
prob.root.connect("top_indepy1t.y1t", "subprob1.indep1y1t.y1t")
prob.root.connect("top_indepy2t.y2t", "subprob1.indep1y2t.y2t")

prob.root.connect("top_indepzt.zt", "subprob2.indep2zt.zt")
prob.root.connect("top_indepy1t.y1t", "subprob2.indep2y1t.y1t")
prob.root.connect("top_indepy2t.y2t", "subprob2.indep2y2t.y2t") 

prob.driver=ScipyOptimizer()
prob.driver.options['optimizer']='SLSQP'

prob.driver.add_desvar('top_indepzt.zt', lower=np.array([-10.0,0.0]), upper=np.array([10.0,10.0]))
prob.driver.add_desvar('top_indepxt.xt',lower=0.0, upper=10.0)
prob.driver.add_desvar('top_indepy1t.y1t',lower=-100.0, upper=100.0)
prob.driver.add_desvar('top_indepy2t.y2t',lower=-100.0, upper=100.0)

prob.root.add('obj_cmp', ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
                                 z=np.array([5.0, 2.0]), x=0.0),
             promotes=['obj'])
prob.root.connect("top_indepzt.zt", "obj_cmp.z")
prob.root.connect("top_indepxt.xt", "obj_cmp.x")
prob.root.connect("top_indepy1t.y1t", "obj_cmp.y1")
prob.root.connect("top_indepy2t.y2t", "obj_cmp.y2")

prob.driver.add_objective('obj')

prob.root.add('con1_cmpt',ExecComp('con1t = (zt[0] - z[0])**2 +' 
                                 '(zt[1] - z[1])**2 + '
                                 '(xt - x)**2 + '
                                 '(y1t - y1)**2 + '
                                 '(y2t - y2)**2', z=np.array([5.0, 2.0]),zt=np.array([5.0, 2.0]), x=0.0, y1=10.0, y2=10.0,xt=0.0, y1t=10.0, y2t=10.0), promotes=['con1t'])
prob.root.connect("top_indepzt.zt", "con1_cmpt.zt")
prob.root.connect("top_indepxt.xt", "con1_cmpt.xt")
prob.root.connect("top_indepy1t.y1t", "con1_cmpt.y1t")
prob.root.connect("top_indepy2t.y2t", "con1_cmpt.y2t")    

prob.root.connect("subprob1.indep1z.z", "con1_cmpt.z")
prob.root.connect("subprob1.indep1x.x", "con1_cmpt.x")
prob.root.connect("subprob1.d1.y1", "con1_cmpt.y1")
prob.root.connect("subprob1.indep1y2.y2", "con1_cmpt.y2")    

prob.driver.add_constraint('con1t', upper=0.0)

prob.root.add('con2_cmpt',ExecComp('con2t = (zt[0] - z[0])**2 +' 
                                 '(zt[1] - z[1])**2 + '
                                 '(xt - x)**2 + '
                                 '(y1t - y1)**2 + '
                                 '(y2t - y2)**2', z=np.array([5.0, 2.0]),zt=np.array([5.0, 2.0]), x=0.0, y1=10.0, y2=10.0,xt=0.0, y1t=0.0, y2t=10.0), promotes=['con2t'])
prob.root.connect("top_indepzt.zt", "con2_cmpt.zt")

prob.root.connect("top_indepy1t.y1t", "con2_cmpt.y1t")
prob.root.connect("top_indepy2t.y2t", "con2_cmpt.y2t")    

prob.root.connect("subprob2.indep2z.z", "con2_cmpt.z")

prob.root.connect("subprob2.indep2y1.y1", "con2_cmpt.y1")
prob.root.connect("subprob2.d2.y2", "con2_cmpt.y2")    

prob.driver.add_constraint('con2t', upper=0.0)    


prob.setup()

# run the concurrent optimizations
prob.run() 

print("\n")
print("Design var at convergence (%f,%f,%f)"% (prob['top_indepzt.zt'][0],prob['top_indepzt.zt'][1],prob['top_indepxt.xt']))
print("Coupling var at convergence (%f,%f) "% (prob['top_indepy1t.y1t'],prob['top_indepy2t.y2t']))
print("Objective and constraints at (%f, %f,%f)"% (prob['obj'],prob['con1t'],prob['con2t']))

print("Sub1 : Design var at convergence (%f,%f,%f)"% (prob['subprob1.indep1z.z'][0],prob['subprob1.indep1z.z'][1],prob['subprob1.indep1x.x']))
print("Sub2 : Design var at convergence (%f,%f)"% (prob['subprob2.indep2z.z'][0],prob['subprob2.indep2z.z'][1]))

1 Answer


I have implemented a reasonable solution to this problem in OpenMDAO 2.0. It was a bit tricky to get working; the most notable issue is that I could not use ScipyOptimizer for both the top-level and the sub-optimizations, since it does not appear to be re-entrant.
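
If pyOptSparse/SNOPT (used at the top level below) is not available, one possible fallback, purely an untested assumption on my part based on the guess that the clash comes from nesting two SLSQP calls, is to drive the top level with a gradient-free scipy method such as COBYLA while the sub-problems keep SLSQP:

# hypothetical top-level driver swap for the main block at the bottom (untested)
from openmdao.api import ScipyOptimizer

prob.driver = ScipyOptimizer()
prob.driver.options['optimizer'] = 'COBYLA'  # gradient-free; handles the inequality constraint on J.c
prob.driver.options['maxiter'] = 500
prob.driver.options['tol'] = 1e-4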

The other trick, as in the problem above, is that you have to create components with sub-problems inside them. That means you are running OpenMDAO inside OpenMDAO. It is not the most efficient setup in the world, and there are numerical challenges because you end up finite-differencing around an optimization. In theory, post-optimality sensitivities could be implemented to get more efficient derivatives here.
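
For reference, here is a sketch of the CO decomposition that the code below implements, written with the variable names used in the code (the hatted quantities are the copies used to decouple the levels):

$$
\begin{aligned}
\text{system level:}\quad & \min_{z,\,\hat{x},\,\hat{y}_1,\,\hat{y}_2}\;\; \hat{x}^2 + z_2 + \hat{y}_1 + e^{-\hat{y}_2}\\
& \text{s.t.}\;\; J = \Big(\lVert z-\hat{z}_1\rVert^2 + \lVert z-\hat{z}_2\rVert^2 + (\hat{x}-x)^2 + (\hat{y}_1-y_1)^2 + (\hat{y}_2-y_2)^2\Big)^{1/2} \le \varepsilon\\[4pt]
\text{discipline 1:}\quad & \min_{\hat{z}_1,\,x}\;\; \lVert z-\hat{z}_1\rVert^2 + (\hat{x}-x)^2 + (\hat{y}_1-y_1)^2
\quad \text{s.t.}\;\; 3.16 - y_1 \le 0\\[4pt]
\text{discipline 2:}\quad & \min_{\hat{z}_2}\;\; \lVert z-\hat{z}_2\rVert^2 + (\hat{y}_2-y_2)^2
\quad \text{s.t.}\;\; y_2 - 24 \le 0
\end{aligned}
$$

Textbook CO usually imposes one compatibility constraint per discipline; the implementation below aggregates them into a single relaxed constraint (with epsilon = 0.005), which is what the top-level finite differences see.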

Note: as expected for CO, the convergence behavior is terrible. Solving the Sellar problem this way is extremely inefficient, but it shows a rough way to set up MDAO architectures in OpenMDAO 2.0.

import numpy as np

from openmdao.api import Problem, ScipyOptimizer, ExplicitComponent, Group, IndepVarComp, ExecComp


class SellarDis1(ExplicitComponent):


    def setup(self):

        # Global Design Variable
        self.add_input('z', val=np.zeros(2))

        # Local Design Variable
        self.add_input('x', val=0.)

        # Coupling parameter
        self.add_input('y2', val=1.0)

        # Coupling output
        self.add_output('y1', val=1.0)

        # Finite difference all partials.
        self.declare_partials('*', '*')

    def compute(self, inputs, outputs):

        z1 = inputs['z'][0]
        z2 = inputs['z'][1]
        x1 = inputs['x']
        y2 = inputs['y2']

        outputs['y1'] = z1**2 + z2 + x1 - 0.2*y2

    def compute_partials(self, inputs, partials):
        """
        Jacobian for Sellar discipline 1.
        """
        partials['y1', 'y2'] = -0.2
        partials['y1', 'z'] = np.array([[2.0 * inputs['z'][0], 1.0]])
        partials['y1', 'x'] = 1.0


class SellarDis2(ExplicitComponent):


    def setup(self):
        # Global Design Variable
        self.add_input('z', val=np.zeros(2))

        # Coupling parameter
        self.add_input('y1', val=1.0)

        # Coupling output
        self.add_output('y2', val=1.0)

        # Finite difference all partials.
        self.declare_partials('*', '*')

    def compute(self, inputs, outputs):


        z1 = inputs['z'][0]
        z2 = inputs['z'][1]
        y1 = inputs['y1']

        # Note: this may cause some issues. However, y1 is constrained to be
        # above 3.16, so lets just let it converge, and the optimizer will
        # throw it out
        if y1.real < 0.0:
            y1 *= -1

        outputs['y2'] = y1**.5 + z1 + z2

    def compute_partials(self, inputs, J):
        """
        Jacobian for Sellar discipline 2.
        """
        y1 = inputs['y1']
        if y1.real < 0.0:
            y1 *= -1

        J['y2', 'y1'] = .5*y1**-.5
        J['y2', 'z'] = np.array([[1.0, 1.0]])


class SubOpt1(ExplicitComponent):
    ''' minimize differences between target and local variables of the first discipline of the sellar problem '''

    def setup(self):
        self.add_input('z', val=np.array([5.0, 2.0]))
        self.add_input('x_hat', val=1.)
        self.add_input('y1_hat', val=1)
        self.add_input('y2_hat', val=1)

        self.add_output('y1', val=1.0)
        self.add_output('z_hat', val=np.ones(2))
        self.add_output('x', val=1.0)

        # using FD to get derivatives across the sub-optimization
        # note: the sub-optimization itself is using analytic derivatives
        self.declare_partials('y1', ['z', 'x_hat', 'y1_hat', 'y2_hat'], method='fd', step=1e-4, step_calc='abs')
        self.declare_partials('z_hat', ['z', 'x_hat', 'y1_hat', 'y2_hat'], method='fd', step=1e-4, step_calc='abs')
        self.declare_partials('x', ['z', 'x_hat', 'y1_hat', 'y2_hat'], method='fd', step=1e-4, step_calc='abs')


        self.prob = p = Problem()

        # have to define these copies so that OpenMDAO can compute derivs wrt these variables
        params = p.model.add_subsystem('params', IndepVarComp(), promotes=['*'])
        params.add_output('z', val=np.ones(2))
        params.add_output('x_hat', val=1.)
        params.add_output('y1_hat', val=1.)
        params.add_output('y2_hat', val=1.)

        des_vars = p.model.add_subsystem('des_vars', IndepVarComp(), promotes=['*'])
        des_vars.add_output('z_hat', val=np.array([5.0, 2.0]))
        des_vars.add_output('x', val=1.)

        p.model.add_subsystem('d1', SellarDis1())

        # using (global-local)**2 ordering
        p.model.add_subsystem('J', ExecComp('f = sum((z-z_hat)**2) + (x_hat-x)**2 +(y1_hat-y1)**2', z=np.zeros(2), z_hat=np.zeros(2)))
        p.model.add_subsystem('con', ExecComp('c = 3.16 - y1'))

        # data connections in the !!!sub-problem!!!
        p.model.connect('z', 'J.z')
        p.model.connect('x_hat', 'J.x_hat')
        p.model.connect('y2_hat', 'd1.y2')
        p.model.connect('y1_hat', 'J.y1_hat')

        p.model.connect('d1.y1', ['J.y1','con.y1'])
        p.model.connect('z_hat', ['J.z_hat', 'd1.z'])
        p.model.connect('x', ['J.x','d1.x'])

        p.driver = ScipyOptimizer()
        p.driver.options['optimizer'] = 'SLSQP'
        p.driver.options['maxiter'] = 100
        p.driver.options['tol'] = 1e-8

        p.model.add_design_var('x', lower=0, upper=10)
        p.model.add_design_var('z_hat', lower=-10.0, upper=10)
        p.model.add_objective('J.f')
        p.model.add_constraint('con.c', upper=0)

        p.setup()
        p.final_setup()


    def compute(self, inputs, outputs): 

        p = self.prob
        # push any global inputs down, using full absolute path names
        p['y2_hat'] = inputs['y2_hat'] 
        p['z'] = inputs['z'] 
        p['x_hat'] = inputs['x_hat'] 
        p['y1_hat'] = inputs['y1_hat'] 

        #run the optimization 
        print('subopt 1 solve')
        # print('    ', inputs['z'], inputs['x_hat'], inputs['y1_hat'], inputs['y2_hat'], outputs['y1'], outputs['z_hat'])
        p.run_driver()

        # pull the values back up into the output array
        outputs['y1'] = p['d1.y1']
        outputs['z_hat'] = p['z_hat']
        outputs['x'] = p['x']



class SubOpt2(ExplicitComponent):
    ''' minimize differences between target and local variables of the second discipline of the sellar problem '''

    def setup(self):
        self.add_input('z', val=np.array([5.0, 2.0]))
        self.add_input('y1_hat', val=1)
        self.add_input('y2_hat', val=1)

        self.add_output('y2', val=1.0)
        self.add_output('z_hat', val=np.ones(2))


        # using FD to get derivatives across the sub-optimization
        # note: the sub-optimization itself is using analytic derivatives
        self.declare_partials('y2', ['z', 'y1_hat', 'y2_hat'], method='fd', step=1e-4, step_calc='abs')
        self.declare_partials('z_hat', ['z', 'y1_hat', 'y2_hat'], method='fd', step=1e-4, step_calc='abs')

        self.prob = p = Problem()

        # have to define these copies so that OpenMDAO can compute derivs wrt these variables
        params = p.model.add_subsystem('params', IndepVarComp(), promotes=['*'])
        params.add_output('z', val=np.ones(2))
        params.add_output('y1_hat', val=1.)
        params.add_output('y2_hat', val=1.)

        des_vars = p.model.add_subsystem('des_vars', IndepVarComp(), promotes=['*'])
        des_vars.add_output('z_hat', val=np.array([5.0, 2.0]))

        p.model.add_subsystem('d2', SellarDis2())

        # using (global-local)**2 ordering
        p.model.add_subsystem('J', ExecComp('f = sum((z-z_hat)**2) + (y2_hat-y2)**2', z=np.zeros(2), z_hat=np.zeros(2)))
        p.model.add_subsystem('con', ExecComp('c = y2 - 24.0'))

        # data connections in the !!!sub-problem!!!
        p.model.connect('y1_hat', 'd2.y1')
        p.model.connect('z', 'J.z')
        p.model.connect('y2_hat', 'J.y2_hat')

        p.model.connect('d2.y2', ['J.y2','con.y2'])
        p.model.connect('z_hat', ['J.z_hat', 'd2.z'])

        p.driver = ScipyOptimizer()
        p.driver.options['optimizer'] = 'SLSQP'
        p.driver.options['maxiter'] = 100
        p.driver.options['tol'] = 1e-8

        p.model.add_design_var('z_hat', lower=-10.0, upper=10)
        p.model.add_objective('J.f')
        p.model.add_constraint('con.c', upper=0)

        p.setup()
        p.final_setup()


    def compute(self, inputs, outputs): 

        p = self.prob
        # push any global inputs down, using full absolute path names
        p['y1_hat'] = inputs['y1_hat'] 
        p['z'] = inputs['z'] 
        p['y2_hat'] = inputs['y2_hat'] 

        #run the optimization 
        print('subopt 2 solve')
        p.run_driver()

        # pull the values back up into the output array
        outputs['y2'] = p['d2.y2']
        outputs['z_hat'] = p['z_hat']
        # print('    ', inputs['z'], inputs['y1_hat'], inputs['y2_hat'], outputs['y2'], outputs['z_hat'])



class SellarCO(Group):
    ''' optimize top objective function of the sellar problem with the target variables '''

    def setup(self): 


        des_vars = self.add_subsystem('des_vars', IndepVarComp(), promotes=['*'])

        des_vars.add_output('z', val=np.array([5.0, 2.0]))
        des_vars.add_output('x_hat', val=1)

        des_vars.add_output('y1_hat', val=1)
        des_vars.add_output('y2_hat', val=2.5)

        self.add_subsystem('subopt_1', SubOpt1())
        self.add_subsystem('subopt_2', SubOpt2())

        self.add_subsystem('J', ExecComp('c = (sum((z-z1_hat)**2) + sum((z-z2_hat)**2) + (x_hat-x)**2 + (y1_hat-y1)**2 + (y2_hat-y2)**2)**.5',
                                         z=np.zeros(2), z1_hat=np.zeros(2), z2_hat=np.zeros(2)))
        self.add_subsystem('obj', ExecComp('f = x_hat**2 + z[1] + y1_hat + exp(-y2_hat)', z=np.zeros(2)))

        self.connect('z', ['subopt_1.z', 'subopt_2.z', 'obj.z', 'J.z'])
        self.connect('x_hat', ['obj.x_hat', 'J.x_hat', 'subopt_1.x_hat'])
        self.connect('y1_hat', ['subopt_1.y1_hat', 'subopt_2.y1_hat', 'J.y1_hat', 'obj.y1_hat'])
        self.connect('y2_hat', ['subopt_1.y2_hat', 'subopt_2.y2_hat', 'J.y2_hat', 'obj.y2_hat'])

        self.connect('subopt_1.z_hat', 'J.z1_hat')
        self.connect('subopt_1.y1', 'J.y1')
        self.connect('subopt_1.x', 'J.x')
        self.connect('subopt_2.z_hat', 'J.z2_hat')
        self.connect('subopt_2.y2', 'J.y2')




if __name__ == '__main__':

    from openmdao.api import Problem, ScipyOptimizer, pyOptSparseDriver

    prob = Problem()
    prob.model = SellarCO()

    prob.driver = pyOptSparseDriver() 
    prob.driver.options['optimizer'] = 'SNOPT'
    prob.driver.opt_settings['Major optimality tolerance'] = 1e-1
    prob.driver.opt_settings['Major feasibility tolerance'] = 1e-3

    prob.model.add_design_var('z', lower=np.array([-10.0, 0.0]),upper=np.array([10.0, 10.0]))
    prob.model.add_design_var('x_hat', lower=0.0, upper=10.0)
    prob.model.add_design_var('y1_hat', lower=-10.0, upper=10.0)
    prob.model.add_design_var('y2_hat', lower=-10.0, upper=10.0)

    prob.model.add_objective('obj.f')
    prob.model.add_constraint('J.c', upper=0.005)

    prob.setup()

    prob.run_driver()

    print("\n")
    print( "Minimum target found at (%f, %f, %f)" % (prob['z'][0], prob['z'][1], prob['x_hat']))

    print("Coupling vars target: %f, %f" % (prob['y1_hat'], prob['y2_hat']))
    print("Minimum objective: ", prob['obj.f'])
    # print("constraints: ", prob['con1'] , prob['con2'])
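
The discipline-level local copies are ordinary outputs of the two sub-optimization components, so (with the variable names above) they can be read from the top-level problem after the run if you want to check how well the compatibility constraint was satisfied:

    # inspect the discipline-level local copies and the compatibility measure
    print("subopt_1 local copies: z_hat =", prob['subopt_1.z_hat'],
          "x =", prob['subopt_1.x'], "y1 =", prob['subopt_1.y1'])
    print("subopt_2 local copies: z_hat =", prob['subopt_2.z_hat'],
          "y2 =", prob['subopt_2.y2'])
    print("compatibility measure J.c =", prob['J.c'])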
answered Jan 23, 2018 at 2:27