
I recently switched from celerite to celerite2 to model stellar light curves. I followed the celerite2 tutorials closely, adapting them to my needs. My model consists of a flat mean, a jitter term, a granulation term, and an SHO term used to model non-radial oscillations.

Data input:

The observations are converted from a lightkurve.KeplerLightCurve object as follows:

import numpy as np
import lightkurve as lk

import pymc3 as pm
import theano.tensor as tt
import theano

import celerite2
from celerite2.theano import terms

datalist = lk.search_lightcurvefile('KIC005002732', cadence='long')
data = datalist[2:3].download_all()
lc = data.stitch().remove_outliers().remove_nans()
lc = lc[0:2001] # cut lightcurve

# Convert to ppm
lc.flux = (lc.flux - 1) * 1e6
lc.flux_err *= 1e6

#Find numax prior:
pg = lc.to_periodogram(method='lombscargle', normalization='psd',
                      minimum_frequency=50, maximum_frequency=300, oversample_factor = 3)
snr = pg.flatten()
seis = snr.to_seismology()
numax_guess = seis.estimate_numax(window_width = 1)

time =     np.ascontiguousarray(lc.time, dtype=np.float64)
flux =     np.ascontiguousarray(lc.flux, dtype=np.float64)
flux_err = np.ascontiguousarray(lc.flux_err, dtype=np.float64)
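
As a quick sanity check (diagnostics only, not part of the model), all three arrays should be 1-d float64 arrays of equal length, and the time stamps should be sorted, since gp.compute is later called with check_sorted=True:

print(time.shape, flux.shape, flux_err.shape)   # equal-length 1-d arrays
print(time.dtype, flux.dtype, flux_err.dtype)   # float64 everywhere
print(np.all(np.diff(time) > 0))                # True if time is strictly increasing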

Priors:

The priors (prior_*_m and prior_*_sd) are all pure numpy float64 scalars.

# MEAN FUNCTION
prior_mean_m  = np.float64(0.0)
prior_mean_sd = np.float64(np.std(flux))

# JITTER TERM
prior_jitter_m  = np.float64(10)
prior_jitter_sd = np.float64(5)

# GRANULATION TERM(S)
prior_w_gran_m  = np.float64([2*np.pi*numax_guess.value/2])
prior_w_gran_sd = np.float64(100.0)

prior_a_gran_m  = np.float64([np.var(flux)])
prior_a_gran_sd = np.float64(np.var(flux)/100*50)

prior_Q_gran_m =  np.float64(1/np.sqrt(2))
prior_Q_gran_sd = np.float64(4)

prior_S_gran_m =  prior_a_gran_m/(prior_w_gran_m * prior_Q_gran_m)
prior_S_gran_sd=  np.abs(prior_Q_gran_sd/(prior_w_gran_m*prior_Q_gran_m)
                        - (prior_a_gran_m *prior_w_gran_sd)/(prior_w_gran_m**2*prior_Q_gran_m)
                        - (prior_a_gran_m*prior_Q_gran_sd)/(prior_w_gran_m*prior_Q_gran_m**2))

# NONRADIAL OSCILLATION TERM(S)
prior_rho_osc_m  = np.float64(1/numax_guess.value)
prior_rho_osc_sd = prior_rho_osc_m/100*10

prior_tau_osc_m  = np.float64(2*4/(2*np.pi*numax_guess.value)) # deterministic result for Q = 4, numax = numax_guess
prior_tau_osc_sd = prior_tau_osc_m/100*50

prior_sig_osc_m =  np.float64(np.sqrt(np.var(flux)))
prior_sig_osc_sd = prior_sig_osc_m/100*50
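
To make explicit what actually gets passed into the model, the type and shape of each prior can be printed like this (diagnostics only):

for name, val in [("prior_w_gran_m", prior_w_gran_m), ("prior_a_gran_m", prior_a_gran_m),
                  ("prior_Q_gran_m", prior_Q_gran_m), ("prior_S_gran_m", prior_S_gran_m),
                  ("prior_rho_osc_m", prior_rho_osc_m), ("prior_sig_osc_m", prior_sig_osc_m)]:
    print(name, type(val), np.shape(val))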

Building the model:

I have included a shape argument for most of the distributions because I want my code to handle vectors as input, in case I need multiple granulation/oscillation terms.

with pm.Model() as gp_model:
# MEAN FUNCTION ----------------------------------------------------------------------------------------------------------------
    mean_function = pm.Normal("mean", mu=prior_mean_m, sd=prior_mean_sd)

# JITTER TERM ------------------------------------------------------------------------------------------------------------------
    jitter = pm.Lognormal("jitter", mu=prior_jitter_m, sigma=prior_jitter_sd)
    
# GRANULATION TERM(S) ----------------------------------------------------------------------------------------------------------
    num_gran_terms = np.size(prior_w_gran_m) # used later on
    #Set pymc3 priors for parameters
    Q_gran = prior_Q_gran_m
    a_gran = pm.Lognormal("a_gran", mu = np.log(prior_a_gran_m), sigma = prior_a_gran_sd, shape = np.size(prior_a_gran_m)) #parameter a of SHO term. Used to link w0 and S0. 
    w_gran = pm.Lognormal("w_gran", mu = np.log(prior_w_gran_m), sigma = prior_w_gran_sd, shape = np.size(prior_w_gran_m))
    S_gran = pm.Deterministic("S_gran", a_gran/(w_gran * Q_gran)) #S0 = a/(w0*Q)

    # Build terms
    if np.size(prior_w_gran_m) is 1:
        gran_terms = terms.SHOTerm(S0=S_gran, w0=w_gran, Q=Q_gran)
    else:
        gran_terms = terms.SHOTerm(S0=S_gran[0], w0=w_gran[0], Q=Q_gran)
        for i in range(1,np.size(prior_a_gran_m)):
            gran_terms += terms.SHOTerm(S0=S_gran[i], w0=w_gran[i], Q=Q_gran)
    
# NONRADIAL OSCILLATION TERM(S) ------------------------------------------------------------------------------------------------    
    num_osc_terms = np.size(prior_rho_osc_m) # used later on
    #Set pymc3 priors for parameters
    tau_osc = pm.Lognormal("tau_osc", mu = np.log(prior_tau_osc_m), sigma = prior_tau_osc_sd, shape = np.size(prior_tau_osc_m))
    rho_osc = pm.Lognormal("rho_osc", mu = np.log(prior_rho_osc_m), sigma = prior_rho_osc_sd, shape = np.size(prior_rho_osc_m))
    sig_osc = pm.Lognormal("sig_osc", mu = np.log(prior_sig_osc_m), sigma = prior_sig_osc_sd, shape = np.size(prior_sig_osc_m))

    # Build terms
    if np.size(prior_rho_osc_m) is 1:
        osc_terms = terms.SHOTerm(rho=rho_osc, tau=tau_osc, sigma=sig_osc)
    else:
        osc_terms = terms.SHOTerm(rho=rho_osc[0], tau=tau_osc[0], sigma=sig_osc[0])
        for i in range(1,np.size(prior_rho_osc_m)):
            osc_terms += terms.SHOTerm(rho=rho_osc[i], tau=tau_osc[i], sigma=sig_osc[i])       

# BUILD GP ---------------------------------------------------------------------------------------------------------------------
    kernel = terms.TermSum(gran_terms, osc_terms)

    gp = celerite2.theano.GaussianProcess(kernel, mean = mean_function)
    gp.compute(time, diag=flux_err ** 2 + jitter, check_sorted = True) # Compute cholesky factorisation of covariance matrix
    gp.marginal("obs", observed=flux) # Add marginal likelihood to model (observations)
    print("Initial log likelihood: {0}".format(gp.log_likelihood(flux)))
    pm.Potential("loglike", gp.log_likelihood(flux - mean_function)) # Define Potential to be used for optimization

Now, when I run this code, I get the following error:

TypeError: The two branches should have identical types, but they are TensorType(float64, vector) and TensorType(float64, (True, True)) respectively. This error could be raised if for example you provided a one element list on the `then` branch but a tensor on the `else` branch.
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-16-b80a96105cf6> in <module>
     16     # Build terms
     17     if np.size(prior_w_gran_m) is 1:
---> 18         gran_terms = terms.SHOTerm(S0=S_gran, w0=w_gran, Q=Q_gran)
     19     else:
     20         gran_terms = terms.SHOTerm(S0=S_gran[0], w0=w_gran[0], Q=Q_gran)
~\Anaconda3\lib\site-packages\celerite2\terms.py in wrapped(target, *args, **kwargs)
    604                             break
    605 
--> 606             return to_wrap(target, *args, **kwargs)
    607 
    608         return wrapped
~\Anaconda3\lib\site-packages\celerite2\theano\terms.py in __init__(self, eps, **kwargs)
    444     def __init__(self, *, eps=1e-5, **kwargs):
    445         self.eps = tt.as_tensor_variable(eps).astype("float64")
--> 446         super().__init__(**kwargs)
    447 
    448     def overdamped(self):
~\Anaconda3\lib\site-packages\celerite2\theano\terms.py in __init__(self, dtype)
     27     def __init__(self, *, dtype="float64"):
     28         self.dtype = dtype
---> 29         self.coefficients = self.get_coefficients()
     30 
     31     def __add__(self, b):
~\Anaconda3\lib\site-packages\celerite2\theano\terms.py in get_coefficients(self)
    480     def get_coefficients(self):
    481         m = self.Q < 0.5
--> 482         return [
    483             ifelse(m, a, b)
    484             for a, b in zip(self.overdamped(), self.underdamped())
~\Anaconda3\lib\site-packages\celerite2\theano\terms.py in <listcomp>(.0)
    481         m = self.Q < 0.5
    482         return [
--> 483             ifelse(m, a, b)
    484             for a, b in zip(self.overdamped(), self.underdamped())
    485         ]
~\Anaconda3\lib\site-packages\theano\ifelse.py in ifelse(condition, then_branch, else_branch, name)
    367             if then_branch_elem.type != else_branch_elem.type:
    368                 # If the types still don't match, there is a problem.
--> 369                 raise TypeError(
    370                     'The two branches should have identical types, but '
    371                     'they are %s and %s respectively. This error could be '
TypeError: The two branches should have identical types, but they are TensorType(float64, vector) and TensorType(float64, (True, True)) respectively. This error could be raised if for example you provided a one element list on the `then` branch but a tensor on the `else` branch.

This makes no sense to me (as, to be honest, most theano error messages don't). Why is one of the branches a boolean?
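
The closest I have come to decoding the message (and I may well be misreading it) is that the (True, True) part looks like theano's broadcastable pattern for a 1x1 tensor rather than an actual boolean value, e.g.:

import numpy as np
import theano.tensor as tt

print(tt.as_tensor_variable(np.ones(3)).type)       # TensorType(float64, vector)
print(tt.as_tensor_variable(np.ones((1, 1))).type)  # TensorType(float64, (True, True))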

What I have tried

I have tried omitting the shape argument in the code, since I have run into problems with it before.

a_gran = pm.Lognormal("a_gran", mu = np.log(prior_a_gran_m), sigma = prior_a_gran_sd)
...

This results in the following error:

TypeError: For compute_test_value, one input test value does not have the requested type.

The error when converting the test value to that variable type:
Wrong number of dimensions: expected 0, got 1 with shape (1,).
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-70-d0ae027fe60a> in <module>
     10     num_gran_terms = np.size(prior_w_gran_m)
     11     Q_gran = prior_Q_gran_m
---> 12     a_gran = pm.Lognormal("a_gran", mu = np.log(prior_a_gran_m), sigma = prior_a_gran_sd) #parameter a of SHO term. Used to link w0 and S0.
     13     w_gran = pm.Lognormal("w_gran", mu = np.log(prior_w_gran_m), sigma = prior_w_gran_sd)
     14     S_gran = pm.Deterministic("S_gran", a_gran/(w_gran * Q_gran)) #S0 = a/(w0*Q)
~\Anaconda3\lib\site-packages\pymc3\distributions\distribution.py in __new__(cls, name, *args, **kwargs)
     45             total_size = kwargs.pop('total_size', None)
     46             dist = cls.dist(*args, **kwargs)
---> 47             return model.Var(name, dist, data, total_size)
     48         else:
     49             raise TypeError("Name needs to be a string but got: {}".format(name))
~\Anaconda3\lib\site-packages\pymc3\model.py in Var(self, name, dist, data, total_size)
    924             else:
    925                 with self:
--> 926                     var = TransformedRV(name=name, distribution=dist,
    927                                         transform=dist.transform,
    928                                         total_size=total_size,
~\Anaconda3\lib\site-packages\pymc3\model.py in __init__(self, type, owner, index, name, distribution, model, transform, total_size)
   1654 
   1655             self.transformed = model.Var(
-> 1656                 transformed_name, transform.apply(distribution), total_size=total_size)
   1657 
   1658             normalRV = transform.backward(self.transformed)
~\Anaconda3\lib\site-packages\pymc3\distributions\transforms.py in apply(self, dist)
    110     def apply(self, dist):
    111         # avoid circular import
--> 112         return TransformedDistribution.dist(dist, self)
    113 
    114     def __str__(self):
~\Anaconda3\lib\site-packages\pymc3\distributions\distribution.py in dist(cls, *args, **kwargs)
     55     def dist(cls, *args, **kwargs):
     56         dist = object.__new__(cls)
---> 57         dist.__init__(*args, **kwargs)
     58         return dist
     59 
~\Anaconda3\lib\site-packages\pymc3\distributions\transforms.py in __init__(self, dist, transform, *args, **kwargs)
    139         self.dist = dist
    140         self.transform_used = transform
--> 141         v = forward(FreeRV(name="v", distribution=dist))
    142         self.type = v.type
    143 
~\Anaconda3\lib\site-packages\pymc3\model.py in __init__(self, type, owner, index, name, distribution, total_size, model)
   1368             self.tag.test_value = np.ones(
   1369                 distribution.shape, distribution.dtype) * distribution.default()
-> 1370             self.logp_elemwiset = distribution.logp(self)
   1371             # The logp might need scaling in minibatches.
   1372             # This is done in `Factor`.
~\Anaconda3\lib\site-packages\pymc3\distributions\continuous.py in logp(self, value)
   1832         mu = self.mu
   1833         tau = self.tau
-> 1834         return bound(-0.5 * tau * (tt.log(value) - mu)**2
   1835                      + 0.5 * tt.log(tau / (2. * np.pi))
   1836                      - tt.log(value),
~\Anaconda3\lib\site-packages\theano\gof\op.py in __call__(self, *inputs, **kwargs)
    623             for i, ins in enumerate(node.inputs):
    624                 try:
--> 625                     storage_map[ins] = [self._get_test_value(ins)]
    626                     compute_map[ins] = [True]
    627                 except AttributeError:
~\Anaconda3\lib\site-packages\theano\gof\op.py in _get_test_value(cls, v)
    560             # ensure that the test value is correct
    561             try:
--> 562                 ret = v.type.filter(v.tag.test_value)
    563             except Exception as e:
    564                 # Better error message.
~\Anaconda3\lib\site-packages\theano\tensor\type.py in filter(self, data, strict, allow_downcast)
    174 
    175         if self.ndim != data.ndim:
--> 176             raise TypeError("Wrong number of dimensions: expected %s,"
    177                             " got %s with shape %s." % (self.ndim, data.ndim,
    178                                                         data.shape))
TypeError: For compute_test_value, one input test value does not have the requested type.

The error when converting the test value to that variable type:
Wrong number of dimensions: expected 0, got 1 with shape (1,).

I have also tried setting the test value manually, which produces the same error.

a_gran = pm.Lognormal("a_gran", mu = np.log(prior_a_gran_m), sigma = prior_a_gran_sd, testval = np.log(prior_a_gran_m))
...

This is my first time posting here, so I'm very sorry if I have forgotten any important information. Thanks in advance for your help!
