
I have a question about how to reduce running time.
The code I wrote is in Python.
It takes a huge dataset as input, processes it, does the calculations, and writes the output to an array.
Most of the calculations are quite simple, for example sums. The input file has about 100 million rows and 3 columns. The problem I am facing is that the run takes too long. How can I reduce the running time?

Here is the code I wrote.
I need to write all of the new values I compute (from GenePair through RM_pval, with headers) to a new file. Thank you very much.

fi = open ('1.txt')
fo = open ('2.txt','w')

import math
def log(x):
    return math.log(x)

from math import sqrt

import sys
sys.path.append('/tools/lib/python2.7/site-packages')
import numpy
import scipy
import numpy as np
from scipy.stats.distributions import norm

for line in fi.xreadlines():
    tmp = line.split('\t')

    GenePair = tmp[0].strip()

    PCC_A = float(tmp[1].strip())
    PCC_B = float(tmp[2].strip())

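    # Fisher z-transformation of the two correlations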
    ZVAL_A = 0.5 * log((1+PCC_A)/(1-PCC_A))
    ZVAL_B = 0.5 * log((1+PCC_B)/(1-PCC_B))

    ABS_ZVAL_A = abs(ZVAL_A)
    ABS_ZVAL_B = abs(ZVAL_B)

    Var_A = float(1) / float(21-3) #SAMPLESIZE - 3
    Var_B = float(1) / float(18-3) #SAMPLESIZE - 3

    WT_A = 1/Var_A #float
    WT_B = 1/Var_B #float

    ZVAL_A_X_WT_A = ZVAL_A * WT_A #float
    ZVAL_B_X_WT_B = ZVAL_B * WT_B #float

    SumofWT = (WT_A + WT_B) #float
    SumofZVAL_X_WT = (ZVAL_A_X_WT_A + ZVAL_B_X_WT_B) #float

    #FIXED MODEL
    meanES = SumofZVAL_X_WT / SumofWT #float
    Var = float(1) / SumofWT #float
    SE = math.sqrt(float(Var)) #float
    LL = meanES - (1.96 * SE) #float
    UL = meanES + (1.96 * SE) #float
    z_score = meanES / SE #float
    p_val = scipy.stats.norm.sf(z_score)

    #CAL
    ES_POWER_X_WT_A = pow(ZVAL_A,2) * WT_A #float
    ES_POWER_X_WT_B = pow(ZVAL_B,2) * WT_B #float
    WT_POWER_A = pow(WT_A,2)
    WT_POWER_B = pow(WT_B,2)
    SumofES_POWER_X_WT = ES_POWER_X_WT_A + ES_POWER_X_WT_B
    SumofWT_POWER = WT_POWER_A + WT_POWER_B

    #COMPUTE TAU
    tmp_A = ZVAL_A - meanES
    tmp_B = ZVAL_B - meanES
    temp = pow(SumofZVAL_X_WT,2)

    Q = SumofES_POWER_X_WT - (temp /(SumofWT))      
    if PCC_A !=0 or PCC_B !=0:
        df = 0
    else:
        df = 1

    c = SumofWT - (SumofWT_POWER / SumofWT)
    if c == 0:
        tau_square = 0
    else:
        tau_square = (Q - df) / c

    #calculation
    Var_total_A = Var_A + tau_square
    Var_total_B = Var_B + tau_square

    WT_total_A = float(1) / Var_total_A
    WT_total_B = float(1) / Var_total_B

    ZVAL_X_WT_total_A = ZVAL_A * WT_total_A
    ZVAL_X_WT_total_B = ZVAL_B * WT_total_B

    Sumoftotal_WT = WT_total_A + WT_total_B
    Sumoftotal_ZVAL_X_WT= ZVAL_X_WT_total_A + ZVAL_X_WT_total_B


    #RANDOM MODEL
    RM_meanES = Sumoftotal_ZVAL_X_WT / Sumoftotal_WT
    RM_Var = float(1) / Sumoftotal_WT
    RM_SE = math.sqrt(float(RM_Var))
    RM_LL = RM_meanES - (1.96 * RM_SE)
    RM_UL = RM_meanES + (1.96 * RM_SE)
    RM_z_score = RM_meanES / RM_SE
    RM_p_val = scipy.stats.norm.sf(RM_z_score)
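    # (sketch, not from the original post) write one tab-separated output line per
    # input row; the exact column list from GenePair through RM_pval is an assumption,
    # and the matching header row would be written to fo once, before the loop
    fo.write('\t'.join(str(v) for v in (GenePair, meanES, Var, SE, LL, UL, z_score,
                                        p_val, RM_meanES, RM_Var, RM_SE, RM_LL,
                                        RM_UL, RM_z_score, RM_p_val)) + '\n')

fo.close()
fi.close()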

1 Answer


Definitely do the profiler thing, but... I think the only major speedup is going to come from parallelism. If you are running a CPU-bound problem like this, using multiple cores is essential. Try handing each line to a different thread/process. That of course raises more questions, such as: does the data need to stay in the same order as the input file? If so, just enumerate the lines and pass a second argument to big_hairy_func saying which line it is (see the ordering sketch after the boilerplate below).

Here is some boilerplate code to get you started.

Notes:

xreadlines is deprecated; even though it handles large files, replace it with a plain for line in file: loop.

fi = open('1.txt')
fo = open('2.txt','w')

import math
def log(x):
    return math.log(x)

from math import sqrt

import multiprocessing as mp
import sys
sys.path.append('/tools/lib/python2.7/site-packages')
import scipy
import numpy as np
from scipy.stats.distributions import norm

def big_hairy_func(linefromfile):
    <majority of your post here>
    return <whatever data you were going to write to 'fo'>

if __name__ == '__main__':
    pool = mp.Pool(4) #rule of thumb.  Replace '4' with the number of cores on your system
    result = pool.map(big_hairy_func, (input for input in fi.readlines()))
    <write the result to fo that you haven't posted>
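
For reference, here is one minimal way those two placeholders might be filled in, assuming each input line should become one tab-separated output line and the output order must match the input; the header columns, the dummy RM_p_val value and the packing of (line number, line) into a single argument are illustrative assumptions, not code from the post.

import multiprocessing as mp

def big_hairy_func(indexed_line):
    # unpack the line number so the original order can be restored later
    idx, line = indexed_line
    tmp = line.split('\t')
    GenePair = tmp[0].strip()
    PCC_A = float(tmp[1].strip())
    PCC_B = float(tmp[2].strip())
    # ... the statistics from the question (meanES, p_val, RM_meanES, RM_p_val, ...) go here ...
    RM_p_val = 0.0  # dummy value so this sketch stands on its own
    # return the line number plus one tab-separated output line
    return idx, '\t'.join(str(v) for v in (GenePair, PCC_A, PCC_B, RM_p_val))

if __name__ == '__main__':
    fi = open('1.txt')
    fo = open('2.txt', 'w')
    fo.write('GenePair\tPCC_A\tPCC_B\tRM_pval\n')  # header row; extend with the other columns
    pool = mp.Pool(4)
    # enumerate() attaches the line number to each line, as suggested above
    results = pool.map(big_hairy_func, enumerate(fi))
    # pool.map already returns results in input order; sorting on the index only
    # matters if you switch to something like imap_unordered
    for idx, outline in sorted(results):
        fo.write(outline + '\n')
    fi.close()
    fo.close()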

xreadlines was deprecated back in Python 2.3, so for that version I'm not sure whether the generator will work. Let me know if you have questions about compatibility with your version of Python.

Answered on 2013-11-01T05:57:51.430