1

首先,我想说我是 Python 新手,这段代码是根据 StackOverflow 用户的建议和意见编写的。代码如下所示:

# Read the WASP light-curve file into memory.
# - raw string (r'...') so the backslashes in the Windows path are never
#   interpreted as escape sequences
# - "with" guarantees the file handle is closed (the original leaked it)
with open(r'E:\Python27\WASP DATA\Sample Data.txt', "r") as f:
    line = f.readlines()

# X: one [HJD, magnitude, error] float row per data line.
# Rows 0..109 are "# ..." header lines, so skip them -- TODO confirm the
# header really is exactly 110 lines in every data file.
X = []
for n, lines in enumerate(line):
    if n > 109:
        # split on any run of whitespace and convert every field to float
        # (replaces the original split('    ') / join / split round trip)
        X.append([float(item) for item in lines.split()])

#print X[0] # print first element in the list

# Pull the period and epoch of each periodogram peak straight out of the
# header by fixed line number and column slice.
# NOTE(review): this assumes the header layout is fixed-width and always
# identical -- e.g. line[28] columns 23:31 hold peak 1's period and
# line[27] columns 22:31 its epoch.  TODO confirm these offsets against
# the actual data file; a format change silently breaks these floats.
Period_1 = float(line[28][23:31])
Epoch_1 = float(line[27][22:31])
Period_2 = float(line[44][23:31])
Epoch_2 = float(line[43][22:31])
# Peaks 3-5 exist in the header but are not used below:
#Period_3 = float(line[60][23:31])
#Epoch_3 = float(line[59][22:31])
#Period_4 = float(line[76][23:31])
#Epoch_4 = float(line[75][22:31])
#Period_5 = float(line[108][23:31])
#Epoch_5 = float(line[91][22:31])

print("The time periods are:")
print Period_1
print Period_2
#print Period_3
#print Period_4
#print Period_5 

print("\nThe Epoch times are:")
print Epoch_1
print Epoch_2
#print Epoch_3
#print Epoch_4
#print Epoch_5
print('respectively.')

# Convert each observation time (HJD, column 0) into an orbital phase for
# the first detected period.  Adding 10*Period_1 keeps the numerator
# positive before the fractional part is taken below.
# (The original "phase_var = float" bound the float *type* to a name and
# did nothing; dropped.)
P = [(row[0] + (10 * Period_1) - Epoch_1) / Period_1 for row in X]

print(P[0])

# Keep only the fractional part so every phase lands in [0, 1).
# The subtraction already yields a float, so no explicit float() needed.
P = [p - int(p) for p in P]

#print P[0]

# Mag: the Tamuz-corrected magnitude (column 1) of every data row,
# extracted with a comprehension instead of the index/append loop.
Mag = [row[1] for row in X]

#print Mag[0]
#print X[0]

from pylab import *

# Plot the first scatter diagram to check the data is phased correctly.
# NOTE(review): "from pylab import *" is a wildcard import (scatter,
# xlabel, grid, ... all come from matplotlib's pylab namespace); it is
# kept as-is because the binning section further down also relies on
# these names being in the module namespace.

#plot(P, Mag)
scatter(P, Mag)
xlabel('Phase (Periods)')
ylabel('Magnitude')
#title('Dunno yet')
grid(True)
savefig("test.png")  # save the figure before show() blocks the script
show()

# Bin the phased data so each x position shows the *average* magnitude.
# B controls the binning resolution (number of bins across the phase range).
#
# BUG FIX: the original plotted scatter(bincounts, Mag), i.e. the bin
# *counts* (length B+1) against the magnitudes (a different length and an
# unrelated quantity), which is why the binned plot never looked right.
# Instead we accumulate, per bin, the sum of the magnitudes and the number
# of points, then plot the mean magnitude at each bin centre so the x axis
# still runs over the phase range 0..1.
B = 2050
minv = min(P)
maxv = max(P)
bincounts = [0] * (B + 1)
binsums = [0.0] * (B + 1)
for d, m in zip(P, Mag):
    b = int((d - minv) / (maxv - minv) * B)
    bincounts[b] += 1
    binsums[b] += m

# Mean magnitude per non-empty bin, placed at the bin centre
# (empty bins are skipped to avoid dividing by zero).
centers = []
means = []
for b in range(B + 1):
    if bincounts[b] > 0:
        centers.append(minv + (b + 0.5) * (maxv - minv) / B)
        means.append(binsums[b] / bincounts[b])

# plot the binned (averaged) scatter
scatter(centers, means)
show()

原始图是 P 和 Mag 的散点图。但是,每个时期都有多个 Mag 点。我希望尝试创建一个新的散点图,我可以在其中获取所有这些 Y 值并对每个单独的 X 值取平均值,从而创建一个有两个下降的更紧密的图表。

我曾尝试查看各种数据分箱方法,但无论我使用哪种方法,包含分箱数据的图表似乎都无法正确显示。X 值应该从 0 到 1 运行,就像在预分箱数据图上一样。

这是我正在使用的数据,以防您需要查看它。

http://pastebin.com/60E84azv

有人能就如何创建分箱数据图提供一些建议吗?我对数据分箱的了解非常少。

感谢您的时间!

4

2 回答 2

2

这实际上解决了许多问题,不仅仅是分箱部分。我在数据文件的开头包含了解析块的代码,因此您可以获得所有峰值数据

import numpy
import re
import matplotlib.pyplot as plt

# "# <key> : <value>" header lines; the value may be in scientific notation.
pair = re.compile(r'# (.*?)[ \t]*:[ \t]*([0-9e\.-]+).*')

# Parse the "# ..." header at the top of the data file into a list of
# dicts, one per header block.  Blocks are separated by a bare "#" line;
# the header ends at the first non-comment line.
f = open('sample_data.txt')
next(f)  # skip the very first line (f.next() is Python-2-only)

blocks = []
block = {}

for line in f:
    if line[0] != '#':          # first data row: the header is finished
        if block:               # flush the block in progress, if any
            blocks.append(block)
        break
    line = line.strip()
    m = pair.match(line)
    if m:
        key, valstr = m.groups()
        try:
            value = float(valstr)
        except ValueError:      # non-numeric value: keep it as a string
            value = valstr
        block[key] = value

    if line == "#" and block:   # bare "#" separates header blocks
        blocks.append(block)
        block = {}
# NOTE: the original appended the first block dict to `blocks` up front
# AND again at the first separator, so the first peak appeared twice and
# was plotted twice; blocks are now appended exactly once, when closed.

# Keep only the header blocks that describe a periodogram peak, in peak order.
peaks = sorted([b for b in blocks if 'PEAK' in b], key=lambda b: b['PEAK'])
print(peaks)

# `f` is positioned at the first data row, so loadtxt reads only the data.
colnames = ['HJD', 'Tamuz-corrected magnitude', 'Magnitude error']
data = numpy.loadtxt(f, [(colname, 'float64') for colname in colnames])
f.close()

# One phased-and-binned figure per peak.
Nbins = 50
for peak in peaks:
    plt.figure()
    # Phase in [0, 1): fractional part of (t - epoch) / period; the +10
    # periods keeps the value positive before numpy.modf takes modf.
    phase, _ = numpy.modf((data['HJD'] + 10*peak['Period (days)'] - peak['Epoch'])/peak['Period (days)'])
    mag = data['Tamuz-corrected magnitude']

    # use numpy.histogram to calculate the sum and the number of points in the bins
    sums, _ = numpy.histogram(phase, bins=Nbins, weights=mag)
    N, bin_edges = numpy.histogram(phase, bins=Nbins)

    # We'll plot the mean value at the center of each bin
    centers = (bin_edges[:-1] + bin_edges[1:])/2

    plt.scatter(phase, mag, alpha=0.2)
    plt.plot(centers, sums/N, color='red', linewidth=2)
plt.show()
于 2013-11-17T09:11:58.070 回答
1

完整代码:http://pastebin.com/4aBjZC7Q

这是进行分箱的片段:

x = []  # Column 1: HJD
y = []  # Column 2: Tamuz-corrected magnitude

# code to read "sample_data.txt" into lists x and y
# the full code in http://pastebin.com/4aBjZC7Q includes this part as well

import numpy as np

# These characterize the bins: fixed-width bins across the HJD range.
startBinValue = 5060
endBinValue = 5176
binSize = 0.1

# numpy.arange() generates the bin edges of width binSize between the limits
bins = np.arange(startBinValue, endBinValue, binSize)
# numpy.digitize() "bins" the values: x_binned[i] is the bin number of x[i]
x_binned = np.digitize(x, bins)
y_numpyArray = np.array(y)

# Average the y values that landed in each bin.
# The boolean mask (x_binned == i) selects bin i's members; it is built
# once per bin (the original comprehension rebuilt it up to three times).
# Empty bins get 0 so mean() is never called on an empty array (NaN).
_means = []
for i in range(1, len(bins)):
    members = y_numpyArray[x_binned == i]
    _means.append(members.mean() if members.size > 0 else 0)
y_means = np.array(_means)

# binnedData: (bin-low-limit, bin-high-limit, mean-y) tuples, one per bin
binnedData = [(bins[i], bins[i + 1], y_means[i]) for i in range(len(y_means))]

我已经对代码进行了大量注释。但要了解这些 numpy 函数的全部功能,请参阅 numpy.digitize() 和 numpy.arange() 的文档。假设您事先知道 bin 的大小,我使用 numpy.arange() 创建 bin;但如果您需要固定数量的 bin(例如把 x 数据分成 100 个 bin),请改用 numpy.linspace()。

于 2013-11-16T18:58:44.537 回答