I'm trying to get the flite speech synthesis library running on my Mac, but flite doesn't support my machine's audio architecture. To work around that, I'm using PortAudio to play back the synthesized audio, which meant modifying the audio.c file so flite uses that library. After fiddling with GNU Autotools for a while I got everything to compile cleanly, but when I run the program I get this output:
$ ./flite -t "test"
frameIndex: 0
maxFrameIndex: 0
numChannels: 1
numSamples: 7225
sampleRate: 8000
=== Now playing back. ===
Waiting for playback to finish.
frameIndex in callback: -2008986336
maxFrameIndex in callback: 32655
numChannels in callback: 152579008
numSamples in callback: 0
sampleRate in callback: 0
Segmentation fault: 11
(A second run gives different garbage values in the callback:)
$ ./flite -t "test"
frameIndex: 0
maxFrameIndex: 0
numChannels: 1
numSamples: 7225
sampleRate: 8000
=== Now playing back. ===
Waiting for playback to finish.
frameIndex in callback: -71217888
maxFrameIndex in callback: 32712
numChannels in callback: 232979392
numSamples in callback: 0
sampleRate in callback: 0
Segmentation fault: 11
Here is the relevant code from the audio.c file, which is called when I pass the -t command-line option. After some debugging, I've traced the segmentation fault to the playCallback() function.
static int playCallback( const void *inputBuffer, void *outputBuffer,
                         unsigned long framesPerBuffer,
                         const PaStreamCallbackTimeInfo* timeInfo,
                         PaStreamCallbackFlags statusFlags,
                         void *userData )
{
    cst_wave *data = (cst_wave*)userData;
    short *rptr = &data->samples[data->frameIndex * data->num_channels];
    short *wptr = (short*)outputBuffer;
    unsigned int i;
    int finished;
    unsigned int framesLeft = cst_wave_maxFrameIndex(data) - cst_wave_frameIndex(data);

    (void) inputBuffer; /* Prevent unused variable warnings. */
    (void) timeInfo;
    (void) statusFlags;
    (void) userData;

    printf("frameIndex in callback: %d\n", cst_wave_frameIndex(data));
    printf("maxFrameIndex in callback: %d\n", cst_wave_maxFrameIndex(data));
    printf("numChannels in callback: %d\n", cst_wave_num_channels(data));
    printf("numSamples in callback: %d\n", cst_wave_num_samples(data));
    printf("sampleRate in callback: %d\n\n", cst_wave_sample_rate(data));

    if( framesLeft < framesPerBuffer )
    {
        /* final buffer... */
        for( i=0; i<framesLeft; i++ )
        {
            *wptr++ = *rptr++; /* left */
            if( cst_wave_num_channels(data) == 2 ) *wptr++ = *rptr++; /* right */
        }
        for( ; i<framesPerBuffer; i++ )
        {
            *wptr++ = 0; /* left */
            if( cst_wave_num_channels(data) == 2 ) *wptr++ = 0; /* right */
        }
        data->frameIndex += framesLeft;
        finished = paComplete;
    }
    else
    {
        for( i=0; i<framesPerBuffer; i++ )
        {
            *wptr++ = *rptr++; /* left */
            if( cst_wave_num_channels(data) == 2 ) *wptr++ = *rptr++; /* right */
        }
        cst_wave_set_frameIndex(data, framesPerBuffer);
        finished = paContinue;
    }
    return finished;
}
int play_wave(cst_wave *w)
{
    PaStream* stream;
    PaStreamParameters outputParameters;

    cst_wave_set_frameIndex(w, 0);
    cst_wave_set_maxFrameIndex(w, (cst_wave_num_samples(w) / cst_wave_sample_rate(w)) * cst_wave_num_channels(w) * sizeof(short));

    int err = 0;
    err = Pa_Initialize();

    outputParameters.device = Pa_GetDefaultOutputDevice();
    if (outputParameters.device == paNoDevice)
    {
        fprintf(stderr,"Error: No default output device.\n");
        return -5;
    }

    printf("frameIndex: %d\n", cst_wave_frameIndex(w));
    printf("maxFrameIndex: %d\n", cst_wave_maxFrameIndex(w));
    printf("numChannels: %d\n", cst_wave_num_channels(w));
    printf("numSamples: %d\n", cst_wave_num_samples(w));
    printf("sampleRate: %d\n", cst_wave_sample_rate(w));

    outputParameters.channelCount = cst_wave_num_channels(w);
    outputParameters.sampleFormat = paInt16;
    outputParameters.suggestedLatency = Pa_GetDeviceInfo( outputParameters.device )->defaultLowOutputLatency;
    outputParameters.hostApiSpecificStreamInfo = NULL;

    puts("=== Now playing back. ===");
    err = Pa_OpenStream(&stream,
                        NULL, /* no input */
                        &outputParameters,
                        cst_wave_sample_rate(w),
                        512,
                        paClipOff,
                        playCallback,
                        &w);

    if( stream )
    {
        err = Pa_StartStream( stream );
        if( err != paNoError ) goto done;

        puts("Waiting for playback to finish.");
        while((err = Pa_IsStreamActive(stream)) == 1) Pa_Sleep(100);
        if( err < 0 ) goto done;

        err = Pa_CloseStream( stream );
        if( err != paNoError ) goto done;

        puts("Done.");
    }

done:
    Pa_Terminate();
    free(cst_wave_samples(w));
}
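For reference, this is the bare-bones shape of the PortAudio callback/userData round trip I'm aiming for, trimmed to the essentials. This is only a minimal sketch: MyData and the fill logic are placeholders of mine, not flite code; only the PortAudio calls are the real API.

#include <string.h>
#include <portaudio.h>

/* mono, paInt16, for simplicity */
typedef struct { short *samples; unsigned long pos, total; } MyData;

/* PortAudio hands the callback exactly the pointer that was passed as
 * the last argument of Pa_OpenStream(), untouched. */
static int cb(const void *in, void *out, unsigned long frames,
              const PaStreamCallbackTimeInfo *ti,
              PaStreamCallbackFlags flags, void *userData)
{
    MyData *d = (MyData *)userData;
    short *wptr = (short *)out;
    unsigned long n = d->total - d->pos;
    if (n > frames) n = frames;

    memcpy(wptr, d->samples + d->pos, n * sizeof(short)); /* copy what's left  */
    memset(wptr + n, 0, (frames - n) * sizeof(short));    /* pad final buffer  */
    d->pos += n;

    (void)in; (void)ti; (void)flags;
    return d->pos < d->total ? paContinue : paComplete;
}

/* ...stream setup as above, then:
 *     MyData data = { samples, 0, total_frames };
 *     Pa_OpenStream(&stream, NULL, &outputParameters, sample_rate,
 *                   512, paClipOff, cb, &data);
 * cb() then receives that same MyData* as its userData argument. */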
Since it's relevant: I also modified the cst_wave struct in cst_wave.h slightly so it holds the extra data I need to store, and added a few #defines to the ones that were already there:
typedef struct cst_wave_struct {
    const char *type;
    int frameIndex;
    int maxFrameIndex;
    int sample_rate;
    int num_samples;
    int num_channels;
    short *samples;
} cst_wave;
#define cst_wave_num_samples(w) (w?w->num_samples:0)
#define cst_wave_num_channels(w) (w?w->num_channels:0)
#define cst_wave_sample_rate(w) (w?w->sample_rate:0)
#define cst_wave_samples(w) (w->samples)
#define cst_wave_frameIndex(w) (w->frameIndex)
#define cst_wave_maxFrameIndex(w) (w->maxFrameIndex)
#define cst_wave_set_num_samples(w,s) w->num_samples=s
#define cst_wave_set_num_channels(w,s) w->num_channels=s
#define cst_wave_set_sample_rate(w,s) w->sample_rate=s
#define cst_wave_set_frameIndex(w,s) w->frameIndex=s
#define cst_wave_set_maxFrameIndex(w,s) w->maxFrameIndex=s
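In case it helps to reproduce this without building all of flite, here is a hypothetical standalone driver that fakes a cst_wave (one second of a 440 Hz tone) and hands it to the play_wave() shown above. Everything apart from the struct, the macros, and play_wave() itself is made up purely for the test:

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cst_wave.h"               /* the modified header shown above */

int play_wave(cst_wave *w);         /* from the modified audio.c above */

int main(void)
{
    cst_wave *w = calloc(1, sizeof(cst_wave));

    cst_wave_set_sample_rate(w, 8000);
    cst_wave_set_num_channels(w, 1);
    cst_wave_set_num_samples(w, 8000);   /* one second of mono audio */

    w->samples = malloc(cst_wave_num_samples(w) * sizeof(short));
    for (int i = 0; i < cst_wave_num_samples(w); i++)   /* 440 Hz sine at ~1/3 full scale */
        w->samples[i] = (short)(10000.0 * sin(2.0 * 3.14159265358979 * 440.0 * i / 8000.0));

    play_wave(w);   /* note: play_wave() above free()s w->samples at the end */
    free(w);
    return 0;
}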
Update 1:
Following @Rohan's suggestion now gives me this output:
$ ./bin/flite -t "test"
frameIndex: 0
maxFrameIndex: 0
numChannels: 1
numSamples: 7225
sampleRate: 8000
=== Now playing back. ===
Waiting for playback to finish.
frameIndex in callback: 0
maxFrameIndex in callback: 0
numChannels in callback: 1
numSamples in callback: 7225
sampleRate in callback: 8000
Done.
flite(68929,0x7fff71c0d310) malloc: *** error for object 0x7fd6e2809800: pointer being freed was not allocated
*** set a breakpoint in malloc_error_break to debug
Abort trap: 6
To fix that, I removed the free(cst_wave_samples(w)); call. The program now runs without any apparent errors, but I still get no audio output on my Mac. Any suggestions?