17

我已经让 OpenCV 和 PyAudio 都工作了,但是我不确定如何将它们同步在一起。我无法从 OpenCV 获得帧速率,而且每一帧读取调用的耗时会随时间变化,难以测量。然而,PyAudio 的基础是按固定的采样率获取样本。我如何将它们同步到相同的速率?我认为编解码器有一些标准或某种方式可以做到这一点。(我试过谷歌,但得到的只是关于口型同步的信息 :/)。

OpenCV 帧率

from __future__ import division
import time
import math
import cv2, cv

# Open the default camera (device index 0).
vc = cv2.VideoCapture(0)

# Grab frames in a loop and print an instantaneous FPS estimate derived
# from the wall-clock time of each read() call.  Press 'q' to stop.
while True:

    before_read = time.time()
    rval, frame = vc.read()
    after_read  = time.time()
    if frame is not None:
        print(len(frame))
        # 1 / read-duration approximates how fast frames can be grabbed;
        # NOTE(review): this measures read() latency, not the camera's FPS.
        print(math.ceil(1.0 / (after_read - before_read)))
        cv2.imshow("preview", frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    else:
        print("None...")
        cv2.waitKey(1)

# Release the camera as soon as capture is finished.
vc.release()

# Keep displaying the last grabbed frame until 'q' is pressed again.
while True:
    cv2.imshow("preview", frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()

抓取和保存音频

from sys import byteorder
from array import array
from struct import pack

import pyaudio
import wave

# Recording parameters shared by the audio helper functions below.
THRESHOLD = 500  # sample magnitudes below this are treated as silence
CHUNK_SIZE = 1024  # frames per PyAudio buffer read
FORMAT = pyaudio.paInt16  # 16-bit signed little-endian samples ('h' arrays)
RATE = 44100  # sample rate in Hz

def is_silent(snd_data, threshold=None):
    """Return True if 'snd_data' contains no sample louder than 'threshold'.

    'threshold' defaults to the module-level THRESHOLD.  Uses abs() so loud
    negative samples count as sound (consistent with trim()); the original
    max(snd_data) silently ignored them.  Raises ValueError on empty input.
    """
    if threshold is None:
        threshold = THRESHOLD
    peak = max(abs(s) for s in snd_data)
    print(peak)  # debug: peak amplitude of this chunk
    return peak < threshold

def normalize(snd_data):
    """Scale 'snd_data' so its peak magnitude equals MAXIMUM (16384).

    Returns a new array('h').  An all-zero (or empty) input is returned as
    an unchanged copy instead of raising ZeroDivisionError, which the
    original did when max(abs(...)) was 0.
    """
    MAXIMUM = 16384
    peak = max((abs(i) for i in snd_data), default=0)
    if peak == 0:
        # Pure silence: nothing to scale.
        return array('h', snd_data)
    factor = float(MAXIMUM) / peak
    return array('h', (int(i * factor) for i in snd_data))

def trim(snd_data, threshold=None):
    """Remove leading and trailing samples quieter than 'threshold'.

    'threshold' defaults to the module-level THRESHOLD, so existing callers
    are unaffected.  Returns a new array('h').
    """
    if threshold is None:
        threshold = THRESHOLD

    def _trim_front(samples):
        # Drop everything before the first sample whose magnitude exceeds
        # the threshold; keep all samples from that point on.
        out = array('h')
        started = False
        for sample in samples:
            if not started and abs(sample) > threshold:
                started = True
            if started:
                out.append(sample)
        return out

    # Trim the front, then reverse and trim again to handle the back.
    out = _trim_front(snd_data)
    out.reverse()
    out = _trim_front(out)
    out.reverse()
    return out

def add_silence(snd_data, seconds, rate=None):
    """Pad both ends of 'snd_data' with 'seconds' (float) of silence.

    'rate' defaults to the module-level RATE, keeping existing callers
    working.  The original used xrange, which does not exist on Python 3;
    multiplying a one-element array is also faster than a generator.
    """
    if rate is None:
        rate = RATE
    pad = array('h', [0]) * int(seconds * rate)
    padded = array('h', pad)
    padded.extend(snd_data)
    padded.extend(pad)
    return padded

def record():
    """
    Record a word or words from the microphone and
    return (sample_width, data) where data is an array of signed shorts.

    Normalizes the audio, trims silence from the
    start and end, and pads with 0.5 seconds of
    blank sound to make sure VLC et al can play
    it without getting chopped off.
    """
    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT, channels=1, rate=RATE,
        input=True, output=True,
        frames_per_buffer=CHUNK_SIZE)

    num_silent = 0
    snd_started = False

    r = array('h')

    try:
        while True:
            # Little-endian signed shorts; read exactly one buffer's worth.
            # The original hard-coded 1024 here instead of CHUNK_SIZE.
            snd_data = array('h', stream.read(CHUNK_SIZE))
            if byteorder == 'big':
                snd_data.byteswap()

            r.extend(snd_data)

            silent = is_silent(snd_data)

            if silent and snd_started:
                num_silent += 1
            elif not silent and not snd_started:
                snd_started = True

            # Stop after sound has started and more than one silent chunk
            # has been seen.
            if snd_started and num_silent > 1:
                break

        sample_width = p.get_sample_size(FORMAT)
    finally:
        # Always release the audio device, even if reading raised.
        stream.stop_stream()
        stream.close()
        p.terminate()

    r = normalize(r)
    r = trim(r)
    r = add_silence(r, 0.5)
    return sample_width, r

def record_to_file(path):
    """Record from the microphone and write the result to 'path' as WAV.

    Mono, 16-bit, RATE Hz.  The wave file is closed even if a write fails
    (the original leaked the handle on error).
    """
    sample_width, data = record()
    # Pack the shorts as little-endian bytes for wave.writeframes.
    frames = pack('<' + ('h' * len(data)), *data)

    wf = wave.open(path, 'wb')
    try:
        wf.setnchannels(1)
        wf.setsampwidth(sample_width)
        wf.setframerate(RATE)
        wf.writeframes(frames)
    finally:
        wf.close()

if __name__ == '__main__':
    # Entry point: record one utterance from the microphone and save it
    # as a mono 16-bit WAV file in the current directory.
    print("please speak a word into the microphone")
    record_to_file('demo.wav')
    print("done - result written to demo.wav")
4

3 回答 3

2

我认为您最好使用 GSreamer 或 ffmpeg,或者如果您在 Windows 上使用 DirectShow。这些库可以同时处理音频和视频,并且应该有某种多路复用器以允许您正确混合视频和音频。

但是如果你真的想用 OpenCV 来做这个,你应该可以用 VideoCapture 来获取帧率,你试过用这个吗?

fps = cv.GetCaptureProperty(vc, CV_CAP_PROP_FPS)

另一种方法是将 fps 估计为帧数除以持续时间:

nFrames  = cv.GetCaptureProperty(vc, CV_CAP_PROP_FRAME_COUNT)
           cv.SetCaptureProperty(vc, CV_CAP_PROP_POS_AVI_RATIO, 1)
duration = cv.GetCaptureProperty(vc, CV_CAP_PROP_POS_MSEC)
fps = 1000 * nFrames / duration;

我不确定我是否理解您在这里尝试做的事情:

before_read = time.time()
rval, frame = vc.read()
after_read  = time.time()

在我看来,after_read - before_read 只测量了 OpenCV 加载下一帧所需的时间,并不能测量 fps。OpenCV 并不会尝试按帧率播放,它只是加载帧,并且会尽可能快地这样做,我认为没有办法配置这一点。我认为在显示每一帧之后放置一个 waitKey(1/fps) 将实现您正在寻找的效果。

于 2015-10-20T20:05:29.257 回答
1

您可以设置 2 个计数器,1 个用于音频,一个用于视频。每显示一帧图像,视频计数器加 (1/fps);每写入一段音频,音频计数器加 sec,其中 sec 是您每次写入流的音频秒数。然后在代码的音频部分,您可以执行类似:While audiosec-videosec>=0.05: # 音频超前了 time.sleep(0.05)

而在视频部分:While videosec-audiosec>=0.2: # 视频超前了 time.sleep(0.2)

你可以玩数字

这就是我最近使用 pyaudio ffmpeg 而不是 cv2 在我自己的视频播放器项目上实现某种同步的方式。

于 2020-07-26T16:03:48.667 回答
0

我个人为此使用了线程。

import concurrent.futures
import pyaudio
import cv2
class Aud_Vid():
    """Capture one video frame and one audio chunk concurrently.

    As pasted, the method bodies were not indented under the class, which
    is an IndentationError; this version restores the class body.
    """

    def __init__(self, arg):
        # 'arg' is unused but kept so existing callers are unaffected.
        self.video = cv2.VideoCapture(0)
        self.CHUNK = 1470  # 44100 / 30: one audio chunk per frame at 30 fps
        self.FORMAT = pyaudio.paInt16
        self.CHANNELS = 2
        self.RATE = 44100
        self.audio = pyaudio.PyAudio()
        self.instream = self.audio.open(format=self.FORMAT, channels=self.CHANNELS,
                                        rate=self.RATE, input=True,
                                        frames_per_buffer=self.CHUNK)
        self.outstream = self.audio.open(format=self.FORMAT, channels=self.CHANNELS,
                                         rate=self.RATE, output=True,
                                         frames_per_buffer=self.CHUNK)

    def sync(self):
        """Read a frame and an audio chunk in parallel threads.

        Returns (frame_bytes, audio_chunk).  Running both blocking reads
        concurrently keeps audio and video aligned in time.
        """
        with concurrent.futures.ThreadPoolExecutor() as executor:
            video_future = executor.submit(self.video.read)
            audio_future = executor.submit(self.instream.read, self.CHUNK)
            rval, frame = video_future.result()
            aud = audio_future.result()
            return (frame.tobytes(), aud)
于 2020-01-19T20:09:57.503 回答