Android Audio/Video: A Detailed Look at the NuPlayer Data Parsing Module


Looking at the diagram from the previous article, Android Audio/Video: The NuPlayer Framework:

[Figure: overall NuPlayer framework diagram, from the previous article]

we can see that NuPlayer's parsing module consists mainly of NuPlayerSource and the classes that inherit from it: HTTPLiveSource, RTSPSource, GenericSource, and so on.
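Before diving into the code, it helps to know what a Source has to provide. The following is a trimmed sketch of the NuPlayer::Source interface based on AOSP's NuPlayerSource.h; the method names match AOSP, but this is not a verbatim copy and the exact signatures vary across Android versions:

// Trimmed sketch of NuPlayer::Source (see NuPlayerSource.h in AOSP).
// Every concrete source (GenericSource, HTTPLiveSource, RTSPSource)
// implements this interface so the rest of NuPlayer stays protocol-agnostic.
struct NuPlayer::Source : public AHandler {
    virtual void prepareAsync() = 0;              // parse the container / playlist asynchronously
    virtual void start() = 0;                     // start producing data
    virtual sp<AMessage> getFormat(bool audio);   // per-track format (mime, width, ...)
    virtual status_t dequeueAccessUnit(
            bool audio, sp<ABuffer> *accessUnit) = 0;  // compressed frames for the decoders
    virtual status_t getDuration(int64_t *durationUs);
    virtual status_t seekTo(int64_t seekTimeUs);
    virtual size_t getTrackCount() const;
    virtual sp<AMessage> getTrackInfo(size_t trackIndex) const;
};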

NuPlayer calls setDataSourceAsync:

void NuPlayer::setDataSourceAsync(
        const sp<IMediaHTTPService> &httpService,
        const char *url,
        const KeyedVector<String8, String8> *headers) {

    sp<AMessage> msg = new AMessage(kWhatSetDataSource, this);
    size_t len = strlen(url);

    sp<AMessage> notify = new AMessage(kWhatSourceNotify, this);

    sp<Source> source;
    if (IsHTTPLiveURL(url)) {
        // HLS (HTTP Live Streaming) URLs
        source = new HTTPLiveSource(notify, httpService, url, headers);
    } else if (!strncasecmp(url, "rtsp://", 7)) {
        // RTSP streams
        source = new RTSPSource(
                notify, httpService, url, headers, mUIDValid, mUID);
    } else if ((!strncasecmp(url, "http://", 7)
                || !strncasecmp(url, "https://", 8))
            && ((len >= 4 && !strcasecmp(".sdp", &url[len - 4]))
                || strstr(url, ".sdp?"))) {
        // SDP descriptions served over HTTP(S) are also handled by RTSPSource
        source = new RTSPSource(
                notify, httpService, url, headers, mUIDValid, mUID, true);
    } else {
        // Everything else: local files, plain HTTP progressive download, etc.
        sp<GenericSource> genericSource =
                new GenericSource(notify, mUIDValid, mUID);
        // Don't set FLAG_SECURE on mSourceFlags here for widevine.
        // The correct flags will be updated in Source::kWhatFlagsChanged
        // handler when GenericSource is prepared.

        status_t err = genericSource->setDataSource(httpService, url, headers);

        if (err == OK) {
            source = genericSource;
        } else {
            ALOGE("Failed to set data source!");
        }
    }
    msg->setObject("source", source);
    msg->post();
}
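The only black box in the dispatch above is IsHTTPLiveURL, whose implementation is not shown here. Conceptually it just decides whether the URL points at an HLS playlist. A simplified, hypothetical check along those lines might look like the following (looksLikeHttpLiveUrl is an illustrative helper, not the actual AOSP function):

#include <cstring>
#include <strings.h>

// Hypothetical, simplified HLS URL check, for illustration only.
// The real IsHTTPLiveURL in NuPlayer.cpp is more thorough.
static bool looksLikeHttpLiveUrl(const char *url) {
    if (strncasecmp(url, "http://", 7) != 0
            && strncasecmp(url, "https://", 8) != 0
            && strncasecmp(url, "file://", 7) != 0) {
        return false;
    }
    size_t len = strlen(url);
    // ".m3u8" suffix, or ".m3u8?" followed by query parameters
    return (len >= 5 && !strcasecmp(".m3u8", &url[len - 5]))
            || strstr(url, ".m3u8?") != NULL;
}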

Here a different Source (media source) object is chosen depending on the protocol. Once this Source object exists, a kWhatSetDataSource message is posted; its handler looks like this:

case kWhatSetDataSource:
{
    ALOGV("kWhatSetDataSource");

    CHECK(mSource == NULL);

    status_t err = OK;
    sp<RefBase> obj;
    CHECK(msg->findObject("source", &obj));
    if (obj != NULL) {
        mSource = static_cast<Source *>(obj.get());
    } else {
        err = UNKNOWN_ERROR;
    }

    CHECK(mDriver != NULL);
    sp<NuPlayerDriver> driver = mDriver.promote();
    if (driver != NULL) {
        driver->notifySetDataSourceCompleted(err);
    }
    break;
}

In other words, the concrete Source parses the data, and the object is then cast and stored in mSource for the decoder to use. At that point it already carries information about the media, such as how many tracks there are and what format they are in (see the short sketch below). One of these sources, HTTPLiveSource, is mainly used to handle the HLS protocol; its structure is shown in the figure that follows.
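As a rough illustration of what "handing mSource to the decoder" means: the consumer asks the source for a track format and then keeps pulling compressed access units from it. This is a hedged sketch only. getFormat() and dequeueAccessUnit() do exist on NuPlayer::Source in AOSP, but drainAudioFromSource is a made-up helper, and the real NuPlayer drives this through AMessage callbacks rather than a plain loop:

// Illustrative only: how a consumer might drive a prepared Source.
void drainAudioFromSource(const sp<NuPlayer::Source> &source) {
    sp<AMessage> format = source->getFormat(true /* audio */);
    if (format != NULL) {
        AString mime;
        if (format->findString("mime", &mime)) {
            ALOGV("audio track mime: %s", mime.c_str());
        }
    }

    sp<ABuffer> accessUnit;
    // Simplified: the real call can also return -EWOULDBLOCK when no
    // data is buffered yet, which a real caller has to handle.
    while (source->dequeueAccessUnit(true /* audio */, &accessUnit) == OK) {
        // hand accessUnit to the audio decoder ...
    }
}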

[Figure: internal structure of HTTPLiveSource and LiveSession]

As the figure shows, HTTPLiveSource essentially wraps a LiveSession. LiveSession in turn contains an HTTPDownloader module, an M3UParser module, and a PlaylistFetcher module that fetches the playlists.
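The relationship can be summarized in a few lines. This is only a structural sketch with simplified members, not the actual AOSP headers:

// Structural sketch only -- not the real class definitions.
struct HTTPLiveSource : public NuPlayer::Source {
    sp<LiveSession> mLiveSession;    // owns the whole HLS session
};

struct LiveSession : public AHandler {
    sp<M3UParser> mPlaylist;         // parsed master / media playlists
    // One PlaylistFetcher per active media playlist; each fetcher uses an
    // HTTPDownloader to pull playlist files and media segments.
    KeyedVector<AString, FetcherInfo> mFetcherInfos;
};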

When HTTPLiveSource's prepareAsync function is called, the code is as follows:

void NuPlayer::HTTPLiveSource::prepareAsync() {
    if (mLiveLooper == NULL) {
        mLiveLooper = new ALooper;
        mLiveLooper->setName("http live");
        mLiveLooper->start();

        mLiveLooper->registerHandler(this);
    }

    sp<AMessage> notify = new AMessage(kWhatSessionNotify, this);

    mLiveSession = new LiveSession(
            notify,
            (mFlags & kFlagIncognito) ? LiveSession::kFlagIncognito : 0,
            mHTTPService);

    mLiveLooper->registerHandler(mLiveSession);

    mLiveSession->connectAsync(
            mURL.c_str(), mExtraHeaders.isEmpty() ? NULL : &mExtraHeaders);
}

Internally it mainly constructs a LiveSession object and then creates a session through LiveSession's connectAsync function:

void LiveSession::connectAsync(
        const char *url, const KeyedVector<String8, String8> *headers) {
    sp<AMessage> msg = new AMessage(kWhatConnect, this);
    msg->setString("url", url);

    if (headers != NULL) {
        msg->setPointer(
                "headers",
                new KeyedVector<String8, String8>(*headers));
    }

    msg->post();
}

Next comes onConnect:

void LiveSession::onConnect(const sp<AMessage> &msg) {
    CHECK(msg->findString("url", &mMasterURL));

    // TODO currently we don't know if we are coming here from incognito mode
    ALOGI("onConnect %s", uriDebugString(mMasterURL).c_str());

    KeyedVector<String8, String8> *headers = NULL;
    if (!msg->findPointer("headers", (void **)&headers)) {
        mExtraHeaders.clear();
    } else {
        mExtraHeaders = *headers;

        delete headers;
        headers = NULL;
    }

    // create looper for fetchers
    if (mFetcherLooper == NULL) {
        mFetcherLooper = new ALooper();

        mFetcherLooper->setName("Fetcher");
        mFetcherLooper->start(false, false);
    }

    // create fetcher to fetch the master playlist
    addFetcher(mMasterURL.c_str())->fetchPlaylistAsync();
}

This issues the network request and starts fetching the playlist. Once the master playlist has been fetched, the following callback runs:

void LiveSession::onMasterPlaylistFetched(const sp<AMessage> &msg) {
    AString uri;
    CHECK(msg->findString("uri", &uri));
    ssize_t index = mFetcherInfos.indexOfKey(uri);
    if (index < 0) {
        ALOGW("fetcher for master playlist is gone.");
        return;
    }

    // no longer useful, remove
    mFetcherLooper->unregisterHandler(mFetcherInfos[index].mFetcher->id());
    mFetcherInfos.removeItemsAt(index);

    CHECK(msg->findObject("playlist", (sp<RefBase> *)&mPlaylist));
    if (mPlaylist == NULL) {
        ALOGE("unable to fetch master playlist %s.",
                uriDebugString(mMasterURL).c_str());

        postPrepared(ERROR_IO);
        return;
    }
    // We trust the content provider to make a reasonable choice of preferred
    // initial bandwidth by listing it first in the variant playlist.
    // At startup we really don't have a good estimate on the available
    // network bandwidth since we haven't tranferred any data yet. Once
    // we have we can make a better informed choice.
    size_t initialBandwidth = 0;
    size_t initialBandwidthIndex = 0;

    int32_t maxWidth = 0;
    int32_t maxHeight = 0;

    if (mPlaylist->isVariantPlaylist()) {
        Vector<BandwidthItem> itemsWithVideo;
        for (size_t i = 0; i < mPlaylist->size(); ++i) {
            BandwidthItem item;

            item.mPlaylistIndex = i;
            item.mLastFailureUs = -1ll;

            sp<AMessage> meta;
            AString uri;
            mPlaylist->itemAt(i, &uri, &meta);

            CHECK(meta->findInt32("bandwidth", (int32_t *)&item.mBandwidth));

            int32_t width, height;
            if (meta->findInt32("width", &width)) {
                maxWidth = max(maxWidth, width);
            }
            if (meta->findInt32("height", &height)) {
                maxHeight = max(maxHeight, height);
            }

            mBandwidthItems.push(item);
            if (mPlaylist->hasType(i, "video")) {
                itemsWithVideo.push(item);
            }
        }
        // remove the audio-only variants if we have at least one with video
        if (!itemsWithVideo.empty()
                && itemsWithVideo.size() < mBandwidthItems.size()) {
            mBandwidthItems.clear();
            for (size_t i = 0; i < itemsWithVideo.size(); ++i) {
                mBandwidthItems.push(itemsWithVideo[i]);
            }
        }

        CHECK_GT(mBandwidthItems.size(), 0u);
        initialBandwidth = mBandwidthItems[0].mBandwidth;

        mBandwidthItems.sort(SortByBandwidth);

        for (size_t i = 0; i < mBandwidthItems.size(); ++i) {
            if (mBandwidthItems.itemAt(i).mBandwidth == initialBandwidth) {
                initialBandwidthIndex = i;
                break;
            }
        }
    } else {
        // dummy item.
        BandwidthItem item;
        item.mPlaylistIndex = 0;
        item.mBandwidth = 0;
        mBandwidthItems.push(item);
    }

    mMaxWidth = maxWidth > 0 ? maxWidth : mMaxWidth;
    mMaxHeight = maxHeight > 0 ? maxHeight : mMaxHeight;

    mPlaylist->pickRandomMediaItems();
    changeConfiguration(
            0ll /* timeUs */, initialBandwidthIndex, false /* pickTrack */);
}

The code above mainly builds the corresponding BandwidthItem entries from the M3U file returned for the URL. If you are familiar with M3U files, you know they come in two levels: a master playlist (first-level index) and media playlists (second-level index). A master playlist looks like this:

#EXTM3U
#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=200000
gear1/prog_index.m3u8
#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=311111
gear2/prog_index.m3u8
#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=484444
gear3/prog_index.m3u8
#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=737777
gear4/prog_index.m3u8

Its main purpose is adaptive bitrate streaming: the larger the BANDWIDTH, the higher the resolution of that variant. BANDWIDTH is literally the bandwidth that variant requires.
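To make the adaptive-bitrate idea concrete, here is a small self-contained sketch in plain C++, independent of the AOSP classes, of how a player could pick a variant once it has the BANDWIDTH values from the master playlist and an estimate of the current throughput. LiveSession works in a similar spirit with its sorted mBandwidthItems, although its real logic also accounts for buffering state and past failures:

#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>

struct Variant {
    uint32_t bandwidthBps;    // BANDWIDTH from #EXT-X-STREAM-INF
    std::string playlistUrl;  // e.g. "gear2/prog_index.m3u8"
};

// Pick the highest-bandwidth variant that still fits the measured throughput.
// Falls back to the lowest-bandwidth variant when even that does not fit.
static const Variant *pickVariant(const std::vector<Variant> &variants,
                                  uint32_t measuredBps) {
    const Variant *best = nullptr;
    for (const Variant &v : variants) {
        if (v.bandwidthBps <= measuredBps &&
                (best == nullptr || v.bandwidthBps > best->bandwidthBps)) {
            best = &v;
        }
    }
    if (best == nullptr && !variants.empty()) {
        // Even the lowest bitrate exceeds the estimate: take the smallest one.
        best = &*std::min_element(variants.begin(), variants.end(),
                [](const Variant &a, const Variant &b) {
                    return a.bandwidthBps < b.bandwidthBps;
                });
    }
    return best;
}

With the master playlist shown above, pickVariant(variants, 450000) would choose the 311111 bps variant (gear2/prog_index.m3u8), since 484444 already exceeds the estimated throughput.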

For example, to obtain information about a particular track, such as video, audio, or subtitle details, you can use LiveSession's getTrackInfo function:

sp<AMessage> LiveSession::getTrackInfo(size_t trackIndex) const {
    if (mPlaylist == NULL) {
        return NULL;
    } else {
        if (trackIndex == mPlaylist->getTrackCount() && mHasMetadata) {
            // one extra "track" is exposed for timed ID3 metadata
            sp<AMessage> format = new AMessage();
            format->setInt32("type", MEDIA_TRACK_TYPE_METADATA);
            format->setString("language", "und");
            format->setString("mime", MEDIA_MIMETYPE_DATA_TIMED_ID3);
            return format;
        }
        return mPlaylist->getTrackInfo(trackIndex);
    }
}
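The returned AMessage describes one track; note the extra metadata "track" that LiveSession appends after the real tracks when timed ID3 metadata is present. A hypothetical caller might enumerate tracks like this (dumpTracks is illustrative only, and it assumes the track count has already been obtained, for example through the source's getTrackCount()):

// Illustrative only: enumerate the tracks a LiveSession reports.
void dumpTracks(const sp<LiveSession> &session, size_t trackCount) {
    for (size_t i = 0; i < trackCount; ++i) {
        sp<AMessage> info = session->getTrackInfo(i);
        if (info == NULL) {
            continue;
        }
        int32_t type = 0;
        AString mime, lang;
        // These keys may or may not be present depending on the track type.
        info->findInt32("type", &type);        // MEDIA_TRACK_TYPE_VIDEO/AUDIO/...
        info->findString("mime", &mime);
        info->findString("language", &lang);
        ALOGV("track %zu: type=%d mime=%s lang=%s",
              i, type, mime.c_str(), lang.c_str());
    }
}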


