Decode one row of tiles at a time for image that has tiles

Add an api to IMediaMetadataRetriever to decode image rect.
It will reuse the same full frame IMemory, and decode only
the requested rect. For now, StagefrightMetadataRetriever
will only allow decoding of rect that's a full row of tiles,
and the requests must be issued sequentially (i.e. no
arbitrary rects). When the extract side is fixed to allow
seeking by tiles, it can be extended to allow arbitrary
rects.

This allows HeifDecoderImpl (on client side) to start
processing the getScanlines in parallel with the decoding.

Test: CTS MediaMetadataRetrieverTest;
Manual testing of HEIF decoding of files with or without tiles;
Manual testing of HEIF thumbnails generation in Downloads app.

bug: 78475896
Change-Id: I820b21cdf33f80593ee6092d8e1ba68b3beb65dd
gugelfrei
Chong Zhang 6 years ago
parent 0c1f9148ed
commit 0c1407f79e

@ -37,9 +37,11 @@ public:
// will calculate frame buffer size if |hasData| is set to true.
VideoFrame(uint32_t width, uint32_t height,
uint32_t displayWidth, uint32_t displayHeight,
uint32_t tileWidth, uint32_t tileHeight,
uint32_t angle, uint32_t bpp, bool hasData, size_t iccSize):
mWidth(width), mHeight(height),
mDisplayWidth(displayWidth), mDisplayHeight(displayHeight),
mTileWidth(tileWidth), mTileHeight(tileHeight),
mRotationAngle(angle), mBytesPerPixel(bpp), mRowBytes(bpp * width),
mSize(hasData ? (bpp * width * height) : 0),
mIccSize(iccSize), mReserved(0) {
@ -74,6 +76,8 @@ public:
uint32_t mHeight; // Decoded image height before rotation
uint32_t mDisplayWidth; // Display width before rotation
uint32_t mDisplayHeight; // Display height before rotation
uint32_t mTileWidth; // Tile width (0 if image doesn't have grid)
uint32_t mTileHeight; // Tile height (0 if image doesn't have grid)
int32_t mRotationAngle; // Rotation angle, clockwise, should be multiple of 90
uint32_t mBytesPerPixel; // Number of bytes per pixel
uint32_t mRowBytes; // Number of bytes per row before rotation

@ -271,17 +271,43 @@ status_t HeifDataSource::getSize(off64_t* size) {
/////////////////////////////////////////////////////////////////////////
// Worker thread that drives slice-by-slice (asynchronous) decoding for
// HeifDecoderImpl. Holds a non-owning back-pointer to its creator; the
// decoder joins this thread in its destructor before being destroyed.
struct HeifDecoderImpl::DecodeThread : public Thread {
explicit DecodeThread(HeifDecoderImpl *decoder) : mDecoder(decoder) {}
private:
// Non-owning: the decoder outlives the thread (it joins in ~HeifDecoderImpl).
HeifDecoderImpl* mDecoder;
// Thread entry point; defined below, delegates to mDecoder->decodeAsync().
bool threadLoop();
DISALLOW_EVIL_CONSTRUCTORS(DecodeThread);
};
// Runs the decoder's asynchronous slice loop. decodeAsync() returns false,
// so the Thread framework stops looping after a single pass.
bool HeifDecoderImpl::DecodeThread::threadLoop() {
return mDecoder->decodeAsync();
}
/////////////////////////////////////////////////////////////////////////
// Constructor: initializes all members to conservative defaults; real
// values (dimensions, slice geometry) are filled in by init()/decode().
HeifDecoderImpl::HeifDecoderImpl() :
    // output color format should always be set via setOutputColor(), in case
    // it's not, default to HAL_PIXEL_FORMAT_RGB_565.
    mOutputColor(HAL_PIXEL_FORMAT_RGB_565),
    mCurScanline(0),
    mWidth(0),
    mHeight(0),
    mFrameDecoded(false),
    mHasImage(false),
    mHasVideo(false),
    // Slice-decoding state: mNumSlices == 1 means "decode the whole frame
    // at once"; decode() may raise it when the image has usable tiles.
    mAvailableLines(0),
    mNumSlices(1),
    mSliceHeight(0),
    mAsyncDecodeDone(false) {
}
HeifDecoderImpl::~HeifDecoderImpl() {
// Wait for any in-flight slice-decoding thread to finish before members
// it touches (mRetriever, mFrameMemory, mLock, ...) are destroyed.
if (mThread != nullptr) {
mThread->join();
}
}
bool HeifDecoderImpl::init(HeifStream* stream, HeifFrameInfo* frameInfo) {
@ -310,22 +336,23 @@ bool HeifDecoderImpl::init(HeifStream* stream, HeifFrameInfo* frameInfo) {
mHasImage = hasImage && !strcasecmp(hasImage, "yes");
mHasVideo = hasVideo && !strcasecmp(hasVideo, "yes");
sp<IMemory> sharedMem;
if (mHasImage) {
// image index < 0 to retrieve primary image
mFrameMemory = mRetriever->getImageAtIndex(
sharedMem = mRetriever->getImageAtIndex(
-1, mOutputColor, true /*metaOnly*/);
} else if (mHasVideo) {
mFrameMemory = mRetriever->getFrameAtTime(0,
sharedMem = mRetriever->getFrameAtTime(0,
MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC,
mOutputColor, true /*metaOnly*/);
}
if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
if (sharedMem == nullptr || sharedMem->pointer() == nullptr) {
ALOGE("getFrameAtTime: videoFrame is a nullptr");
return false;
}
VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
VideoFrame* videoFrame = static_cast<VideoFrame*>(sharedMem->pointer());
ALOGV("Meta dimension %dx%d, display %dx%d, angle %d, iccSize %d",
videoFrame->mWidth,
@ -344,6 +371,14 @@ bool HeifDecoderImpl::init(HeifStream* stream, HeifFrameInfo* frameInfo) {
videoFrame->mIccSize,
videoFrame->getFlattenedIccData());
}
mWidth = videoFrame->mWidth;
mHeight = videoFrame->mHeight;
if (mHasImage && videoFrame->mTileHeight >= 512 && mWidth >= 3000 && mHeight >= 2000 ) {
// Try decoding in slices only if the image has tiles and is big enough.
mSliceHeight = videoFrame->mTileHeight;
mNumSlices = (videoFrame->mHeight + mSliceHeight - 1) / mSliceHeight;
ALOGV("mSliceHeight %u, mNumSlices %zu", mSliceHeight, mNumSlices);
}
return true;
}
@ -376,6 +411,36 @@ bool HeifDecoderImpl::setOutputColor(HeifColorFormat heifColor) {
return false;
}
// Thread body for slice decoding: sequentially decodes slices 1..mNumSlices-1
// (slice 0 was decoded synchronously in decode() before this thread started)
// and publishes progress to getScanline() via mAvailableLines/mScanlineReady.
bool HeifDecoderImpl::decodeAsync() {
for (size_t i = 1; i < mNumSlices; i++) {
ALOGV("decodeAsync(): decoding slice %zu", i);
size_t top = i * mSliceHeight;
size_t bottom = (i + 1) * mSliceHeight;
if (bottom > mHeight) {
// The last slice may be shorter than mSliceHeight.
bottom = mHeight;
}
// Request one full row of tiles: rect = {0, top, mWidth, bottom}.
sp<IMemory> frameMemory = mRetriever->getImageRectAtIndex(
-1, mOutputColor, 0, top, mWidth, bottom);
{
Mutex::Autolock autolock(mLock);
if (frameMemory == nullptr || frameMemory->pointer() == nullptr) {
// Decode failed: mark the async decode done so waiters in
// getScanline() wake up instead of blocking forever.
mAsyncDecodeDone = true;
mScanlineReady.signal();
break;
}
mFrameMemory = frameMemory;
// Scanlines [0, bottom) are now valid in mFrameMemory.
mAvailableLines = bottom;
ALOGV("decodeAsync(): available lines %zu", mAvailableLines);
mScanlineReady.signal();
}
}
// NOTE(review): mAsyncDecodeDone is only set on failure; after full success
// waiters rely on mAvailableLines having reached mHeight (getScanline()
// rejects mCurScanline >= mHeight up front) — confirm no waiter can block.
// Aggressive clear to avoid holding on to resources
mRetriever.clear();
mDataSource.clear();
// Returning false ends the Thread loop after this single pass.
return false;
}
bool HeifDecoderImpl::decode(HeifFrameInfo* frameInfo) {
// reset scanline pointer
mCurScanline = 0;
@ -384,6 +449,47 @@ bool HeifDecoderImpl::decode(HeifFrameInfo* frameInfo) {
return true;
}
// See if we want to decode in slices to allow client to start
// scanline processing in parallel with decode. If this fails
// we fallback to decoding the full frame.
if (mHasImage && mNumSlices > 1) {
// get first slice and metadata
sp<IMemory> frameMemory = mRetriever->getImageRectAtIndex(
-1, mOutputColor, 0, 0, mWidth, mSliceHeight);
if (frameMemory == nullptr || frameMemory->pointer() == nullptr) {
ALOGE("decode: metadata is a nullptr");
return false;
}
VideoFrame* videoFrame = static_cast<VideoFrame*>(frameMemory->pointer());
if (frameInfo != nullptr) {
frameInfo->set(
videoFrame->mWidth,
videoFrame->mHeight,
videoFrame->mRotationAngle,
videoFrame->mBytesPerPixel,
videoFrame->mIccSize,
videoFrame->getFlattenedIccData());
}
mFrameMemory = frameMemory;
mAvailableLines = mSliceHeight;
mThread = new DecodeThread(this);
if (mThread->run("HeifDecode", ANDROID_PRIORITY_FOREGROUND) == OK) {
mFrameDecoded = true;
return true;
}
// Fallback to decode without slicing
mThread.clear();
mNumSlices = 1;
mSliceHeight = 0;
mAvailableLines = 0;
mFrameMemory.clear();
}
if (mHasImage) {
// image index < 0 to retrieve primary image
mFrameMemory = mRetriever->getImageAtIndex(-1, mOutputColor);
@ -393,14 +499,14 @@ bool HeifDecoderImpl::decode(HeifFrameInfo* frameInfo) {
}
if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
ALOGE("getFrameAtTime: videoFrame is a nullptr");
ALOGE("decode: videoFrame is a nullptr");
return false;
}
VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
if (videoFrame->mSize == 0 ||
mFrameMemory->size() < videoFrame->getFlattenedSize()) {
ALOGE("getFrameAtTime: videoFrame size is invalid");
ALOGE("decode: videoFrame size is invalid");
return false;
}
@ -424,36 +530,45 @@ bool HeifDecoderImpl::decode(HeifFrameInfo* frameInfo) {
}
mFrameDecoded = true;
// Aggressive clear to avoid holding on to resources
// Aggressively clear to avoid holding on to resources
mRetriever.clear();
mDataSource.clear();
return true;
}
// Copies the scanline at mCurScanline from the decoded frame into |dst| and
// advances mCurScanline. Returns false when no frame memory is attached or
// no more lines exist. Slice-decoding callers must first ensure the line is
// available (see getScanline()).
bool HeifDecoderImpl::getScanlineInner(uint8_t* dst) {
    if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
        return false;
    }
    VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
    if (mCurScanline >= videoFrame->mHeight) {
        ALOGE("no more scanline available");
        return false;
    }
    // Copy one row (mBytesPerPixel * mWidth bytes) from the flattened buffer.
    uint8_t* src = videoFrame->getFlattenedData() + videoFrame->mRowBytes * mCurScanline++;
    memcpy(dst, src, videoFrame->mBytesPerPixel * videoFrame->mWidth);
    return true;
}
size_t HeifDecoderImpl::skipScanlines(size_t count) {
if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
return 0;
bool HeifDecoderImpl::getScanline(uint8_t* dst) {
if (mCurScanline >= mHeight) {
ALOGE("no more scanline available");
return false;
}
VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
if (mNumSlices > 1) {
Mutex::Autolock autolock(mLock);
while (!mAsyncDecodeDone && mCurScanline >= mAvailableLines) {
mScanlineReady.wait(mLock);
}
return (mCurScanline < mAvailableLines) ? getScanlineInner(dst) : false;
}
return getScanlineInner(dst);
}
// Advances the current scanline position by up to |count| lines, clamped to
// the frame height. Returns the number of lines actually skipped (0 when
// already at or past the end).
size_t HeifDecoderImpl::skipScanlines(size_t count) {
    uint32_t oldScanline = mCurScanline;
    mCurScanline += count;
    if (mCurScanline > mHeight) {
        mCurScanline = mHeight;
    }
    return (mCurScanline > oldScanline) ? (mCurScanline - oldScanline) : 0;
}

@ -19,6 +19,8 @@
#include "include/HeifDecoderAPI.h"
#include <system/graphics.h>
#include <utils/Condition.h>
#include <utils/Mutex.h>
#include <utils/RefBase.h>
namespace android {
@ -49,14 +51,30 @@ public:
size_t skipScanlines(size_t count) override;
private:
struct DecodeThread;
sp<IDataSource> mDataSource;
sp<MediaMetadataRetriever> mRetriever;
sp<IMemory> mFrameMemory;
android_pixel_format_t mOutputColor;
size_t mCurScanline;
uint32_t mWidth;
uint32_t mHeight;
bool mFrameDecoded;
bool mHasImage;
bool mHasVideo;
// Slice decoding only
Mutex mLock;
Condition mScanlineReady;
sp<DecodeThread> mThread;
size_t mAvailableLines;
size_t mNumSlices;
uint32_t mSliceHeight;
bool mAsyncDecodeDone;
bool decodeAsync();
bool getScanlineInner(uint8_t* dst);
};
} // namespace android

@ -69,6 +69,7 @@ enum {
SET_DATA_SOURCE_CALLBACK,
GET_FRAME_AT_TIME,
GET_IMAGE_AT_INDEX,
GET_IMAGE_RECT_AT_INDEX,
GET_FRAME_AT_INDEX,
EXTRACT_ALBUM_ART,
EXTRACT_METADATA,
@ -187,6 +188,30 @@ public:
return interface_cast<IMemory>(reply.readStrongBinder());
}
// Proxy-side implementation: marshals the rect-decode request to the remote
// MediaMetadataRetriever service and unmarshals the returned IMemory
// (shared frame buffer). Returns NULL on service error.
sp<IMemory> getImageRectAtIndex(
int index, int colorFormat, int left, int top, int right, int bottom)
{
ALOGV("getImageRectAtIndex: index %d, colorFormat(%d) rect {%d, %d, %d, %d}",
index, colorFormat, left, top, right, bottom);
Parcel data, reply;
data.writeInterfaceToken(IMediaMetadataRetriever::getInterfaceDescriptor());
data.writeInt32(index);
data.writeInt32(colorFormat);
data.writeInt32(left);
data.writeInt32(top);
data.writeInt32(right);
data.writeInt32(bottom);
#ifndef DISABLE_GROUP_SCHEDULE_HACK
// Ship the caller's scheduling policy to the service side (consumed by
// setSchedPolicy in BnMediaMetadataRetriever::onTransact).
sendSchedPolicy(data);
#endif
remote()->transact(GET_IMAGE_RECT_AT_INDEX, data, &reply);
status_t ret = reply.readInt32();
if (ret != NO_ERROR) {
return NULL;
}
return interface_cast<IMemory>(reply.readStrongBinder());
}
status_t getFrameAtIndex(std::vector<sp<IMemory> > *frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly)
{
@ -375,6 +400,34 @@ status_t BnMediaMetadataRetriever::onTransact(
#endif
return NO_ERROR;
} break;
case GET_IMAGE_RECT_AT_INDEX: {
CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
int index = data.readInt32();
int colorFormat = data.readInt32();
int left = data.readInt32();
int top = data.readInt32();
int right = data.readInt32();
int bottom = data.readInt32();
ALOGV("getImageRectAtIndex: index(%d), colorFormat(%d), rect {%d, %d, %d, %d}",
index, colorFormat, left, top, right, bottom);
#ifndef DISABLE_GROUP_SCHEDULE_HACK
setSchedPolicy(data);
#endif
sp<IMemory> bitmap = getImageRectAtIndex(
index, colorFormat, left, top, right, bottom);
if (bitmap != 0) { // Don't send NULL across the binder interface
reply->writeInt32(NO_ERROR);
reply->writeStrongBinder(IInterface::asBinder(bitmap));
} else {
reply->writeInt32(UNKNOWN_ERROR);
}
#ifndef DISABLE_GROUP_SCHEDULE_HACK
restoreSchedPolicy();
#endif
return NO_ERROR;
} break;
case GET_FRAME_AT_INDEX: {
CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
int frameIndex = data.readInt32();

@ -46,6 +46,8 @@ public:
int64_t timeUs, int option, int colorFormat, bool metaOnly) = 0;
virtual sp<IMemory> getImageAtIndex(
int index, int colorFormat, bool metaOnly, bool thumbnail) = 0;
virtual sp<IMemory> getImageRectAtIndex(
int index, int colorFormat, int left, int top, int right, int bottom) = 0;
virtual status_t getFrameAtIndex(
std::vector<sp<IMemory> > *frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly) = 0;

@ -47,6 +47,8 @@ public:
int64_t timeUs, int option, int colorFormat, bool metaOnly) = 0;
virtual sp<IMemory> getImageAtIndex(
int index, int colorFormat, bool metaOnly, bool thumbnail) = 0;
virtual sp<IMemory> getImageRectAtIndex(
int index, int colorFormat, int left, int top, int right, int bottom) = 0;
virtual status_t getFrameAtIndex(
std::vector<sp<IMemory> >* frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly) = 0;
@ -54,27 +56,6 @@ public:
virtual const char* extractMetadata(int keyCode) = 0;
};
// MediaMetadataRetrieverInterface
// Convenience base with stub implementations: each accessor returns NULL
// (or ERROR_UNSUPPORTED) so subclasses only need to override the
// operations they actually support.
// NOTE(review): no stub is provided for getImageRectAtIndex(), which the
// base class declares pure virtual — subclasses must implement it.
class MediaMetadataRetrieverInterface : public MediaMetadataRetrieverBase
{
public:
MediaMetadataRetrieverInterface() {}
virtual ~MediaMetadataRetrieverInterface() {}
virtual sp<IMemory> getFrameAtTime(
int64_t /*timeUs*/, int /*option*/, int /*colorFormat*/, bool /*metaOnly*/)
{ return NULL; }
virtual sp<IMemory> getImageAtIndex(
int /*index*/, int /*colorFormat*/, bool /*metaOnly*/, bool /*thumbnail*/)
{ return NULL; }
virtual status_t getFrameAtIndex(
std::vector<sp<IMemory> >* /*frames*/,
int /*frameIndex*/, int /*numFrames*/, int /*colorFormat*/, bool /*metaOnly*/)
{ return ERROR_UNSUPPORTED; }
virtual MediaAlbumArt* extractAlbumArt() { return NULL; }
virtual const char* extractMetadata(int /*keyCode*/) { return NULL; }
};
}; // namespace android
#endif // ANDROID_MEDIAMETADATARETRIEVERINTERFACE_H

@ -91,6 +91,8 @@ public:
int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false);
sp<IMemory> getImageAtIndex(int index,
int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false, bool thumbnail = false);
sp<IMemory> getImageRectAtIndex(
int index, int colorFormat, int left, int top, int right, int bottom);
status_t getFrameAtIndex(
std::vector<sp<IMemory> > *frames, int frameIndex, int numFrames = 1,
int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false);

@ -166,6 +166,19 @@ sp<IMemory> MediaMetadataRetriever::getImageAtIndex(
return mRetriever->getImageAtIndex(index, colorFormat, metaOnly, thumbnail);
}
// Client-facing wrapper: forwards the rect-decode request to the attached
// retriever under mLock. Returns NULL if no retriever is attached yet.
sp<IMemory> MediaMetadataRetriever::getImageRectAtIndex(
int index, int colorFormat, int left, int top, int right, int bottom) {
ALOGV("getImageRectAtIndex: index(%d) colorFormat(%d) rect {%d, %d, %d, %d}",
index, colorFormat, left, top, right, bottom);
Mutex::Autolock _l(mLock);
if (mRetriever == 0) {
ALOGE("retriever is not initialized");
return NULL;
}
return mRetriever->getImageRectAtIndex(
index, colorFormat, left, top, right, bottom);
}
status_t MediaMetadataRetriever::getFrameAtIndex(
std::vector<sp<IMemory> > *frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly) {

@ -213,7 +213,7 @@ sp<IMemory> MetadataRetrieverClient::getFrameAtTime(
sp<IMemory> MetadataRetrieverClient::getImageAtIndex(
int index, int colorFormat, bool metaOnly, bool thumbnail) {
ALOGV("getFrameAtTime: index(%d) colorFormat(%d), metaOnly(%d) thumbnail(%d)",
ALOGV("getImageAtIndex: index(%d) colorFormat(%d), metaOnly(%d) thumbnail(%d)",
index, colorFormat, metaOnly, thumbnail);
Mutex::Autolock lock(mLock);
Mutex::Autolock glock(sLock);
@ -229,6 +229,25 @@ sp<IMemory> MetadataRetrieverClient::getImageAtIndex(
return frame;
}
// Service-side entry point: forwards the rect request to the underlying
// retriever implementation. Holds both the per-client lock (mLock) and the
// process-wide lock (sLock), matching the other extraction paths here.
sp<IMemory> MetadataRetrieverClient::getImageRectAtIndex(
int index, int colorFormat, int left, int top, int right, int bottom) {
ALOGV("getImageRectAtIndex: index(%d) colorFormat(%d), rect {%d, %d, %d, %d}",
index, colorFormat, left, top, right, bottom);
Mutex::Autolock lock(mLock);
Mutex::Autolock glock(sLock);
if (mRetriever == NULL) {
ALOGE("retriever is not initialized");
return NULL;
}
sp<IMemory> frame = mRetriever->getImageRectAtIndex(
index, colorFormat, left, top, right, bottom);
if (frame == NULL) {
ALOGE("failed to extract image");
return NULL;
}
return frame;
}
status_t MetadataRetrieverClient::getFrameAtIndex(
std::vector<sp<IMemory> > *frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly) {

@ -54,6 +54,8 @@ public:
int64_t timeUs, int option, int colorFormat, bool metaOnly);
virtual sp<IMemory> getImageAtIndex(
int index, int colorFormat, bool metaOnly, bool thumbnail);
virtual sp<IMemory> getImageRectAtIndex(
int index, int colorFormat, int left, int top, int right, int bottom);
virtual status_t getFrameAtIndex(
std::vector<sp<IMemory> > *frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly);

@ -43,7 +43,8 @@ static const int64_t kBufferTimeOutUs = 10000ll; // 10 msec
static const size_t kRetryCount = 50; // must be >0
sp<IMemory> allocVideoFrame(const sp<MetaData>& trackMeta,
int32_t width, int32_t height, int32_t dstBpp, bool metaOnly = false) {
int32_t width, int32_t height, int32_t tileWidth, int32_t tileHeight,
int32_t dstBpp, bool metaOnly = false) {
int32_t rotationAngle;
if (!trackMeta->findInt32(kKeyRotation, &rotationAngle)) {
rotationAngle = 0; // By default, no rotation
@ -74,7 +75,7 @@ sp<IMemory> allocVideoFrame(const sp<MetaData>& trackMeta,
}
VideoFrame frame(width, height, displayWidth, displayHeight,
rotationAngle, dstBpp, !metaOnly, iccSize);
tileWidth, tileHeight, rotationAngle, dstBpp, !metaOnly, iccSize);
size_t size = frame.getFlattenedSize();
sp<MemoryHeapBase> heap = new MemoryHeapBase(size, 0, "MetadataRetrieverClient");
@ -155,7 +156,7 @@ sp<IMemory> FrameDecoder::getMetadataOnly(
return NULL;
}
int32_t width, height;
int32_t width, height, tileWidth = 0, tileHeight = 0;
if (thumbnail) {
if (!findThumbnailInfo(trackMeta, &width, &height)) {
return NULL;
@ -163,8 +164,14 @@ sp<IMemory> FrameDecoder::getMetadataOnly(
} else {
CHECK(trackMeta->findInt32(kKeyWidth, &width));
CHECK(trackMeta->findInt32(kKeyHeight, &height));
int32_t gridRows, gridCols;
if (!findGridInfo(trackMeta, &tileWidth, &tileHeight, &gridRows, &gridCols)) {
tileWidth = tileHeight = 0;
}
}
return allocVideoFrame(trackMeta, width, height, dstBpp, true /*metaOnly*/);
return allocVideoFrame(trackMeta,
width, height, tileWidth, tileHeight, dstBpp, true /*metaOnly*/);
}
FrameDecoder::FrameDecoder(
@ -237,8 +244,11 @@ status_t FrameDecoder::init(
return OK;
}
sp<IMemory> FrameDecoder::extractFrame() {
status_t err = extractInternal();
sp<IMemory> FrameDecoder::extractFrame(FrameRect *rect) {
status_t err = onExtractRect(rect);
if (err == OK) {
err = extractInternal();
}
if (err != OK) {
return NULL;
}
@ -503,6 +513,8 @@ status_t VideoFrameDecoder::onOutputReceived(
trackMeta(),
(crop_right - crop_left + 1),
(crop_bottom - crop_top + 1),
0,
0,
dstBpp());
addFrame(frameMem);
VideoFrame* frame = static_cast<VideoFrame*>(frameMem->pointer());
@ -541,7 +553,10 @@ ImageDecoder::ImageDecoder(
mHeight(0),
mGridRows(1),
mGridCols(1),
mTilesDecoded(0) {
mTileWidth(0),
mTileHeight(0),
mTilesDecoded(0),
mTargetTiles(0) {
}
sp<AMessage> ImageDecoder::onGetFormatAndSeekOptions(
@ -585,10 +600,12 @@ sp<AMessage> ImageDecoder::onGetFormatAndSeekOptions(
overrideMeta = new MetaData(*(trackMeta()));
overrideMeta->setInt32(kKeyWidth, tileWidth);
overrideMeta->setInt32(kKeyHeight, tileHeight);
mTileWidth = tileWidth;
mTileHeight = tileHeight;
mGridCols = gridCols;
mGridRows = gridRows;
} else {
ALOGE("ignore bad grid: %dx%d, tile size: %dx%d, picture size: %dx%d",
ALOGW("ignore bad grid: %dx%d, tile size: %dx%d, picture size: %dx%d",
gridCols, gridRows, tileWidth, tileHeight, mWidth, mHeight);
}
}
@ -596,6 +613,7 @@ sp<AMessage> ImageDecoder::onGetFormatAndSeekOptions(
overrideMeta = trackMeta();
}
}
mTargetTiles = mGridCols * mGridRows;
sp<AMessage> videoFormat;
if (convertMetaDataToMessage(overrideMeta, &videoFormat) != OK) {
@ -614,6 +632,45 @@ sp<AMessage> ImageDecoder::onGetFormatAndSeekOptions(
return videoFormat;
}
// Validates a requested decode rect against the slice-decoding restrictions
// and, on success, sets mTargetTiles so that onOutputReceived() stops after
// the requested row. |rect| == NULL means "decode the whole image".
// Returns OK if the rect can be decoded, ERROR_UNSUPPORTED otherwise.
status_t ImageDecoder::onExtractRect(FrameRect *rect) {
// TODO:
// This callback is for verifying whether we can decode the rect,
// and if so, set up the internal variables for decoding.
// Currently, rect decoding is restricted to sequentially decoding one
// row of tiles at a time. We can't decode arbitrary rects, as the image
// track doesn't yet support seeking by tiles. So all we do here is to
// verify the rect against what we expect.
// When seeking by tile is supported, this code should be updated to
// set the seek parameters.
if (rect == NULL) {
// Full-frame decode: only allowed if no tiles have been decoded yet.
if (mTilesDecoded > 0) {
return ERROR_UNSUPPORTED;
}
mTargetTiles = mGridRows * mGridCols;
return OK;
}
// Rect decoding requires valid tile geometry (i.e. the image has a grid).
if (mTileWidth <= 0 || mTileHeight <=0) {
return ERROR_UNSUPPORTED;
}
// The next decodable row is determined by how many tiles are done so far.
int32_t row = mTilesDecoded / mGridCols;
int32_t expectedTop = row * mTileHeight;
int32_t expectedBot = (row + 1) * mTileHeight;
if (expectedBot > mHeight) {
// The bottom row of tiles may extend past the image height.
expectedBot = mHeight;
}
// The rect must be exactly the next full row of tiles: {0, top, w, bot}.
if (rect->left != 0 || rect->top != expectedTop
|| rect->right != mWidth || rect->bottom != expectedBot) {
ALOGE("currently only support sequential decoding of slices");
return ERROR_UNSUPPORTED;
}
// advance one row
mTargetTiles = mTilesDecoded + mGridCols;
return OK;
}
status_t ImageDecoder::onOutputReceived(
const sp<MediaCodecBuffer> &videoFrameBuffer,
const sp<AMessage> &outputFormat, int64_t /*timeUs*/, bool *done) {
@ -626,7 +683,8 @@ status_t ImageDecoder::onOutputReceived(
CHECK(outputFormat->findInt32("height", &height));
if (mFrame == NULL) {
sp<IMemory> frameMem = allocVideoFrame(trackMeta(), mWidth, mHeight, dstBpp());
sp<IMemory> frameMem = allocVideoFrame(
trackMeta(), mWidth, mHeight, mTileWidth, mTileHeight, dstBpp());
mFrame = static_cast<VideoFrame*>(frameMem->pointer());
addFrame(frameMem);
@ -638,8 +696,6 @@ status_t ImageDecoder::onOutputReceived(
ColorConverter converter((OMX_COLOR_FORMATTYPE)srcFormat, dstFormat());
int32_t dstLeft, dstTop, dstRight, dstBottom;
int32_t numTiles = mGridRows * mGridCols;
dstLeft = mTilesDecoded % mGridCols * width;
dstTop = mTilesDecoded / mGridCols * height;
dstRight = dstLeft + width - 1;
@ -663,7 +719,7 @@ status_t ImageDecoder::onOutputReceived(
dstBottom = dstTop + crop_bottom;
}
*done = (++mTilesDecoded >= numTiles);
*done = (++mTilesDecoded >= mTargetTiles);
if (converter.isValid()) {
converter.convert(

@ -40,7 +40,8 @@ namespace android {
StagefrightMetadataRetriever::StagefrightMetadataRetriever()
: mParsedMetaData(false),
mAlbumArt(NULL) {
mAlbumArt(NULL),
mLastImageIndex(-1) {
ALOGV("StagefrightMetadataRetriever()");
}
@ -126,10 +127,30 @@ status_t StagefrightMetadataRetriever::setDataSource(
// Full-image decode: thin wrapper over getImageInternal() with no rect.
sp<IMemory> StagefrightMetadataRetriever::getImageAtIndex(
int index, int colorFormat, bool metaOnly, bool thumbnail) {
ALOGV("getImageAtIndex: index(%d) colorFormat(%d) metaOnly(%d) thumbnail(%d)",
index, colorFormat, metaOnly, thumbnail);
return getImageInternal(index, colorFormat, metaOnly, thumbnail, NULL);
}
// Rect decode: decodes only the requested rect of the image at |index|,
// reusing the full-frame output buffer.
sp<IMemory> StagefrightMetadataRetriever::getImageRectAtIndex(
int index, int colorFormat, int left, int top, int right, int bottom) {
ALOGV("getImageRectAtIndex: index(%d) colorFormat(%d) rect {%d, %d, %d, %d}",
index, colorFormat, left, top, right, bottom);
FrameRect rect = {left, top, right, bottom};
// If a decoder is already slice-decoding this same image, keep feeding it
// rects so its internal progress (tiles decoded so far) is preserved.
if (mImageDecoder != NULL && index == mLastImageIndex) {
return mImageDecoder->extractFrame(&rect);
}
// First rect for this index: getImageInternal() creates the decoder and
// caches it in mImageDecoder/mLastImageIndex for follow-up calls.
return getImageInternal(
index, colorFormat, false /*metaOnly*/, false /*thumbnail*/, &rect);
}
sp<IMemory> StagefrightMetadataRetriever::getImageInternal(
int index, int colorFormat, bool metaOnly, bool thumbnail, FrameRect* rect) {
if (mExtractor.get() == NULL) {
ALOGE("no extractor.");
return NULL;
@ -192,12 +213,17 @@ sp<IMemory> StagefrightMetadataRetriever::getImageAtIndex(
for (size_t i = 0; i < matchingCodecs.size(); ++i) {
const AString &componentName = matchingCodecs[i];
ImageDecoder decoder(componentName, trackMeta, source);
sp<ImageDecoder> decoder = new ImageDecoder(componentName, trackMeta, source);
int64_t frameTimeUs = thumbnail ? -1 : 0;
if (decoder.init(frameTimeUs, 1 /*numFrames*/, 0 /*option*/, colorFormat) == OK) {
sp<IMemory> frame = decoder.extractFrame();
if (decoder->init(frameTimeUs, 1 /*numFrames*/, 0 /*option*/, colorFormat) == OK) {
sp<IMemory> frame = decoder->extractFrame(rect);
if (frame != NULL) {
if (rect != NULL) {
// keep the decoder if slice decoding
mImageDecoder = decoder;
mLastImageIndex = index;
}
return frame;
}
}

@ -34,7 +34,11 @@ class IMediaSource;
class VideoFrame;
struct MediaCodec;
struct FrameDecoder {
// Rectangular region of an image, in pixel coordinates. Per the checks in
// ImageDecoder::onExtractRect, |right|/|bottom| are exclusive bounds
// (compared against the image width/height), i.e. [left, right) x [top, bottom).
struct FrameRect {
int32_t left, top, right, bottom;
};
struct FrameDecoder : public RefBase {
FrameDecoder(
const AString &componentName,
const sp<MetaData> &trackMeta,
@ -43,7 +47,7 @@ struct FrameDecoder {
status_t init(
int64_t frameTimeUs, size_t numFrames, int option, int colorFormat);
sp<IMemory> extractFrame();
sp<IMemory> extractFrame(FrameRect *rect = NULL);
status_t extractFrames(std::vector<sp<IMemory> >* frames);
@ -59,6 +63,8 @@ protected:
int seekMode,
MediaSource::ReadOptions *options) = 0;
virtual status_t onExtractRect(FrameRect *rect) = 0;
virtual status_t onInputReceived(
const sp<MediaCodecBuffer> &codecBuffer,
MetaDataBase &sampleMeta,
@ -110,6 +116,11 @@ protected:
int seekMode,
MediaSource::ReadOptions *options) override;
virtual status_t onExtractRect(FrameRect *rect) override {
// Rect extraction for sequences is not supported for now.
// A NULL rect requests full-frame extraction, which is always allowed.
return (rect == NULL) ? OK : ERROR_UNSUPPORTED;
}
virtual status_t onInputReceived(
const sp<MediaCodecBuffer> &codecBuffer,
MetaDataBase &sampleMeta,
@ -143,6 +154,8 @@ protected:
int seekMode,
MediaSource::ReadOptions *options) override;
virtual status_t onExtractRect(FrameRect *rect) override;
virtual status_t onInputReceived(
const sp<MediaCodecBuffer> &codecBuffer __unused,
MetaDataBase &sampleMeta __unused,
@ -161,7 +174,10 @@ private:
int32_t mHeight;
int32_t mGridRows;
int32_t mGridCols;
int32_t mTileWidth;
int32_t mTileHeight;
int32_t mTilesDecoded;
int32_t mTargetTiles;
};
} // namespace android

@ -27,8 +27,10 @@ namespace android {
class DataSource;
class MediaExtractor;
struct ImageDecoder;
struct FrameRect;
struct StagefrightMetadataRetriever : public MediaMetadataRetrieverInterface {
struct StagefrightMetadataRetriever : public MediaMetadataRetrieverBase {
StagefrightMetadataRetriever();
virtual ~StagefrightMetadataRetriever();
@ -44,6 +46,8 @@ struct StagefrightMetadataRetriever : public MediaMetadataRetrieverInterface {
int64_t timeUs, int option, int colorFormat, bool metaOnly);
virtual sp<IMemory> getImageAtIndex(
int index, int colorFormat, bool metaOnly, bool thumbnail);
virtual sp<IMemory> getImageRectAtIndex(
int index, int colorFormat, int left, int top, int right, int bottom);
virtual status_t getFrameAtIndex(
std::vector<sp<IMemory> >* frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly);
@ -59,6 +63,8 @@ private:
KeyedVector<int, String8> mMetaData;
MediaAlbumArt *mAlbumArt;
sp<ImageDecoder> mImageDecoder;
int mLastImageIndex;
void parseMetaData();
// Delete album art and clear metadata.
void clearMetadata();
@ -66,6 +72,8 @@ private:
status_t getFrameInternal(
int64_t timeUs, int numFrames, int option, int colorFormat, bool metaOnly,
sp<IMemory>* outFrame, std::vector<sp<IMemory> >* outFrames);
virtual sp<IMemory> getImageInternal(
int index, int colorFormat, bool metaOnly, bool thumbnail, FrameRect* rect);
StagefrightMetadataRetriever(const StagefrightMetadataRetriever &);

Loading…
Cancel
Save