Merge "DO NOT MERGE - Merge qt-qpr1-dev-plus-aosp-without-vendor (6129114) into stage-aosp-master" into stage-aosp-master

Author: gugelfrei
Committed by TreeHugger Robot via Android (Google) Code Review, 5 years ago
commit 82e92bc1a1

@ -33,7 +33,9 @@ ACameraCaptureSession::~ACameraCaptureSession() {
dev->unlockDevice();
}
// Fire onClosed callback
-    (*mUserSessionCallback.onClosed)(mUserSessionCallback.context, this);
+    if (mUserSessionCallback.onClosed != nullptr) {
+        (*mUserSessionCallback.onClosed)(mUserSessionCallback.context, this);
+    }
ALOGV("~ACameraCaptureSession: %p is deleted", this);
}

@ -253,21 +253,9 @@ class CameraHelper {
return true;
}
-    static void onDeviceDisconnected(void* /*obj*/, ACameraDevice* /*device*/) {}
-    static void onDeviceError(void* /*obj*/, ACameraDevice* /*device*/, int /*errorCode*/) {}
-    static void onSessionClosed(void* /*obj*/, ACameraCaptureSession* /*session*/) {}
-    static void onSessionReady(void* /*obj*/, ACameraCaptureSession* /*session*/) {}
-    static void onSessionActive(void* /*obj*/, ACameraCaptureSession* /*session*/) {}
   private:
-    ACameraDevice_StateCallbacks mDeviceCb{this, onDeviceDisconnected,
-                                           onDeviceError};
-    ACameraCaptureSession_stateCallbacks mSessionCb{
-            this, onSessionClosed, onSessionReady, onSessionActive};
+    ACameraDevice_StateCallbacks mDeviceCb{this, nullptr, nullptr};
+    ACameraCaptureSession_stateCallbacks mSessionCb{this, nullptr, nullptr, nullptr};
native_handle_t* mImgReaderAnw = nullptr; // not owned by us.

@ -37,7 +37,7 @@ namespace {
static constexpr int64_t kLogDurationUs = 5000000; // 5 secs
static constexpr size_t kMinAllocBytesForEviction = 1024*1024*15;
-static constexpr size_t kMinBufferCountForEviction = 40;
+static constexpr size_t kMinBufferCountForEviction = 25;
}
// Buffer structure in bufferpool process
@ -718,8 +718,8 @@ void Accessor::Impl::BufferPool::cleanUp(bool clearCache) {
mStats.mTotalFetches, mStats.mTotalTransfers);
}
for (auto freeIt = mFreeBuffers.begin(); freeIt != mFreeBuffers.end();) {
-        if (!clearCache && mStats.mSizeCached < kMinAllocBytesForEviction
-                && mBuffers.size() < kMinBufferCountForEviction) {
+        if (!clearCache && (mStats.mSizeCached < kMinAllocBytesForEviction
+                || mBuffers.size() < kMinBufferCountForEviction)) {
break;
}
auto it = mBuffers.find(*freeIt);
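Note on the bufferpool change above: the guard that stops the clean-up loop now uses OR instead of AND (and the count floor drops from 40 to 25), so free buffers are evicted only while the cache is large in both dimensions, cached bytes and buffer count; previously eviction continued while either floor was exceeded. A minimal standalone sketch of the new predicate, with constant names copied from the diff (illustrative, not a buildable excerpt of the pool):

    #include <cstddef>

    constexpr size_t kMinAllocBytesForEviction = 1024 * 1024 * 15; // 15 MiB
    constexpr size_t kMinBufferCountForEviction = 25;

    // Returns true when cleanUp() should stop evicting free buffers.
    // Eviction continues only while the cache exceeds *both* floors.
    bool shouldStopEviction(bool clearCache, size_t sizeCached, size_t bufferCount) {
        return !clearCache && (sizeCached < kMinAllocBytesForEviction
                               || bufferCount < kMinBufferCountForEviction);
    }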

@ -155,11 +155,10 @@ C2SoftAacEnc::C2SoftAacEnc(
mNumBytesPerInputFrame(0u),
mOutBufferSize(0u),
mSentCodecSpecificData(false),
-      mInputTimeSet(false),
       mInputSize(0),
-      mNextFrameTimestampUs(0),
       mSignalledError(false),
-      mOutIndex(0u) {
+      mOutIndex(0u),
+      mRemainderLen(0u) {
}
C2SoftAacEnc::~C2SoftAacEnc() {
@ -181,10 +180,11 @@ status_t C2SoftAacEnc::initEncoder() {
c2_status_t C2SoftAacEnc::onStop() {
mSentCodecSpecificData = false;
-    mInputTimeSet = false;
     mInputSize = 0u;
-    mNextFrameTimestampUs = 0;
+    mNextFrameTimestampUs.reset();
+    mLastFrameEndTimestampUs.reset();
     mSignalledError = false;
+    mRemainderLen = 0;
return C2_OK;
}
@ -199,9 +199,9 @@ void C2SoftAacEnc::onRelease() {
c2_status_t C2SoftAacEnc::onFlush_sm() {
mSentCodecSpecificData = false;
-    mInputTimeSet = false;
     mInputSize = 0u;
-    mNextFrameTimestampUs = 0;
+    mNextFrameTimestampUs.reset();
+    mLastFrameEndTimestampUs.reset();
return C2_OK;
}
@ -364,23 +364,35 @@ void C2SoftAacEnc::process(
data = view.data();
capacity = view.capacity();
}
-    if (!mInputTimeSet && capacity > 0) {
-        mNextFrameTimestampUs = work->input.ordinal.timestamp;
-        mInputTimeSet = true;
+    c2_cntr64_t inputTimestampUs = work->input.ordinal.timestamp;
+    if (inputTimestampUs < mLastFrameEndTimestampUs.value_or(inputTimestampUs)) {
+        ALOGW("Correcting overlapping timestamp: last frame ended at %lldus but "
+              "current frame is starting at %lldus. Using the last frame's end timestamp",
+              mLastFrameEndTimestampUs->peekll(), inputTimestampUs.peekll());
+        inputTimestampUs = *mLastFrameEndTimestampUs;
     }
+    if (capacity > 0) {
+        if (!mNextFrameTimestampUs) {
+            mNextFrameTimestampUs = work->input.ordinal.timestamp;
+        }
+        mLastFrameEndTimestampUs = inputTimestampUs
+                + (capacity / sizeof(int16_t) * 1000000ll / channelCount / sampleRate);
+    }
-    size_t numFrames = (capacity + mInputSize + (eos ? mNumBytesPerInputFrame - 1 : 0))
-        / mNumBytesPerInputFrame;
+    size_t numFrames =
+        (mRemainderLen + capacity + mInputSize + (eos ? mNumBytesPerInputFrame - 1 : 0))
+        / mNumBytesPerInputFrame;
     ALOGV("capacity = %zu; mInputSize = %zu; numFrames = %zu "
-          "mNumBytesPerInputFrame = %u inputTS = %lld",
-          capacity, mInputSize, numFrames,
-          mNumBytesPerInputFrame, work->input.ordinal.timestamp.peekll());
+          "mNumBytesPerInputFrame = %u inputTS = %lld remaining = %zu",
+          capacity, mInputSize, numFrames, mNumBytesPerInputFrame, inputTimestampUs.peekll(),
+          mRemainderLen);
std::shared_ptr<C2LinearBlock> block;
std::unique_ptr<C2WriteView> wView;
uint8_t *outPtr = temp;
size_t outAvailable = 0u;
uint64_t inputIndex = work->input.ordinal.frameIndex.peeku();
size_t bytesPerSample = channelCount * sizeof(int16_t);
AACENC_InArgs inargs;
AACENC_OutArgs outargs;
@ -449,7 +461,25 @@ void C2SoftAacEnc::process(
};
std::list<OutputBuffer> outputBuffers;
-    while (encoderErr == AACENC_OK && inargs.numInSamples > 0) {
+    if (mRemainderLen > 0) {
+        size_t offset = 0;
+        for (; mRemainderLen < bytesPerSample && offset < capacity; ++offset) {
+            mRemainder[mRemainderLen++] = data[offset];
+        }
+        data += offset;
+        capacity -= offset;
+        if (mRemainderLen == bytesPerSample) {
+            inBuffer[0] = mRemainder;
+            inBufferSize[0] = bytesPerSample;
+            inargs.numInSamples = channelCount;
+            mRemainderLen = 0;
+            ALOGV("Processing remainder");
+        } else {
+            // We have exhausted the input already
+            inargs.numInSamples = 0;
+        }
+    }
+    while (encoderErr == AACENC_OK && inargs.numInSamples >= channelCount) {
if (numFrames && !block) {
C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
// TODO: error handling, proper usage, etc.
@ -482,11 +512,13 @@ void C2SoftAacEnc::process(
mInputSize = 0;
int consumed = (capacity / sizeof(int16_t)) - inargs.numInSamples
+ outargs.numInSamples;
-            c2_cntr64_t currentFrameTimestampUs = mNextFrameTimestampUs;
-            mNextFrameTimestampUs = work->input.ordinal.timestamp
+            ALOGV("consumed = %d, capacity = %zu, inSamples = %d, outSamples = %d",
+                  consumed, capacity, inargs.numInSamples, outargs.numInSamples);
+            c2_cntr64_t currentFrameTimestampUs = *mNextFrameTimestampUs;
+            mNextFrameTimestampUs = inputTimestampUs
+ (consumed * 1000000ll / channelCount / sampleRate);
std::shared_ptr<C2Buffer> buffer = createLinearBuffer(block, 0, outargs.numOutBytes);
-#if defined(LOG_NDEBUG) && !LOG_NDEBUG
+#if 0
hexdump(outPtr, std::min(outargs.numOutBytes, 256));
#endif
outPtr = temp;
@ -498,7 +530,11 @@ void C2SoftAacEnc::process(
mInputSize += outargs.numInSamples * sizeof(int16_t);
}
-        if (outargs.numInSamples > 0) {
+        if (inBuffer[0] == mRemainder) {
+            inBuffer[0] = const_cast<uint8_t *>(data);
+            inBufferSize[0] = capacity;
+            inargs.numInSamples = capacity / sizeof(int16_t);
+        } else if (outargs.numInSamples > 0) {
inBuffer[0] = (int16_t *)inBuffer[0] + outargs.numInSamples;
inBufferSize[0] -= outargs.numInSamples * sizeof(int16_t);
inargs.numInSamples -= outargs.numInSamples;
@ -506,9 +542,8 @@ void C2SoftAacEnc::process(
}
ALOGV("encoderErr = %d mInputSize = %zu "
"inargs.numInSamples = %d, mNextFrameTimestampUs = %lld",
-              encoderErr, mInputSize, inargs.numInSamples, mNextFrameTimestampUs.peekll());
+              encoderErr, mInputSize, inargs.numInSamples, mNextFrameTimestampUs->peekll());
}
if (eos && inBufferSize[0] > 0) {
if (numFrames && !block) {
C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
@ -539,6 +574,14 @@ void C2SoftAacEnc::process(
&outBufDesc,
&inargs,
&outargs);
inBufferSize[0] = 0;
}
if (inBufferSize[0] > 0) {
for (size_t i = 0; i < inBufferSize[0]; ++i) {
mRemainder[i] = static_cast<uint8_t *>(inBuffer[0])[i];
}
mRemainderLen = inBufferSize[0];
}
while (outputBuffers.size() > 1) {
@ -583,9 +626,9 @@ c2_status_t C2SoftAacEnc::drain(
(void)pool;
mSentCodecSpecificData = false;
-    mInputTimeSet = false;
     mInputSize = 0u;
-    mNextFrameTimestampUs = 0;
+    mNextFrameTimestampUs.reset();
+    mLastFrameEndTimestampUs.reset();
// TODO: we don't have any pending work at this time to drain.
return C2_OK;

@ -18,6 +18,7 @@
#define ANDROID_C2_SOFT_AAC_ENC_H_
#include <atomic>
#include <optional>
#include <SimpleC2Component.h>
@ -54,13 +55,17 @@ private:
UINT mOutBufferSize;
bool mSentCodecSpecificData;
-    bool mInputTimeSet;
     size_t mInputSize;
-    c2_cntr64_t mNextFrameTimestampUs;
+    std::optional<c2_cntr64_t> mNextFrameTimestampUs;
+    std::optional<c2_cntr64_t> mLastFrameEndTimestampUs;
bool mSignalledError;
std::atomic_uint64_t mOutIndex;
// We support max 6 channels
uint8_t mRemainder[6 * sizeof(int16_t)];
size_t mRemainderLen;
status_t initEncoder();
status_t setAudioParams();
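Note on the encoder change above: process() can now receive input whose size is not a multiple of one interleaved PCM sample (channelCount * sizeof(int16_t)); the trailing bytes are parked in mRemainder, which is sized for at most 6 channels, and are completed from the next buffer before normal encoding resumes. A rough standalone sketch of the carry logic under those assumptions (hypothetical names; the real code also feeds the completed sample to the encoder immediately):

    #include <cstdint>
    #include <cstring>

    // Carry-over state, mirroring mRemainder/mRemainderLen (max 6 channels).
    struct SampleCarry {
        uint8_t remainder[6 * sizeof(int16_t)];
        size_t len = 0;
    };

    // Consume `size` bytes; whole samples are returned as a byte count to
    // encode, and the incomplete tail is stashed in `carry` for the next call.
    size_t takeWholeSamples(SampleCarry* carry, const uint8_t* data, size_t size,
                            size_t channelCount) {
        const size_t bytesPerSample = channelCount * sizeof(int16_t);
        const size_t whole = (size / bytesPerSample) * bytesPerSample;
        carry->len = size - whole;                       // always < bytesPerSample
        std::memcpy(carry->remainder, data + whole, carry->len);
        return whole;
    }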

@ -66,7 +66,7 @@ public:
addParameter(
DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 48000))
-                .withFields({C2F(mSampleRate, value).inRange(8000, 96000)})
+                .withFields({C2F(mSampleRate, value).inRange(8000, 192000)})
.withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
.build());

@ -593,12 +593,10 @@ void C2SoftVpxDec::process(
}
}
-    int64_t frameIndex = work->input.ordinal.frameIndex.peekll();
if (inSize) {
uint8_t *bitstream = const_cast<uint8_t *>(rView.data() + inOffset);
vpx_codec_err_t err = vpx_codec_decode(
-                mCodecCtx, bitstream, inSize, &frameIndex, 0);
+                mCodecCtx, bitstream, inSize, &work->input.ordinal.frameIndex, 0);
if (err != VPX_CODEC_OK) {
ALOGE("on2 decoder failed to decode frame. err: %d", err);
mSignalledError = true;
@ -608,7 +606,20 @@ void C2SoftVpxDec::process(
}
}
-    (void)outputBuffer(pool, work);
+    status_t err = outputBuffer(pool, work);
if (err == NOT_ENOUGH_DATA) {
if (inSize > 0) {
ALOGV("Maybe non-display frame at %lld.",
work->input.ordinal.frameIndex.peekll());
// send the work back with empty buffer.
inSize = 0;
}
} else if (err != OK) {
ALOGD("Error while getting the output frame out");
// work->result would be already filled; do fillEmptyWork() below to
// send the work back.
inSize = 0;
}
if (eos) {
drainInternal(DRAIN_COMPONENT_WITH_EOS, pool, work);
@ -742,16 +753,16 @@ static void convertYUV420Planar16ToYUV420Planar(uint8_t *dst,
}
return;
}
-bool C2SoftVpxDec::outputBuffer(
+status_t C2SoftVpxDec::outputBuffer(
const std::shared_ptr<C2BlockPool> &pool,
const std::unique_ptr<C2Work> &work)
{
-    if (!(work && pool)) return false;
+    if (!(work && pool)) return BAD_VALUE;
vpx_codec_iter_t iter = nullptr;
vpx_image_t *img = vpx_codec_get_frame(mCodecCtx, &iter);
-    if (!img) return false;
+    if (!img) return NOT_ENOUGH_DATA;
if (img->d_w != mWidth || img->d_h != mHeight) {
mWidth = img->d_w;
@ -768,7 +779,7 @@ bool C2SoftVpxDec::outputBuffer(
mSignalledError = true;
work->workletsProcessed = 1u;
work->result = C2_CORRUPTED;
-            return false;
+            return UNKNOWN_ERROR;
}
}
@ -791,18 +802,19 @@ bool C2SoftVpxDec::outputBuffer(
if (err != C2_OK) {
ALOGE("fetchGraphicBlock for Output failed with status %d", err);
work->result = err;
-        return false;
+        return UNKNOWN_ERROR;
}
C2GraphicView wView = block->map().get();
if (wView.error()) {
ALOGE("graphic view map failed %d", wView.error());
work->result = C2_CORRUPTED;
-        return false;
+        return UNKNOWN_ERROR;
}
ALOGV("provided (%dx%d) required (%dx%d), out frameindex %d",
block->width(), block->height(), mWidth, mHeight, (int)*(int64_t *)img->user_priv);
ALOGV("provided (%dx%d) required (%dx%d), out frameindex %lld",
block->width(), block->height(), mWidth, mHeight,
((c2_cntr64_t *)img->user_priv)->peekll());
uint8_t *dst = const_cast<uint8_t *>(wView.data()[C2PlanarLayout::PLANE_Y]);
size_t srcYStride = img->stride[VPX_PLANE_Y];
@ -858,8 +870,8 @@ bool C2SoftVpxDec::outputBuffer(
dstYStride, dstUVStride,
mWidth, mHeight);
}
-    finishWork(*(int64_t *)img->user_priv, work, std::move(block));
-    return true;
+    finishWork(((c2_cntr64_t *)img->user_priv)->peekull(), work, std::move(block));
+    return OK;
}
c2_status_t C2SoftVpxDec::drainInternal(
@ -875,7 +887,7 @@ c2_status_t C2SoftVpxDec::drainInternal(
return C2_OMITTED;
}
-    while ((outputBuffer(pool, work))) {
+    while (outputBuffer(pool, work) == OK) {
}
if (drainMode == DRAIN_COMPONENT_WITH_EOS &&

@ -85,7 +85,7 @@ struct C2SoftVpxDec : public SimpleC2Component {
status_t destroyDecoder();
void finishWork(uint64_t index, const std::unique_ptr<C2Work> &work,
const std::shared_ptr<C2GraphicBlock> &block);
-    bool outputBuffer(
+    status_t outputBuffer(
const std::shared_ptr<C2BlockPool> &pool,
const std::unique_ptr<C2Work> &work);
c2_status_t drainInternal(
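Note on the decoder change above: outputBuffer() now reports a status_t instead of bool, so callers can tell "no image produced" (NOT_ENOUGH_DATA, e.g. a non-display frame) apart from hard failures where work->result has already been set. A toy model of why the drain loop must compare against OK explicitly (illustrative only, not the AOSP code):

    #include <cstdio>

    enum class OutStatus { Ok, NotEnoughData, Error };

    OutStatus outputOneFrame(int& framesLeft) {
        if (framesLeft == 0) return OutStatus::NotEnoughData; // nothing decoded
        --framesLeft;
        return OutStatus::Ok;
    }

    int main() {
        int framesLeft = 3;
        // Mirrors `while (outputBuffer(pool, work) == OK) {}` in drainInternal():
        // both NotEnoughData and Error must stop the drain.
        while (outputOneFrame(framesLeft) == OutStatus::Ok) {}
        std::printf("drained, %d frames undelivered\n", framesLeft);
        return 0;
    }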

@ -125,6 +125,9 @@ public:
if (!mClient) {
mClient = Codec2Client::_CreateFromIndex(mIndex);
}
CHECK(mClient) << "Failed to create Codec2Client to service \""
<< GetServiceNames()[mIndex] << "\". (Index = "
<< mIndex << ").";
return mClient;
}
@ -832,6 +835,7 @@ std::shared_ptr<Codec2Client> Codec2Client::_CreateFromIndex(size_t index) {
c2_status_t Codec2Client::ForAllServices(
const std::string &key,
size_t numberOfAttempts,
std::function<c2_status_t(const std::shared_ptr<Codec2Client>&)>
predicate) {
c2_status_t status = C2_NO_INIT; // no IComponentStores present
@ -860,23 +864,31 @@ c2_status_t Codec2Client::ForAllServices(
for (size_t index : indices) {
Cache& cache = Cache::List()[index];
-        std::shared_ptr<Codec2Client> client{cache.getClient()};
-        if (client) {
+        for (size_t tries = numberOfAttempts; tries > 0; --tries) {
+            std::shared_ptr<Codec2Client> client{cache.getClient()};
             status = predicate(client);
             if (status == C2_OK) {
                 std::scoped_lock lock{key2IndexMutex};
                 key2Index[key] = index; // update last known client index
                 return C2_OK;
+            } else if (status == C2_TRANSACTION_FAILED) {
+                LOG(WARNING) << "\"" << key << "\" failed for service \""
+                             << client->getName()
+                             << "\" due to transaction failure. "
+                             << "(Service may have crashed.)"
+                             << (tries > 1 ? " Retrying..." : "");
+                cache.invalidate();
+                continue;
             }
-        }
-        if (wasMapped) {
-            LOG(INFO) << "Could not find \"" << key << "\""
-                         " in the last instance. Retrying...";
-            wasMapped = false;
-            cache.invalidate();
-        }
+            if (wasMapped) {
+                LOG(INFO) << "\"" << key << "\" became invalid in service \""
+                          << client->getName() << "\". Retrying...";
+                wasMapped = false;
+            }
+            break;
+        }
     }
     return status;  // return the last status from a valid client
}
std::shared_ptr<Codec2Client::Component>
@ -885,35 +897,37 @@ std::shared_ptr<Codec2Client::Component>
const std::shared_ptr<Listener>& listener,
std::shared_ptr<Codec2Client>* owner,
size_t numberOfAttempts) {
-    while (true) {
-        std::shared_ptr<Component> component;
-        c2_status_t status = ForAllServices(
-                componentName,
-                [owner, &component, componentName, &listener](
-                        const std::shared_ptr<Codec2Client> &client)
-                            -> c2_status_t {
-                    c2_status_t status = client->createComponent(componentName,
-                                                                 listener,
-                                                                 &component);
-                    if (status == C2_OK) {
-                        if (owner) {
-                            *owner = client;
-                        }
-                    } else if (status != C2_NOT_FOUND) {
-                        LOG(DEBUG) << "IComponentStore("
-                                       << client->getServiceName()
-                                   << ")::createComponent(\"" << componentName
-                                   << "\") returned status = "
-                                   << status << ".";
-                    }
-                    return status;
-                });
-        if (numberOfAttempts > 0 && status == C2_TRANSACTION_FAILED) {
-            --numberOfAttempts;
-            continue;
-        }
-        return component;
-    }
+    std::string key{"create:"};
+    key.append(componentName);
+    std::shared_ptr<Component> component;
+    c2_status_t status = ForAllServices(
+            key,
+            numberOfAttempts,
+            [owner, &component, componentName, &listener](
+                    const std::shared_ptr<Codec2Client> &client)
+                        -> c2_status_t {
+                c2_status_t status = client->createComponent(componentName,
+                                                             listener,
+                                                             &component);
+                if (status == C2_OK) {
+                    if (owner) {
+                        *owner = client;
+                    }
+                } else if (status != C2_NOT_FOUND) {
+                    LOG(DEBUG) << "IComponentStore("
+                                   << client->getServiceName()
+                               << ")::createComponent(\"" << componentName
+                               << "\") returned status = "
+                               << status << ".";
+                }
+                return status;
+            });
+    if (status != C2_OK) {
+        LOG(DEBUG) << "Failed to create component \"" << componentName
+                   << "\" from all known services. "
+                      "Last returned status = " << status << ".";
+    }
+    return component;
}
std::shared_ptr<Codec2Client::Interface>
@ -921,34 +935,36 @@ std::shared_ptr<Codec2Client::Interface>
const char* interfaceName,
std::shared_ptr<Codec2Client>* owner,
size_t numberOfAttempts) {
-    while (true) {
-        std::shared_ptr<Interface> interface;
-        c2_status_t status = ForAllServices(
-                interfaceName,
-                [owner, &interface, interfaceName](
-                        const std::shared_ptr<Codec2Client> &client)
-                            -> c2_status_t {
-                    c2_status_t status = client->createInterface(interfaceName,
-                                                                 &interface);
-                    if (status == C2_OK) {
-                        if (owner) {
-                            *owner = client;
-                        }
-                    } else if (status != C2_NOT_FOUND) {
-                        LOG(DEBUG) << "IComponentStore("
-                                       << client->getServiceName()
-                                   << ")::createInterface(\"" << interfaceName
-                                   << "\") returned status = "
-                                   << status << ".";
-                    }
-                    return status;
-                });
-        if (numberOfAttempts > 0 && status == C2_TRANSACTION_FAILED) {
-            --numberOfAttempts;
-            continue;
-        }
-        return interface;
-    }
+    std::string key{"create:"};
+    key.append(interfaceName);
+    std::shared_ptr<Interface> interface;
+    c2_status_t status = ForAllServices(
+            key,
+            numberOfAttempts,
+            [owner, &interface, interfaceName](
+                    const std::shared_ptr<Codec2Client> &client)
+                        -> c2_status_t {
+                c2_status_t status = client->createInterface(interfaceName,
+                                                             &interface);
+                if (status == C2_OK) {
+                    if (owner) {
+                        *owner = client;
+                    }
+                } else if (status != C2_NOT_FOUND) {
+                    LOG(DEBUG) << "IComponentStore("
+                                   << client->getServiceName()
+                               << ")::createInterface(\"" << interfaceName
+                               << "\") returned status = "
+                               << status << ".";
+                }
+                return status;
+            });
+    if (status != C2_OK) {
+        LOG(DEBUG) << "Failed to create interface \"" << interfaceName
+                   << "\" from all known services. "
+                      "Last returned status = " << status << ".";
+    }
+    return interface;
}
std::vector<C2Component::Traits> const& Codec2Client::ListComponents() {

@ -208,11 +208,25 @@ struct Codec2Client : public Codec2ConfigurableClient {
protected:
sp<Base> mBase;
-    // Finds the first store where the predicate returns OK, and returns the last
-    // predicate result. Uses key to remember the last store found, and if cached,
-    // it tries that store before trying all stores (one retry).
// Finds the first store where the predicate returns C2_OK and returns the
// last predicate result. The predicate will be tried on all stores. The
// function will return C2_OK the first time the predicate returns C2_OK,
// or it will return the value from the last time that predicate is tried.
// (The latter case corresponds to a failure on every store.) The order of
// the stores to try is the same as the return value of GetServiceNames().
//
// key is used to remember the last store with which the predicate last
// succeeded. If the last successful store is cached, it will be tried
// first before all the stores are tried. Note that the last successful
// store will be tried twice---first before all the stores, and another time
// with all the stores.
//
// If an attempt to evaluate the predicate results in a transaction failure,
// repeated attempts will be made until the predicate returns without a
// transaction failure or numberOfAttempts attempts have been made.
static c2_status_t ForAllServices(
const std::string& key,
size_t numberOfAttempts,
std::function<c2_status_t(std::shared_ptr<Codec2Client> const&)>
predicate);
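Note on the retry contract documented above: for each store, the predicate is retried only on C2_TRANSACTION_FAILED (the remote service likely died), after invalidating the cached client so the next attempt reconnects. A compact sketch of that shape (standalone and illustrative, not the AOSP source):

    #include <cstddef>
    #include <functional>

    enum Status { OK_, NOT_FOUND_, TRANSACTION_FAILED_ };

    // Try one store up to numberOfAttempts times; only a transport failure
    // triggers a retry, and the cache is invalidated first so getClient()
    // re-creates the connection on the next iteration.
    Status tryStore(const std::function<Status()>& predicate,
                    const std::function<void()>& invalidateCache,
                    size_t numberOfAttempts) {
        Status status = NOT_FOUND_;
        for (size_t tries = numberOfAttempts; tries > 0; --tries) {
            status = predicate();
            if (status == TRANSACTION_FAILED_) {
                invalidateCache();
                continue;
            }
            break; // success or a non-transport error: stop retrying
        }
        return status;
    }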

@ -600,7 +600,7 @@ c2_status_t C2AllocatorIon::newLinearAllocation(
}
std::shared_ptr<C2AllocationIon> alloc
-            = std::make_shared<C2AllocationIon>(dup(mIonFd), capacity, align, heapMask, flags, mTraits->id);
+            = std::make_shared<C2AllocationIon>(dup(mIonFd), capacity, align, heapMask, flags, getId());
ret = alloc->status();
if (ret == C2_OK) {
*allocation = alloc;
@ -622,7 +622,7 @@ c2_status_t C2AllocatorIon::priorLinearAllocation(
// TODO: get capacity and validate it
const C2HandleIon *h = static_cast<const C2HandleIon*>(handle);
std::shared_ptr<C2AllocationIon> alloc
-            = std::make_shared<C2AllocationIon>(dup(mIonFd), h->size(), h->bufferFd(), mTraits->id);
+            = std::make_shared<C2AllocationIon>(dup(mIonFd), h->size(), h->bufferFd(), getId());
c2_status_t ret = alloc->status();
if (ret == C2_OK) {
*allocation = alloc;

@ -35,6 +35,10 @@
#include <memory>
#include <mutex>
#ifdef __ANDROID_APEX__
#include <android-base/properties.h>
#endif
namespace android {
/**
@ -599,9 +603,33 @@ private:
struct Setter {
static C2R setIonUsage(bool /* mayBlock */, C2P<C2StoreIonUsageInfo> &me) {
#ifdef __ANDROID_APEX__
static int32_t defaultHeapMask = [] {
int32_t heapmask = base::GetIntProperty(
"ro.com.android.media.swcodec.ion.heapmask", int32_t(0xFFFFFFFF));
ALOGD("Default ION heapmask = %d", heapmask);
return heapmask;
}();
static int32_t defaultFlags = [] {
int32_t flags = base::GetIntProperty(
"ro.com.android.media.swcodec.ion.flags", 0);
ALOGD("Default ION flags = %d", flags);
return flags;
}();
static uint32_t defaultAlign = [] {
uint32_t align = base::GetUintProperty(
"ro.com.android.media.swcodec.ion.align", 0u);
ALOGD("Default ION align = %d", align);
return align;
}();
me.set().heapMask = defaultHeapMask;
me.set().allocFlags = defaultFlags;
me.set().minAlignment = defaultAlign;
#else
me.set().heapMask = ~0;
me.set().allocFlags = 0;
me.set().minAlignment = 0;
#endif
return C2R::Ok();
}
};
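Note on the allocator change above: inside the APEX build, the ION defaults now come from the read-only system properties ro.com.android.media.swcodec.ion.heapmask, .flags, and .align, and each function-local static lambda makes the property read happen exactly once, on first use. A tiny standalone illustration of that once-only idiom (the property read is stubbed out here):

    #include <cstdint>
    #include <cstdio>

    // The static initializer runs once, on the first call, and the value is
    // reused afterwards; this is the same pattern as the defaultHeapMask /
    // defaultFlags / defaultAlign lambdas in the diff.
    int32_t defaultHeapMask() {
        static const int32_t heapmask = [] {
            int32_t value = int32_t(0xFFFFFFFFu); // stand-in for GetIntProperty(...)
            std::printf("Default ION heapmask = %d\n", value); // printed only once
            return value;
        }();
        return heapmask;
    }

    int main() {
        defaultHeapMask();
        defaultHeapMask(); // no second log line
        return 0;
    }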

@ -144,6 +144,7 @@ static bool SniffAMR(
AMRExtractor::AMRExtractor(DataSourceHelper *source)
: mDataSource(source),
mMeta(NULL),
mInitCheck(NO_INIT),
mOffsetTableLength(0) {
float confidence;
@ -191,7 +192,9 @@ AMRExtractor::AMRExtractor(DataSourceHelper *source)
AMRExtractor::~AMRExtractor() {
delete mDataSource;
-    AMediaFormat_delete(mMeta);
+    if (mMeta) {
+        AMediaFormat_delete(mMeta);
+    }
}
media_status_t AMRExtractor::getMetaData(AMediaFormat *meta) {

@ -700,8 +700,8 @@ struct IspeBox : public FullBox, public ItemProperty {
}
private:
-    uint32_t mWidth;
-    uint32_t mHeight;
+    int32_t mWidth;
+    int32_t mHeight;
};
status_t IspeBox::parse(off64_t offset, size_t size) {
@ -715,12 +715,19 @@ status_t IspeBox::parse(off64_t offset, size_t size) {
if (size < 8) {
return ERROR_MALFORMED;
}
-    if (!source()->getUInt32(offset, &mWidth)
-            || !source()->getUInt32(offset + 4, &mHeight)) {
+    if (!source()->getUInt32(offset, (uint32_t *)&mWidth)
+            || !source()->getUInt32(offset + 4, (uint32_t *)&mHeight)) {
return ERROR_IO;
}
ALOGV("property ispe: %dx%d", mWidth, mHeight);
// Validate that the dimension doesn't cause overflow on calculated max input size.
// Max input size is width*height*1.5, restrict width*height to 1<<29 so that
// we don't need to cast to int64_t when doing mults.
if (mWidth <= 0 || mHeight <= 0 || mWidth > (1 << 29) / mHeight) {
return ERROR_MALFORMED;
}
ALOGV("property ispe: %dx%d", mWidth, mHeight);
return OK;
}
@ -1524,8 +1531,9 @@ AMediaFormat *ItemTable::getImageMeta(const uint32_t imageIndex) {
default: break; // don't set if invalid
}
}
// we validated no overflow in IspeBox::parse()
AMediaFormat_setInt32(meta,
-            AMEDIAFORMAT_KEY_MAX_INPUT_SIZE, image->width * image->height * 1.5);
+            AMEDIAFORMAT_KEY_MAX_INPUT_SIZE, image->width * image->height * 3 / 2);
if (!image->thumbnails.empty()) {
ssize_t thumbItemIndex = mItemIdToItemMap.indexOfKey(image->thumbnails[0]);
@ -1561,8 +1569,9 @@ AMediaFormat *ItemTable::getImageMeta(const uint32_t imageIndex) {
AMEDIAFORMAT_KEY_TILE_WIDTH, image->width);
AMediaFormat_setInt32(meta,
AMEDIAFORMAT_KEY_TILE_HEIGHT, image->height);
// we validated no overflow in IspeBox::parse()
AMediaFormat_setInt32(meta,
-            AMEDIAFORMAT_KEY_MAX_INPUT_SIZE, image->width * image->height * 1.5);
+            AMEDIAFORMAT_KEY_MAX_INPUT_SIZE, image->width * image->height * 3 / 2);
}
if (image->hvcc == NULL) {
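Note on the bound chosen above: with width * height capped at 1 << 29 pixels, the worst case in width * height * 3 / 2 is 3 * 2^29 = 1,610,612,736 before the division and 3 * 2^28 = 805,306,368 after it, both inside int32_t, so neither the intermediate multiply nor the stored max-input-size can overflow; replacing the float literal 1.5 with integer 3 / 2 also keeps the arithmetic exact. A self-checking sketch:

    #include <cassert>
    #include <cstdint>

    // Same predicate as the ispe validation above: reject non-positive
    // dimensions and any pair whose pixel count exceeds 2^29.
    bool dimensionsValid(int32_t w, int32_t h) {
        return w > 0 && h > 0 && w <= (1 << 29) / h;
    }

    int main() {
        assert(dimensionsValid(16384, 16384));    // 2^28 pixels: accepted
        assert(!dimensionsValid(32768, 32768));   // 2^30 pixels: rejected
        int32_t w = 16384, h = 16384;
        int32_t maxInputSize = w * h * 3 / 2;     // 402653184, no overflow
        assert(maxInputSize == 402653184);
        return 0;
    }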

@ -1561,8 +1561,12 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
{
*offset += chunk_size;
-            if (mLastTrack == NULL)
+            // the absolute minimum size of a compliant mett box is 11 bytes:
+            // 6 byte reserved, 2 byte index, null byte, one char mime_format, null byte
+            // The resulting mime_format would be invalid at that size though.
+            if (mLastTrack == NULL || chunk_data_size < 11) {
                 return ERROR_MALFORMED;
+            }
auto buffer = heapbuffer<uint8_t>(chunk_data_size);
if (buffer.get() == NULL) {
@ -1574,10 +1578,24 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
return ERROR_IO;
}
// ISO-14496-12:
// int8 reserved[6]; // should be all zeroes
// int16_t data_reference_index;
// char content_encoding[]; // null terminated, optional (= just the null byte)
// char mime_format[]; // null terminated, mandatory
// optional other boxes
//
// API < 29:
// char mime_format[]; // null terminated
//
// API >= 29
// char mime_format[]; // null terminated
// char mime_format[]; // null terminated
// Prior to API 29, the metadata track was not compliant with ISO/IEC
// 14496-12-2015. This led to some ISO-compliant parsers failing to read the
// metatrack. As of API 29 and onwards, a change was made to metadata track to
-            // make it compliant with the standard. The workaround is to write the
+            // make it somewhat compatible with the standard. The workaround is to write the
// null-terminated mime_format string twice. This allows compliant parsers to
// read the missing reserved, data_reference_index, and content_encoding fields
// from the first mime_type string. The actual mime_format field would then be
@ -1586,27 +1604,27 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
// as it would only read the first null-terminated mime_format string. To enable
// reading metadata tracks generated from both the non-compliant and compliant
// formats, a check needs to be done to see which format is used.
-            int null_pos = 0;
-            const unsigned char *str = buffer.get();
-            while (null_pos < chunk_data_size) {
-                if (*(str + null_pos) == '\0') {
-                    break;
-                }
-                ++null_pos;
-            }
+            const char *str = (const char*) buffer.get();
+            size_t string_length = strnlen(str, chunk_data_size);
-            if (null_pos == chunk_data_size - 1) {
-                // This is not a standard ompliant metadata track.
-                String8 mimeFormat((const char *)(buffer.get()), chunk_data_size);
-                AMediaFormat_setString(mLastTrack->meta,
-                        AMEDIAFORMAT_KEY_MIME, mimeFormat.string());
+            if (string_length == chunk_data_size - 1) {
+                // This is likely a pre API 29 file, since it's a single null terminated
+                // string filling the entire box.
+                AMediaFormat_setString(mLastTrack->meta, AMEDIAFORMAT_KEY_MIME, str);
             } else {
-                // This is a standard compliant metadata track.
-                String8 contentEncoding((const char *)(buffer.get() + 8));
-                String8 mimeFormat((const char *)(buffer.get() + 8 + contentEncoding.size() + 1),
-                        chunk_data_size - 8 - contentEncoding.size() - 1);
-                AMediaFormat_setString(mLastTrack->meta,
-                        AMEDIAFORMAT_KEY_MIME, mimeFormat.string());
+                // This might be a fully compliant metadata track, a "double mime" compatibility
+                // track, or anything else, including a single non-terminated string, so we need
+                // to determine the length of each string we want to parse out of the box.
+                size_t encoding_length = strnlen(str + 8, chunk_data_size - 8);
+                if (encoding_length + 8 >= chunk_data_size - 2) {
+                    // the encoding extends to the end of the box, so there's no mime_format
+                    return ERROR_MALFORMED;
+                }
+                String8 contentEncoding(str + 8, encoding_length);
+                String8 mimeFormat(str + 8 + encoding_length + 1,
+                        chunk_data_size - 8 - encoding_length - 1);
+                AMediaFormat_setString(mLastTrack->meta,
+                        AMEDIAFORMAT_KEY_MIME, mimeFormat.string());
}
break;
}
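Note on the layouts handled above: the parser distinguishes the legacy box (one NUL-terminated mime string filling the box) from the compliant/compatibility box (8 bytes of reserved + data_reference_index, then content_encoding and mime_format strings) by checking where the first NUL falls. A toy restatement of that dispatch using std::string (illustrative; the real code uses String8 and enforces the 11-byte minimum earlier):

    #include <cstring>
    #include <string>

    // Returns the mime_format carried by a 'mett' payload of `size` bytes,
    // or an empty string if the compliant layout leaves no room for it.
    // Caller has already verified size >= 11.
    std::string parseMettMime(const char* box, size_t size) {
        size_t len = strnlen(box, size);
        if (len == size - 1) {
            return std::string(box);      // legacy: one string fills the box
        }
        size_t encodingLen = strnlen(box + 8, size - 8);
        if (encodingLen + 8 >= size - 2) {
            return {};                    // no mime_format after content_encoding
        }
        return std::string(box + 8 + encodingLen + 1,
                           size - 8 - encodingLen - 1);
    }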
@ -5775,11 +5793,11 @@ media_status_t MPEG4Source::read(
meta, AMEDIAFORMAT_KEY_TIME_US, ((long double)cts * 1000000) / mTimescale);
AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
-                int32_t byteOrder;
-                AMediaFormat_getInt32(mFormat,
+                int32_t byteOrder = 0;
+                bool isGetBigEndian = AMediaFormat_getInt32(mFormat,
                         AMEDIAFORMAT_KEY_PCM_BIG_ENDIAN, &byteOrder);
-                if (byteOrder == 1) {
+                if (isGetBigEndian && byteOrder == 1) {
// Big-endian -> little-endian
uint16_t *dstData = (uint16_t *)buf;
uint16_t *srcData = (uint16_t *)buf;

@ -655,6 +655,7 @@ void SampleTable::buildSampleEntriesTable() {
}
mSampleTimeEntries = new (std::nothrow) SampleTimeEntry[mNumSampleSizes];
+    memset(mSampleTimeEntries, 0, sizeof(SampleTimeEntry) * mNumSampleSizes);
if (!mSampleTimeEntries) {
ALOGE("Cannot allocate sample entry table with %llu entries.",
(unsigned long long)mNumSampleSizes);

@ -1361,10 +1361,14 @@ status_t BnAudioFlinger::onTransact(
}
case GET_EFFECT_DESCRIPTOR: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
-            effect_uuid_t uuid;
-            data.read(&uuid, sizeof(effect_uuid_t));
-            effect_uuid_t type;
-            data.read(&type, sizeof(effect_uuid_t));
+            effect_uuid_t uuid = {};
+            if (data.read(&uuid, sizeof(effect_uuid_t)) != NO_ERROR) {
+                android_errorWriteLog(0x534e4554, "139417189");
+            }
+            effect_uuid_t type = {};
+            if (data.read(&type, sizeof(effect_uuid_t)) != NO_ERROR) {
+                android_errorWriteLog(0x534e4554, "139417189");
+            }
uint32_t preferredTypeFlag = data.readUint32();
effect_descriptor_t desc = {};
status_t status = getEffectDescriptor(&uuid, &type, preferredTypeFlag, &desc);
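Note on the hardening above: the out-parameters are now zero-initialized and the Parcel::read() result is checked, with a SafetyNet log on a short read (tag 0x534e4554 spells "SNET"; the string is the bug ID), so getEffectDescriptor() never consumes uninitialized stack memory. The same pattern, reduced to a standalone toy:

    #include <cstdio>
    #include <cstring>

    struct Uuid { unsigned char bytes[16]; };

    // Zero-init first so the value is well-defined even when the read fails;
    // the caller then proceeds with the zeroed UUID instead of stack garbage.
    bool readUuid(const unsigned char* parcel, size_t avail, Uuid* out) {
        *out = Uuid{};
        if (avail < sizeof(*out)) {
            std::fprintf(stderr, "short read; would log SafetyNet 139417189\n");
            return false;
        }
        std::memcpy(out->bytes, parcel, sizeof(*out));
        return true;
    }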

@ -130,29 +130,32 @@ void NuPlayer::StreamingSource::onReadBuffer() {
} else if (n < 0) {
break;
} else {
-            if (buffer[0] == 0x00) {
+            if (buffer[0] == 0x00) { // OK to access buffer[0] since n must be > 0 here
// XXX legacy
if (extra == NULL) {
extra = new AMessage;
}
-                uint8_t type = buffer[1];
+                uint8_t type = 0;
+                if (n > 1) {
+                    type = buffer[1];
-                if (type & 2) {
-                    int64_t mediaTimeUs;
-                    memcpy(&mediaTimeUs, &buffer[2], sizeof(mediaTimeUs));
+                    if ((type & 2) && (n >= 2 + sizeof(int64_t))) {
+                        int64_t mediaTimeUs;
+                        memcpy(&mediaTimeUs, &buffer[2], sizeof(mediaTimeUs));
-                    extra->setInt64(kATSParserKeyMediaTimeUs, mediaTimeUs);
+                        extra->setInt64(kATSParserKeyMediaTimeUs, mediaTimeUs);
+                    }
+                }
mTSParser->signalDiscontinuity(
((type & 1) == 0)
-                    ? ATSParser::DISCONTINUITY_TIME
-                    : ATSParser::DISCONTINUITY_FORMATCHANGE,
+                        ? ATSParser::DISCONTINUITY_TIME
+                        : ATSParser::DISCONTINUITY_FORMATCHANGE,
extra);
} else {
-            status_t err = mTSParser->feedTSPacket(buffer, sizeof(buffer));
+            status_t err = mTSParser->feedTSPacket(buffer, n);
if (err != OK) {
ALOGE("TS Parser returned error %d", err);

@ -6902,7 +6902,7 @@ status_t ACodec::LoadedState::setupInputSurface() {
}
}
-    if (mCodec->mMaxPtsGapUs != 0LL) {
+    if (mCodec->mIsVideo && mCodec->mMaxPtsGapUs != 0LL) {
OMX_PARAM_U32TYPE maxPtsGapParams;
InitOMXParams(&maxPtsGapParams);
maxPtsGapParams.nPortIndex = kPortIndexInput;

@ -2004,6 +2004,15 @@ void CameraDeviceClient::detachDevice() {
}
}
for (size_t i = 0; i < mCompositeStreamMap.size(); i++) {
auto ret = mCompositeStreamMap.valueAt(i)->deleteInternalStreams();
if (ret != OK) {
ALOGE("%s: Failed removing composite stream %s (%d)", __FUNCTION__,
strerror(-ret), ret);
}
}
mCompositeStreamMap.clear();
Camera2ClientBase::detachDevice();
}

@ -247,7 +247,7 @@ int64_t DepthCompositeStream::getNextFailingInputLocked(int64_t *currentTs /*ino
return ret;
}
-status_t DepthCompositeStream::processInputFrame(const InputFrame &inputFrame) {
+status_t DepthCompositeStream::processInputFrame(nsecs_t ts, const InputFrame &inputFrame) {
status_t res;
sp<ANativeWindow> outputANW = mOutputSurface;
ANativeWindowBuffer *anb;
@ -370,6 +370,13 @@ status_t DepthCompositeStream::processInputFrame(const InputFrame &inputFrame) {
return NO_MEMORY;
}
res = native_window_set_buffers_timestamp(mOutputSurface.get(), ts);
if (res != OK) {
ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)", __FUNCTION__,
getStreamId(), strerror(-res), res);
return res;
}
ALOGV("%s: Final jpeg size: %zu", __func__, finalJpegSize);
uint8_t* header = static_cast<uint8_t *> (dstBuffer) +
(gb->getWidth() - sizeof(struct camera3_jpeg_blob));
@ -459,7 +466,7 @@ bool DepthCompositeStream::threadLoop() {
}
}
-        auto res = processInputFrame(mPendingInputFrames[currentTs]);
+        auto res = processInputFrame(currentTs, mPendingInputFrames[currentTs]);
Mutex::Autolock l(mMutex);
if (res != OK) {
ALOGE("%s: Failed processing frame with timestamp: %" PRIu64 ": %s (%d)", __FUNCTION__,

@ -97,7 +97,7 @@ private:
size_t maxJpegSize, uint8_t jpegQuality,
std::vector<std::unique_ptr<Item>>* items /*out*/);
std::unique_ptr<ImagingModel> getImagingModel();
-    status_t processInputFrame(const InputFrame &inputFrame);
+    status_t processInputFrame(nsecs_t ts, const InputFrame &inputFrame);
// Buffer/Results handling
void compilePendingInputLocked();

@ -31,7 +31,6 @@
#include <mediadrm/ICrypto.h>
#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/MediaDefs.h>
#include <media/stagefright/MediaCodecConstants.h>
@ -61,12 +60,13 @@ HeicCompositeStream::HeicCompositeStream(wp<CameraDeviceBase> device,
mUseGrid(false),
mAppSegmentStreamId(-1),
mAppSegmentSurfaceId(-1),
-        mAppSegmentBufferAcquired(false),
mMainImageStreamId(-1),
mMainImageSurfaceId(-1),
mYuvBufferAcquired(false),
mProducerListener(new ProducerListener()),
-        mOutputBufferCounter(0),
+        mDequeuedOutputBufferCnt(0),
+        mLockedAppSegmentBufferCnt(0),
+        mCodecOutputCounter(0),
mGridTimestampUs(0) {
}
@ -132,7 +132,7 @@ status_t HeicCompositeStream::createInternalStreams(const std::vector<sp<Surface
sp<IGraphicBufferProducer> producer;
sp<IGraphicBufferConsumer> consumer;
BufferQueue::createBufferQueue(&producer, &consumer);
-    mAppSegmentConsumer = new CpuConsumer(consumer, 1);
+    mAppSegmentConsumer = new CpuConsumer(consumer, kMaxAcquiredAppSegment);
mAppSegmentConsumer->setFrameAvailableListener(this);
mAppSegmentConsumer->setName(String8("Camera3-HeicComposite-AppSegmentStream"));
mAppSegmentSurface = new Surface(producer);
@ -231,6 +231,8 @@ void HeicCompositeStream::onBufferReleased(const BufferInfo& bufferInfo) {
if (bufferInfo.mError) return;
mCodecOutputBufferTimestamps.push(bufferInfo.mTimestamp);
ALOGV("%s: [%" PRId64 "]: Adding codecOutputBufferTimestamp (%zu timestamps in total)",
__FUNCTION__, bufferInfo.mTimestamp, mCodecOutputBufferTimestamps.size());
}
// We need to get the settings early to handle the case where the codec output
@ -361,6 +363,8 @@ void HeicCompositeStream::onHeicOutputFrameAvailable(
mCodecOutputBuffers.push_back(outputBufferInfo);
mInputReadyCondition.signal();
} else {
ALOGV("%s: Releasing output buffer: size %d flags: 0x%x ", __FUNCTION__,
outputBufferInfo.size, outputBufferInfo.flags);
mCodec->releaseOutputBuffer(outputBufferInfo.index);
}
} else {
@ -414,8 +418,10 @@ void HeicCompositeStream::onHeicFormatChanged(sp<AMessage>& newFormat) {
mNumOutputTiles = 1;
}
ALOGV("%s: mNumOutputTiles is %zu", __FUNCTION__, mNumOutputTiles);
mFormat = newFormat;
ALOGV("%s: mNumOutputTiles is %zu", __FUNCTION__, mNumOutputTiles);
mInputReadyCondition.signal();
}
void HeicCompositeStream::onHeicCodecError() {
@ -459,9 +465,8 @@ status_t HeicCompositeStream::configureStream() {
// Cannot use SourceSurface buffer count since it could be codec's 512*512 tile
// buffer count.
-    int maxProducerBuffers = 1;
     if ((res = native_window_set_buffer_count(
-            anwConsumer, maxProducerBuffers + maxConsumerBuffers)) != OK) {
+            anwConsumer, kMaxOutputSurfaceProducerCount + maxConsumerBuffers)) != OK) {
ALOGE("%s: Unable to set buffer count for stream %d", __FUNCTION__, mMainImageStreamId);
return res;
}
@ -505,6 +510,8 @@ void HeicCompositeStream::onShutter(const CaptureResultExtras& resultExtras, nse
}
if (mSettingsByFrameNumber.find(resultExtras.frameNumber) != mSettingsByFrameNumber.end()) {
ALOGV("%s: [%" PRId64 "]: frameNumber %" PRId64, __FUNCTION__,
timestamp, resultExtras.frameNumber);
mFrameNumberMap.emplace(resultExtras.frameNumber, timestamp);
mSettingsByTimestamp[timestamp] = mSettingsByFrameNumber[resultExtras.frameNumber];
mSettingsByFrameNumber.erase(resultExtras.frameNumber);
@ -520,12 +527,12 @@ void HeicCompositeStream::compilePendingInputLocked() {
mSettingsByTimestamp.erase(it);
}
-    while (!mInputAppSegmentBuffers.empty() && !mAppSegmentBufferAcquired) {
+    while (!mInputAppSegmentBuffers.empty()) {
CpuConsumer::LockedBuffer imgBuffer;
auto it = mInputAppSegmentBuffers.begin();
auto res = mAppSegmentConsumer->lockNextBuffer(&imgBuffer);
if (res == NOT_ENOUGH_DATA) {
-            // Canot not lock any more buffers.
+            // Can not lock any more buffers.
break;
} else if ((res != OK) || (*it != imgBuffer.timestamp)) {
if (res != OK) {
@ -535,6 +542,7 @@ void HeicCompositeStream::compilePendingInputLocked() {
ALOGE("%s: Expecting JPEG_APP_SEGMENTS buffer with time stamp: %" PRId64
" received buffer with time stamp: %" PRId64, __FUNCTION__,
*it, imgBuffer.timestamp);
+                mAppSegmentConsumer->unlockBuffer(imgBuffer);
}
mPendingInputFrames[*it].error = true;
mInputAppSegmentBuffers.erase(it);
@ -546,7 +554,7 @@ void HeicCompositeStream::compilePendingInputLocked() {
mAppSegmentConsumer->unlockBuffer(imgBuffer);
} else {
mPendingInputFrames[imgBuffer.timestamp].appSegmentBuffer = imgBuffer;
-            mAppSegmentBufferAcquired = true;
+            mLockedAppSegmentBufferCnt++;
}
mInputAppSegmentBuffers.erase(it);
}
@ -556,7 +564,7 @@ void HeicCompositeStream::compilePendingInputLocked() {
auto it = mInputYuvBuffers.begin();
auto res = mMainImageConsumer->lockNextBuffer(&imgBuffer);
if (res == NOT_ENOUGH_DATA) {
-            // Canot not lock any more buffers.
+            // Can not lock any more buffers.
break;
} else if (res != OK) {
ALOGE("%s: Error locking YUV_888 image buffer: %s (%d)", __FUNCTION__,
@ -589,17 +597,20 @@ void HeicCompositeStream::compilePendingInputLocked() {
// to look up timestamp.
int64_t bufferTime = -1;
if (mCodecOutputBufferTimestamps.empty()) {
ALOGE("%s: Failed to find buffer timestamp for codec output buffer!", __FUNCTION__);
ALOGV("%s: Failed to find buffer timestamp for codec output buffer!", __FUNCTION__);
break;
} else {
// Direct mapping between camera timestamp (in ns) and codec timestamp (in us).
bufferTime = mCodecOutputBufferTimestamps.front();
-            mOutputBufferCounter++;
-            if (mOutputBufferCounter == mNumOutputTiles) {
+            mCodecOutputCounter++;
+            if (mCodecOutputCounter == mNumOutputTiles) {
                 mCodecOutputBufferTimestamps.pop();
-                mOutputBufferCounter = 0;
+                mCodecOutputCounter = 0;
}
mPendingInputFrames[bufferTime].codecOutputBuffers.push_back(*it);
ALOGV("%s: [%" PRId64 "]: Pushing codecOutputBuffers (time %" PRId64 " us)",
__FUNCTION__, bufferTime, it->timeUs);
}
mCodecOutputBuffers.erase(it);
}
@ -607,6 +618,7 @@ void HeicCompositeStream::compilePendingInputLocked() {
while (!mFrameNumberMap.empty()) {
auto it = mFrameNumberMap.begin();
mPendingInputFrames[it->second].frameNumber = it->first;
ALOGV("%s: [%" PRId64 "]: frameNumber is %" PRId64, __FUNCTION__, it->second, it->first);
mFrameNumberMap.erase(it);
}
@ -675,16 +687,29 @@ bool HeicCompositeStream::getNextReadyInputLocked(int64_t *currentTs /*out*/) {
}
bool newInputAvailable = false;
-    for (const auto& it : mPendingInputFrames) {
+    for (auto& it : mPendingInputFrames) {
// New input is considered to be available only if:
// 1. input buffers are ready, or
// 2. App segment and muxer is created, or
// 3. A codec output tile is ready, and an output buffer is available.
// This makes sure that muxer gets created only when an output tile is
// generated, because right now we only handle 1 HEIC output buffer at a
// time (max dequeued buffer count is 1).
bool appSegmentReady = (it.second.appSegmentBuffer.data != nullptr) &&
-                !it.second.appSegmentWritten && it.second.result != nullptr;
+                !it.second.appSegmentWritten && it.second.result != nullptr &&
+                it.second.muxer != nullptr;
bool codecOutputReady = !it.second.codecOutputBuffers.empty();
bool codecInputReady = (it.second.yuvBuffer.data != nullptr) &&
(!it.second.codecInputBuffers.empty());
bool hasOutputBuffer = it.second.muxer != nullptr ||
(mDequeuedOutputBufferCnt < kMaxOutputSurfaceProducerCount);
if ((!it.second.error) &&
(it.first < *currentTs) &&
-                (appSegmentReady || codecOutputReady || codecInputReady)) {
+                (appSegmentReady || (codecOutputReady && hasOutputBuffer) || codecInputReady)) {
*currentTs = it.first;
if (it.second.format == nullptr && mFormat != nullptr) {
it.second.format = mFormat->dup();
}
newInputAvailable = true;
break;
}
@ -716,15 +741,17 @@ status_t HeicCompositeStream::processInputFrame(nsecs_t timestamp,
status_t res = OK;
bool appSegmentReady = inputFrame.appSegmentBuffer.data != nullptr &&
-            !inputFrame.appSegmentWritten && inputFrame.result != nullptr;
+            !inputFrame.appSegmentWritten && inputFrame.result != nullptr &&
+            inputFrame.muxer != nullptr;
bool codecOutputReady = inputFrame.codecOutputBuffers.size() > 0;
bool codecInputReady = inputFrame.yuvBuffer.data != nullptr &&
             !inputFrame.codecInputBuffers.empty();
+    bool hasOutputBuffer = inputFrame.muxer != nullptr ||
+            (mDequeuedOutputBufferCnt < kMaxOutputSurfaceProducerCount);
if (!appSegmentReady && !codecOutputReady && !codecInputReady) {
ALOGW("%s: No valid appSegmentBuffer/codec input/outputBuffer available!", __FUNCTION__);
return OK;
}
ALOGV("%s: [%" PRId64 "]: appSegmentReady %d, codecOutputReady %d, codecInputReady %d,"
" dequeuedOutputBuffer %d", __FUNCTION__, timestamp, appSegmentReady,
codecOutputReady, codecInputReady, mDequeuedOutputBufferCnt);
// Handle inputs for Hevc tiling
if (codecInputReady) {
@ -736,7 +763,13 @@ status_t HeicCompositeStream::processInputFrame(nsecs_t timestamp,
}
}
-    // Initialize and start muxer if not yet done so
+    if (!(codecOutputReady && hasOutputBuffer) && !appSegmentReady) {
+        return OK;
+    }
+
+    // Initialize and start muxer if not yet done so. In this case,
+    // codecOutputReady must be true. Otherwise, appSegmentReady is guaranteed
+    // to be false, and the function must have returned early.
if (inputFrame.muxer == nullptr) {
res = startMuxerForInputFrame(timestamp, inputFrame);
if (res != OK) {
@ -747,7 +780,7 @@ status_t HeicCompositeStream::processInputFrame(nsecs_t timestamp,
}
// Write JPEG APP segments data to the muxer.
-    if (appSegmentReady && inputFrame.muxer != nullptr) {
+    if (appSegmentReady) {
res = processAppSegment(timestamp, inputFrame);
if (res != OK) {
ALOGE("%s: Failed to process JPEG APP segments: %s (%d)", __FUNCTION__,
@ -766,12 +799,18 @@ status_t HeicCompositeStream::processInputFrame(nsecs_t timestamp,
}
}
-    if (inputFrame.appSegmentWritten && inputFrame.pendingOutputTiles == 0) {
-        res = processCompletedInputFrame(timestamp, inputFrame);
-        if (res != OK) {
-            ALOGE("%s: Failed to process completed input frame: %s (%d)", __FUNCTION__,
-                    strerror(-res), res);
-            return res;
+    if (inputFrame.pendingOutputTiles == 0) {
+        if (inputFrame.appSegmentWritten) {
+            res = processCompletedInputFrame(timestamp, inputFrame);
+            if (res != OK) {
+                ALOGE("%s: Failed to process completed input frame: %s (%d)", __FUNCTION__,
+                        strerror(-res), res);
+                return res;
+            }
+        } else if (mLockedAppSegmentBufferCnt == kMaxAcquiredAppSegment) {
+            ALOGE("%s: Out-of-order app segment buffers reaches limit %u", __FUNCTION__,
+                    kMaxAcquiredAppSegment);
+            return INVALID_OPERATION;
}
}
@ -780,11 +819,6 @@ status_t HeicCompositeStream::processInputFrame(nsecs_t timestamp,
status_t HeicCompositeStream::startMuxerForInputFrame(nsecs_t timestamp, InputFrame &inputFrame) {
sp<ANativeWindow> outputANW = mOutputSurface;
-    if (inputFrame.codecOutputBuffers.size() == 0) {
-        // No single codec output buffer has been generated. Continue to
-        // wait.
-        return OK;
-    }
auto res = outputANW->dequeueBuffer(mOutputSurface.get(), &inputFrame.anb, &inputFrame.fenceFd);
if (res != OK) {
@ -792,6 +826,7 @@ status_t HeicCompositeStream::startMuxerForInputFrame(nsecs_t timestamp, InputFr
res);
return res;
}
mDequeuedOutputBufferCnt++;
// Combine current thread id, stream id and timestamp to uniquely identify image.
std::ostringstream tempOutputFile;
@ -828,7 +863,7 @@ status_t HeicCompositeStream::startMuxerForInputFrame(nsecs_t timestamp, InputFr
}
}
-    ssize_t trackId = inputFrame.muxer->addTrack(mFormat);
+    ssize_t trackId = inputFrame.muxer->addTrack(inputFrame.format);
if (trackId < 0) {
ALOGE("%s: Failed to addTrack to the muxer: %zd", __FUNCTION__, trackId);
return NO_INIT;
@ -844,6 +879,8 @@ status_t HeicCompositeStream::startMuxerForInputFrame(nsecs_t timestamp, InputFr
return res;
}
ALOGV("%s: [%" PRId64 "]: Muxer started for inputFrame", __FUNCTION__,
timestamp);
return OK;
}
@ -852,9 +889,6 @@ status_t HeicCompositeStream::processAppSegment(nsecs_t timestamp, InputFrame &i
auto appSegmentSize = findAppSegmentsSize(inputFrame.appSegmentBuffer.data,
inputFrame.appSegmentBuffer.width * inputFrame.appSegmentBuffer.height,
&app1Size);
ALOGV("%s: appSegmentSize is %zu, width %d, height %d, app1Size %zu", __FUNCTION__,
appSegmentSize, inputFrame.appSegmentBuffer.width,
inputFrame.appSegmentBuffer.height, app1Size);
if (appSegmentSize == 0) {
ALOGE("%s: Failed to find JPEG APP segment size", __FUNCTION__);
return NO_INIT;
@ -910,7 +944,16 @@ status_t HeicCompositeStream::processAppSegment(nsecs_t timestamp, InputFrame &i
__FUNCTION__, strerror(-res), res);
return res;
}
ALOGV("%s: [%" PRId64 "]: appSegmentSize is %zu, width %d, height %d, app1Size %zu",
__FUNCTION__, timestamp, appSegmentSize, inputFrame.appSegmentBuffer.width,
inputFrame.appSegmentBuffer.height, app1Size);
inputFrame.appSegmentWritten = true;
// Release the buffer now so any pending input app segments can be processed
mAppSegmentConsumer->unlockBuffer(inputFrame.appSegmentBuffer);
inputFrame.appSegmentBuffer.data = nullptr;
mLockedAppSegmentBufferCnt--;
return OK;
}
@ -934,8 +977,9 @@ status_t HeicCompositeStream::processCodecInputFrame(InputFrame &inputFrame) {
mOutputWidth - tileX * mGridWidth : mGridWidth;
size_t height = (tileY == static_cast<size_t>(mGridRows) - 1) ?
mOutputHeight - tileY * mGridHeight : mGridHeight;
ALOGV("%s: inputBuffer tileIndex [%zu, %zu], top %zu, left %zu, width %zu, height %zu",
__FUNCTION__, tileX, tileY, top, left, width, height);
ALOGV("%s: inputBuffer tileIndex [%zu, %zu], top %zu, left %zu, width %zu, height %zu,"
" timeUs %" PRId64, __FUNCTION__, tileX, tileY, top, left, width, height,
inputBuffer.timeUs);
res = copyOneYuvTile(buffer, inputFrame.yuvBuffer, top, left, width, height);
if (res != OK) {
@ -990,6 +1034,9 @@ status_t HeicCompositeStream::processOneCodecOutputFrame(nsecs_t timestamp,
}
inputFrame.codecOutputBuffers.erase(inputFrame.codecOutputBuffers.begin());
ALOGV("%s: [%" PRId64 "]: Output buffer index %d",
__FUNCTION__, timestamp, it->index);
return OK;
}
@ -1046,7 +1093,9 @@ status_t HeicCompositeStream::processCompletedInputFrame(nsecs_t timestamp,
return res;
}
inputFrame.anb = nullptr;
mDequeuedOutputBufferCnt--;
ALOGV("%s: [%" PRId64 "]", __FUNCTION__, timestamp);
ATRACE_ASYNC_END("HEIC capture", inputFrame.frameNumber);
return OK;
}
@ -1060,7 +1109,6 @@ void HeicCompositeStream::releaseInputFrameLocked(InputFrame *inputFrame /*out*/
if (inputFrame->appSegmentBuffer.data != nullptr) {
mAppSegmentConsumer->unlockBuffer(inputFrame->appSegmentBuffer);
inputFrame->appSegmentBuffer.data = nullptr;
-        mAppSegmentBufferAcquired = false;
}
while (!inputFrame->codecOutputBuffers.empty()) {
@ -1098,11 +1146,13 @@ void HeicCompositeStream::releaseInputFrameLocked(InputFrame *inputFrame /*out*/
}
}
-void HeicCompositeStream::releaseInputFramesLocked(int64_t currentTs) {
+void HeicCompositeStream::releaseInputFramesLocked() {
     auto it = mPendingInputFrames.begin();
     while (it != mPendingInputFrames.end()) {
-        if (it->first <= currentTs) {
-            releaseInputFrameLocked(&it->second);
+        auto& inputFrame = it->second;
+        if (inputFrame.error ||
+                (inputFrame.appSegmentWritten && inputFrame.pendingOutputTiles == 0)) {
+            releaseInputFrameLocked(&inputFrame);
it = mPendingInputFrames.erase(it);
} else {
it++;
@ -1506,7 +1556,7 @@ bool HeicCompositeStream::threadLoop() {
// In case we landed in error state, return any pending buffers and
// halt all further processing.
compilePendingInputLocked();
-        releaseInputFramesLocked(currentTs);
+        releaseInputFramesLocked();
return false;
}
@ -1548,11 +1598,7 @@ bool HeicCompositeStream::threadLoop() {
mPendingInputFrames[currentTs].error = true;
}
-    if (mPendingInputFrames[currentTs].error ||
-            (mPendingInputFrames[currentTs].appSegmentWritten &&
-            mPendingInputFrames[currentTs].pendingOutputTiles == 0)) {
-        releaseInputFramesLocked(currentTs);
-    }
+    releaseInputFramesLocked();
return true;
}

@ -25,6 +25,7 @@
#include <media/hardware/VideoAPI.h>
#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/MediaMuxer.h>
@ -157,6 +158,7 @@ private:
bool errorNotified;
int64_t frameNumber;
sp<AMessage> format;
sp<MediaMuxer> muxer;
int fenceFd;
int fileFd;
@ -187,7 +189,7 @@ private:
status_t processCompletedInputFrame(nsecs_t timestamp, InputFrame &inputFrame);
void releaseInputFrameLocked(InputFrame *inputFrame /*out*/);
-    void releaseInputFramesLocked(int64_t currentTs);
+    void releaseInputFramesLocked();
size_t findAppSegmentsSize(const uint8_t* appSegmentBuffer, size_t maxSize,
size_t* app1SegmentSize);
@ -205,11 +207,13 @@ private:
static_cast<android_dataspace>(HAL_DATASPACE_JPEG_APP_SEGMENTS);
static const android_dataspace kHeifDataSpace =
static_cast<android_dataspace>(HAL_DATASPACE_HEIF);
// Use the limit of pipeline depth in the API sepc as maximum number of acquired
// app segment buffers.
static const uint32_t kMaxAcquiredAppSegment = 8;
int mAppSegmentStreamId, mAppSegmentSurfaceId;
sp<CpuConsumer> mAppSegmentConsumer;
sp<Surface> mAppSegmentSurface;
-    bool mAppSegmentBufferAcquired;
size_t mAppSegmentMaxSize;
CameraMetadata mStaticInfo;
@ -218,9 +222,10 @@ private:
sp<CpuConsumer> mMainImageConsumer; // Only applicable for HEVC codec.
bool mYuvBufferAcquired; // Only applicable to HEVC codec
static const int32_t kMaxOutputSurfaceProducerCount = 1;
sp<Surface> mOutputSurface;
sp<ProducerListener> mProducerListener;
int32_t mDequeuedOutputBufferCnt;
// Map from frame number to JPEG setting of orientation+quality
std::map<int64_t, std::pair<int32_t, int32_t>> mSettingsByFrameNumber;
@ -229,11 +234,12 @@ private:
// Keep all incoming APP segment Blob buffer pending further processing.
std::vector<int64_t> mInputAppSegmentBuffers;
int32_t mLockedAppSegmentBufferCnt;
// Keep all incoming HEIC blob buffer pending further processing.
std::vector<CodecOutputBufferInfo> mCodecOutputBuffers;
std::queue<int64_t> mCodecOutputBufferTimestamps;
-    size_t mOutputBufferCounter;
+    size_t mCodecOutputCounter;
// Keep all incoming Yuv buffer pending tiling and encoding (for HEVC YUV tiling only)
std::vector<int64_t> mInputYuvBuffers;
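Note on the accounting above: the single mAppSegmentBufferAcquired flag becomes mLockedAppSegmentBufferCnt with a ceiling of kMaxAcquiredAppSegment (8, the pipeline-depth limit in the API spec), and mDequeuedOutputBufferCnt caps in-flight output buffers at kMaxOutputSurfaceProducerCount (1), which is also why a muxer is only started once an output tile is ready. The gating condition, as a small illustrative helper:

    #include <cstdint>

    constexpr int32_t kMaxOutputSurfaceProducerCount = 1; // from the diff

    // Mirrors getNextReadyInputLocked(): a frame with ready codec output may
    // proceed only if it already owns a muxer (and thus an output buffer) or
    // another output buffer can still be dequeued.
    bool hasOutputBuffer(bool ownsMuxer, int32_t dequeuedOutputBufferCnt) {
        return ownsMuxer || dequeuedOutputBufferCnt < kMaxOutputSurfaceProducerCount;
    }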

@ -54,9 +54,8 @@ Camera3OutputStream::Camera3OutputStream(int id,
mState = STATE_ERROR;
}
-    if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
-        mBufferReleasedListener = new BufferReleasedListener(this);
-    }
+    bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
+    mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
}
Camera3OutputStream::Camera3OutputStream(int id,
@ -87,9 +86,8 @@ Camera3OutputStream::Camera3OutputStream(int id,
mState = STATE_ERROR;
}
-    if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
-        mBufferReleasedListener = new BufferReleasedListener(this);
-    }
+    bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
+    mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
}
Camera3OutputStream::Camera3OutputStream(int id,
@ -124,10 +122,8 @@ Camera3OutputStream::Camera3OutputStream(int id,
}
mConsumerName = String8("Deferred");
-    if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
-        mBufferReleasedListener = new BufferReleasedListener(this);
-    }
+    bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
+    mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
}
Camera3OutputStream::Camera3OutputStream(int id, camera3_stream_type_t type,
@ -151,9 +147,8 @@ Camera3OutputStream::Camera3OutputStream(int id, camera3_stream_type_t type,
mDropBuffers(false),
mDequeueBufferLatency(kDequeueLatencyBinSize) {
-    if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
-        mBufferReleasedListener = new BufferReleasedListener(this);
-    }
+    bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
+    mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
// Subclasses expected to initialize mConsumer themselves
}
@ -261,7 +256,7 @@ status_t Camera3OutputStream::returnBufferCheckedLocked(
notifyBufferReleased(anwBuffer);
if (mUseBufferManager) {
// Return this buffer back to buffer manager.
-            mBufferReleasedListener->onBufferReleased();
+            mBufferProducerListener->onBufferReleased();
}
} else {
if (mTraceFirstBuffer && (stream_type == CAMERA3_STREAM_OUTPUT)) {
@ -387,8 +382,8 @@ status_t Camera3OutputStream::configureConsumerQueueLocked() {
// Configure consumer-side ANativeWindow interface. The listener may be used
// to notify buffer manager (if it is used) of the returned buffers.
res = mConsumer->connect(NATIVE_WINDOW_API_CAMERA,
-            /*listener*/mBufferReleasedListener,
-            /*reportBufferRemoval*/true);
+            /*reportBufferRemoval*/true,
+            /*listener*/mBufferProducerListener);
if (res != OK) {
ALOGE("%s: Unable to connect to native window for stream %d",
__FUNCTION__, mId);
@ -790,7 +785,7 @@ status_t Camera3OutputStream::updateStream(const std::vector<sp<Surface>> &/*out
return INVALID_OPERATION;
}
-void Camera3OutputStream::BufferReleasedListener::onBufferReleased() {
+void Camera3OutputStream::BufferProducerListener::onBufferReleased() {
sp<Camera3OutputStream> stream = mParent.promote();
if (stream == nullptr) {
ALOGV("%s: Parent camera3 output stream was destroyed", __FUNCTION__);
@ -823,6 +818,25 @@ void Camera3OutputStream::BufferReleasedListener::onBufferReleased() {
}
}
void Camera3OutputStream::BufferProducerListener::onBuffersDiscarded(
const std::vector<sp<GraphicBuffer>>& buffers) {
sp<Camera3OutputStream> stream = mParent.promote();
if (stream == nullptr) {
ALOGV("%s: Parent camera3 output stream was destroyed", __FUNCTION__);
return;
}
if (buffers.size() > 0) {
Mutex::Autolock l(stream->mLock);
stream->onBuffersRemovedLocked(buffers);
if (stream->mUseBufferManager) {
stream->mBufferManager->onBuffersRemoved(stream->getId(),
stream->getStreamSetId(), buffers.size());
}
ALOGV("Stream %d: %zu Buffers discarded.", stream->getId(), buffers.size());
}
}
void Camera3OutputStream::onBuffersRemovedLocked(
const std::vector<sp<GraphicBuffer>>& removedBuffers) {
sp<Camera3StreamBufferFreedListener> callback = mBufferFreedListener.promote();

@ -146,18 +146,22 @@ class Camera3OutputStream :
*/
virtual status_t setConsumers(const std::vector<sp<Surface>>& consumers);
-    class BufferReleasedListener : public BnProducerListener {
+    class BufferProducerListener : public SurfaceListener {
         public:
-          BufferReleasedListener(wp<Camera3OutputStream> parent) : mParent(parent) {}
+            BufferProducerListener(wp<Camera3OutputStream> parent, bool needsReleaseNotify)
+                    : mParent(parent), mNeedsReleaseNotify(needsReleaseNotify) {}

-          /**
-          * Implementation of IProducerListener, used to notify this stream that the consumer
-          * has returned a buffer and it is ready to return to Camera3BufferManager for reuse.
-          */
-          virtual void onBufferReleased();
+            /**
+            * Implementation of IProducerListener, used to notify this stream that the consumer
+            * has returned a buffer and it is ready to return to Camera3BufferManager for reuse.
+            */
+            virtual void onBufferReleased();
+
+            virtual bool needsReleaseNotify() { return mNeedsReleaseNotify; }
+            virtual void onBuffersDiscarded(const std::vector<sp<GraphicBuffer>>& buffers);

         private:
-          wp<Camera3OutputStream> mParent;
+            wp<Camera3OutputStream> mParent;
+            bool mNeedsReleaseNotify;
};
virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd);
@ -262,10 +266,10 @@ class Camera3OutputStream :
sp<Camera3BufferManager> mBufferManager;
/**
-     * Buffer released listener, used to notify the buffer manager that a buffer is released
-     * from consumer side.
+     * Buffer producer listener, used to handle notification when a buffer is released
+     * from consumer side, or a set of buffers are discarded by the consumer.
      */
-    sp<BufferReleasedListener> mBufferReleasedListener;
+    sp<BufferProducerListener> mBufferProducerListener;
/**
* Flag indicating if the buffer manager is used to allocate the stream buffers
