Merge "Merge qt-r1-dev-plus-aosp-without-vendor (5817612) into stage-aosp-master" into stage-aosp-master

gugelfrei
Authored by TreeHugger Robot 5 years ago; committed by Android (Google) Code Review
commit fdb43e49e9

@ -37,9 +37,11 @@ namespace.default.link.platform.shared_libs = libEGL.so:libGLESv1_CM.so:libGLESv
namespace.platform.isolated = true
namespace.platform.search.paths = /system/${LIB}
namespace.platform.search.paths += /apex/com.android.runtime/${LIB}
namespace.platform.asan.search.paths = /data/asan/system/${LIB}
namespace.platform.asan.search.paths += /system/${LIB}
namespace.platform.asan.search.paths += /apex/com.android.runtime/${LIB}
# /system/lib/libc.so, etc. are symlinks to /apex/com.android.runtime/lib/bionic/libc.so, etc.
# Add the /apex/... path to the permitted paths because the linker uses realpath(3)

@ -138,6 +138,7 @@ class CameraDevice final : public RefBase {
private:
friend ACameraCaptureSession;
friend ACameraDevice;
camera_status_t checkCameraClosedOrErrorLocked() const;
@ -387,7 +388,6 @@ struct ACameraDevice {
mDevice(new android::acam::CameraDevice(id, cb, std::move(chars), this)) {}
~ACameraDevice();
/*******************
* NDK public APIs *
*******************/

@ -24,6 +24,7 @@
#include <algorithm>
#include <mutex>
#include <string>
#include <variant>
#include <vector>
#include <stdio.h>
@ -49,6 +50,7 @@ static constexpr int kTestImageHeight = 480;
static constexpr int kTestImageFormat = AIMAGE_FORMAT_YUV_420_888;
using android::hardware::camera::common::V1_0::helper::VendorTagDescriptorCache;
using ConfiguredWindows = std::set<native_handle_t *>;
class CameraHelper {
public:
@ -60,9 +62,12 @@ class CameraHelper {
const char* physicalCameraId;
native_handle_t* anw;
};
int initCamera(native_handle_t* imgReaderAnw,
// Retaining the error code in case the caller needs to analyze it.
std::variant<int, ConfiguredWindows> initCamera(native_handle_t* imgReaderAnw,
const std::vector<PhysicalImgReaderInfo>& physicalImgReaders,
bool usePhysicalSettings) {
ConfiguredWindows configuredWindows;
if (imgReaderAnw == nullptr) {
ALOGE("Cannot initialize camera before image reader gets initialized.");
return -1;
@ -78,7 +83,7 @@ class CameraHelper {
ret = ACameraManager_openCamera(mCameraManager, mCameraId, &mDeviceCb, &mDevice);
if (ret != AMEDIA_OK || mDevice == nullptr) {
ALOGE("Failed to open camera, ret=%d, mDevice=%p.", ret, mDevice);
return -1;
return ret;
}
// Create capture session
@ -97,8 +102,9 @@ class CameraHelper {
ALOGE("ACaptureSessionOutputContainer_add failed, ret=%d", ret);
return ret;
}
configuredWindows.insert(mImgReaderAnw);
std::vector<const char*> idPointerList;
std::set<const native_handle_t*> physicalStreamMap;
for (auto& physicalStream : physicalImgReaders) {
ACaptureSessionOutput* sessionOutput = nullptr;
ret = ACaptureSessionPhysicalOutput_create(physicalStream.anw,
@ -112,21 +118,25 @@ class CameraHelper {
ALOGE("ACaptureSessionOutputContainer_add failed, ret=%d", ret);
return ret;
}
mExtraOutputs.push_back(sessionOutput);
ret = ACameraDevice_isSessionConfigurationSupported(mDevice, mOutputs);
if (ret != ACAMERA_OK && ret != ACAMERA_ERROR_UNSUPPORTED_OPERATION) {
ALOGW("ACameraDevice_isSessionConfigurationSupported failed, ret=%d camera id %s",
ret, mCameraId);
ACaptureSessionOutputContainer_remove(mOutputs, sessionOutput);
ACaptureSessionOutput_free(sessionOutput);
continue;
}
configuredWindows.insert(physicalStream.anw);
// Assume that there is at most one physical stream per physical camera.
mPhysicalCameraIds.push_back(physicalStream.physicalCameraId);
idPointerList.push_back(physicalStream.physicalCameraId);
physicalStreamMap.insert(physicalStream.anw);
mSessionPhysicalOutputs.push_back(sessionOutput);
}
ACameraIdList cameraIdList;
cameraIdList.numCameras = idPointerList.size();
cameraIdList.cameraIds = idPointerList.data();
ret = ACameraDevice_isSessionConfigurationSupported(mDevice, mOutputs);
if (ret != ACAMERA_OK && ret != ACAMERA_ERROR_UNSUPPORTED_OPERATION) {
ALOGE("ACameraDevice_isSessionConfigurationSupported failed, ret=%d", ret);
return ret;
}
ret = ACameraDevice_createCaptureSession(mDevice, mOutputs, &mSessionCb, &mSession);
if (ret != AMEDIA_OK) {
ALOGE("ACameraDevice_createCaptureSession failed, ret=%d", ret);
@ -157,6 +167,10 @@ class CameraHelper {
}
for (auto& physicalStream : physicalImgReaders) {
if (physicalStreamMap.find(physicalStream.anw) == physicalStreamMap.end()) {
ALOGI("Skipping physicalStream anw=%p", physicalStream.anw);
continue;
}
ACameraOutputTarget* outputTarget = nullptr;
ret = ACameraOutputTarget_create(physicalStream.anw, &outputTarget);
if (ret != AMEDIA_OK) {
@ -168,11 +182,11 @@ class CameraHelper {
ALOGE("ACaptureRequest_addTarget failed, ret=%d", ret);
return ret;
}
mReqExtraOutputs.push_back(outputTarget);
mReqPhysicalOutputs.push_back(outputTarget);
}
mIsCameraReady = true;
return 0;
return configuredWindows;
}
@ -184,10 +198,10 @@ class CameraHelper {
ACameraOutputTarget_free(mReqImgReaderOutput);
mReqImgReaderOutput = nullptr;
}
for (auto& outputTarget : mReqExtraOutputs) {
for (auto& outputTarget : mReqPhysicalOutputs) {
ACameraOutputTarget_free(outputTarget);
}
mReqExtraOutputs.clear();
mReqPhysicalOutputs.clear();
if (mStillRequest) {
ACaptureRequest_free(mStillRequest);
mStillRequest = nullptr;
@ -201,10 +215,10 @@ class CameraHelper {
ACaptureSessionOutput_free(mImgReaderOutput);
mImgReaderOutput = nullptr;
}
for (auto& extraOutput : mExtraOutputs) {
for (auto& extraOutput : mSessionPhysicalOutputs) {
ACaptureSessionOutput_free(extraOutput);
}
mExtraOutputs.clear();
mSessionPhysicalOutputs.clear();
if (mOutputs) {
ACaptureSessionOutputContainer_free(mOutputs);
mOutputs = nullptr;
@ -262,13 +276,13 @@ class CameraHelper {
// Capture session
ACaptureSessionOutputContainer* mOutputs = nullptr;
ACaptureSessionOutput* mImgReaderOutput = nullptr;
std::vector<ACaptureSessionOutput*> mExtraOutputs;
std::vector<ACaptureSessionOutput*> mSessionPhysicalOutputs;
ACameraCaptureSession* mSession = nullptr;
// Capture request
ACaptureRequest* mStillRequest = nullptr;
ACameraOutputTarget* mReqImgReaderOutput = nullptr;
std::vector<ACameraOutputTarget*> mReqExtraOutputs;
std::vector<ACameraOutputTarget*> mReqPhysicalOutputs;
bool mIsCameraReady = false;
const char* mCameraId;
@ -581,9 +595,11 @@ class AImageReaderVendorTest : public ::testing::Test {
}
CameraHelper cameraHelper(id, mCameraManager);
ret = cameraHelper.initCamera(testCase.getNativeWindow(),
{}/*physicalImageReaders*/, false/*usePhysicalSettings*/);
if (ret < 0) {
std::variant<int, ConfiguredWindows> retInit =
cameraHelper.initCamera(testCase.getNativeWindow(), {}/*physicalImageReaders*/,
false/*usePhysicalSettings*/);
int *retp = std::get_if<int>(&retInit);
if (retp) {
ALOGE("Unable to initialize camera helper");
return false;
}
@ -751,10 +767,15 @@ class AImageReaderVendorTest : public ::testing::Test {
physicalImgReaderInfo.push_back({physicalCameraIds[0], testCases[1]->getNativeWindow()});
physicalImgReaderInfo.push_back({physicalCameraIds[1], testCases[2]->getNativeWindow()});
int ret = cameraHelper.initCamera(testCases[0]->getNativeWindow(),
physicalImgReaderInfo, usePhysicalSettings);
ASSERT_EQ(ret, 0);
std::variant<int, ConfiguredWindows> retInit =
cameraHelper.initCamera(testCases[0]->getNativeWindow(), physicalImgReaderInfo,
usePhysicalSettings);
int *retp = std::get_if<int>(&retInit);
ASSERT_EQ(retp, nullptr);
ConfiguredWindows *configuredWindowsp = std::get_if<ConfiguredWindows>(&retInit);
ASSERT_NE(configuredWindowsp, nullptr);
ASSERT_LE(configuredWindowsp->size(), testCases.size());
int ret = 0;
if (!cameraHelper.isCameraReady()) {
ALOGW("Camera is not ready after successful initialization. It's either because the "
"camera on board lacks the BACKWARDS_COMPATIBLE capability or the device does not have "
@ -776,9 +797,15 @@ class AImageReaderVendorTest : public ::testing::Test {
break;
}
}
ASSERT_EQ(testCases[0]->getAcquiredImageCount(), pictureCount);
ASSERT_EQ(testCases[1]->getAcquiredImageCount(), pictureCount);
ASSERT_EQ(testCases[2]->getAcquiredImageCount(), pictureCount);
for(auto &testCase : testCases) {
auto it = configuredWindowsp->find(testCase->getNativeWindow());
if (it == configuredWindowsp->end()) {
continue;
}
ALOGI("Testing window %p", testCase->getNativeWindow());
ASSERT_EQ(testCase->getAcquiredImageCount(), pictureCount);
}
ASSERT_TRUE(cameraHelper.checkCallbacks(pictureCount));
ACameraMetadata_free(staticMetadata);

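Reviewer note: the initCamera() change above folds the error path and the success payload into one return value, so a failure keeps its raw error code while success carries the set of windows that actually got configured. A minimal sketch of that contract with hypothetical names (Window stands in for native_handle_t*), showing how a caller disambiguates with std::get_if, as the test does:

#include <cstdio>
#include <set>
#include <variant>

// Hypothetical stand-ins for native_handle_t* and the test's ConfiguredWindows.
using Window = const void*;
using ConfiguredWindows = std::set<Window>;

// Mirrors the initCamera() contract: int error code on failure,
// the set of successfully configured windows on success.
std::variant<int, ConfiguredWindows> initSomething(bool fail) {
    if (fail) {
        return -1;  // failure keeps the raw error code for the caller
    }
    return ConfiguredWindows{};  // success carries the payload
}

int main() {
    auto result = initSomething(false);
    if (const int* err = std::get_if<int>(&result)) {
        std::printf("init failed: %d\n", *err);
        return 1;
    }
    const ConfiguredWindows& windows = std::get<ConfiguredWindows>(result);
    std::printf("configured %zu windows\n", windows.size());
    return 0;
}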
@ -157,7 +157,7 @@ C2SoftAacEnc::C2SoftAacEnc(
mSentCodecSpecificData(false),
mInputTimeSet(false),
mInputSize(0),
mInputTimeUs(0),
mNextFrameTimestampUs(0),
mSignalledError(false),
mOutIndex(0u) {
}
@ -183,7 +183,7 @@ c2_status_t C2SoftAacEnc::onStop() {
mSentCodecSpecificData = false;
mInputTimeSet = false;
mInputSize = 0u;
mInputTimeUs = 0;
mNextFrameTimestampUs = 0;
mSignalledError = false;
return C2_OK;
}
@ -201,7 +201,7 @@ c2_status_t C2SoftAacEnc::onFlush_sm() {
mSentCodecSpecificData = false;
mInputTimeSet = false;
mInputSize = 0u;
mInputTimeUs = 0;
mNextFrameTimestampUs = 0;
return C2_OK;
}
@ -365,17 +365,18 @@ void C2SoftAacEnc::process(
capacity = view.capacity();
}
if (!mInputTimeSet && capacity > 0) {
mInputTimeUs = work->input.ordinal.timestamp;
mNextFrameTimestampUs = work->input.ordinal.timestamp;
mInputTimeSet = true;
}
size_t numFrames = (capacity + mInputSize + (eos ? mNumBytesPerInputFrame - 1 : 0))
/ mNumBytesPerInputFrame;
ALOGV("capacity = %zu; mInputSize = %zu; numFrames = %zu mNumBytesPerInputFrame = %u",
capacity, mInputSize, numFrames, mNumBytesPerInputFrame);
ALOGV("capacity = %zu; mInputSize = %zu; numFrames = %zu "
"mNumBytesPerInputFrame = %u inputTS = %lld",
capacity, mInputSize, numFrames,
mNumBytesPerInputFrame, work->input.ordinal.timestamp.peekll());
std::shared_ptr<C2LinearBlock> block;
std::shared_ptr<C2Buffer> buffer;
std::unique_ptr<C2WriteView> wView;
uint8_t *outPtr = temp;
size_t outAvailable = 0u;
@ -442,7 +443,11 @@ void C2SoftAacEnc::process(
const std::shared_ptr<C2Buffer> mBuffer;
};
C2WorkOrdinalStruct outOrdinal = work->input.ordinal;
struct OutputBuffer {
std::shared_ptr<C2Buffer> buffer;
c2_cntr64_t timestampUs;
};
std::list<OutputBuffer> outputBuffers;
while (encoderErr == AACENC_OK && inargs.numInSamples > 0) {
if (numFrames && !block) {
@ -473,29 +478,22 @@ void C2SoftAacEnc::process(
&outargs);
if (encoderErr == AACENC_OK) {
if (buffer) {
outOrdinal.frameIndex = mOutIndex++;
outOrdinal.timestamp = mInputTimeUs;
cloneAndSend(
inputIndex,
work,
FillWork(C2FrameData::FLAG_INCOMPLETE, outOrdinal, buffer));
buffer.reset();
}
if (outargs.numOutBytes > 0) {
mInputSize = 0;
int consumed = (capacity / sizeof(int16_t)) - inargs.numInSamples
+ outargs.numInSamples;
mInputTimeUs = work->input.ordinal.timestamp
c2_cntr64_t currentFrameTimestampUs = mNextFrameTimestampUs;
mNextFrameTimestampUs = work->input.ordinal.timestamp
+ (consumed * 1000000ll / channelCount / sampleRate);
buffer = createLinearBuffer(block, 0, outargs.numOutBytes);
std::shared_ptr<C2Buffer> buffer = createLinearBuffer(block, 0, outargs.numOutBytes);
#if defined(LOG_NDEBUG) && !LOG_NDEBUG
hexdump(outPtr, std::min(outargs.numOutBytes, 256));
#endif
outPtr = temp;
outAvailable = 0;
block.reset();
outputBuffers.push_back({buffer, currentFrameTimestampUs});
} else {
mInputSize += outargs.numInSamples * sizeof(int16_t);
}
@ -506,8 +504,9 @@ void C2SoftAacEnc::process(
inargs.numInSamples -= outargs.numInSamples;
}
}
ALOGV("encoderErr = %d mInputSize = %zu inargs.numInSamples = %d, mInputTimeUs = %lld",
encoderErr, mInputSize, inargs.numInSamples, mInputTimeUs.peekll());
ALOGV("encoderErr = %d mInputSize = %zu "
"inargs.numInSamples = %d, mNextFrameTimestampUs = %lld",
encoderErr, mInputSize, inargs.numInSamples, mNextFrameTimestampUs.peekll());
}
if (eos && inBufferSize[0] > 0) {
@ -542,10 +541,27 @@ void C2SoftAacEnc::process(
&outargs);
}
outOrdinal.frameIndex = mOutIndex++;
outOrdinal.timestamp = mInputTimeUs;
while (outputBuffers.size() > 1) {
const OutputBuffer& front = outputBuffers.front();
C2WorkOrdinalStruct ordinal = work->input.ordinal;
ordinal.frameIndex = mOutIndex++;
ordinal.timestamp = front.timestampUs;
cloneAndSend(
inputIndex,
work,
FillWork(C2FrameData::FLAG_INCOMPLETE, ordinal, front.buffer));
outputBuffers.pop_front();
}
std::shared_ptr<C2Buffer> buffer;
C2WorkOrdinalStruct ordinal = work->input.ordinal;
ordinal.frameIndex = mOutIndex++;
if (!outputBuffers.empty()) {
ordinal.timestamp = outputBuffers.front().timestampUs;
buffer = outputBuffers.front().buffer;
}
// Mark the end of frame
FillWork((C2FrameData::flags_t)(eos ? C2FrameData::FLAG_END_OF_STREAM : 0),
outOrdinal, buffer)(work);
ordinal, buffer)(work);
}
c2_status_t C2SoftAacEnc::drain(
@ -569,7 +585,7 @@ c2_status_t C2SoftAacEnc::drain(
mSentCodecSpecificData = false;
mInputTimeSet = false;
mInputSize = 0u;
mInputTimeUs = 0;
mNextFrameTimestampUs = 0;
// TODO: we don't have any pending work at this time to drain.
return C2_OK;

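Reviewer note: the encoder now remembers each pending output buffer together with the timestamp it was captured at; mNextFrameTimestampUs advances by the time covered by the samples the encoder consumed, and queued buffers are flushed with their own ordinals instead of all sharing one mInputTimeUs. A small sketch of just the timestamp arithmetic, assuming the same units as the diff (consumed counts int16 samples across all channels):

#include <cstdint>
#include <cstdio>

// consumedSamples counts int16 samples across all channels, as in the diff:
// (capacity / sizeof(int16_t)) - inargs.numInSamples + outargs.numInSamples.
int64_t nextFrameTimestampUs(int64_t inputTimestampUs, int64_t consumedSamples,
                             int32_t channelCount, int32_t sampleRate) {
    return inputTimestampUs + consumedSamples * 1000000LL / channelCount / sampleRate;
}

int main() {
    // One AAC frame (1024 frames per channel, stereo, 48 kHz) advances the
    // timestamp by 1024 / 48000 s, i.e. 21333 us.
    std::printf("%lld\n",
                (long long) nextFrameTimestampUs(0, 1024 * 2, 2, 48000));
}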
@ -56,7 +56,7 @@ private:
bool mSentCodecSpecificData;
bool mInputTimeSet;
size_t mInputSize;
c2_cntr64_t mInputTimeUs;
c2_cntr64_t mNextFrameTimestampUs;
bool mSignalledError;
std::atomic_uint64_t mOutIndex;

@ -375,7 +375,11 @@ public:
// consumer usage is queried earlier.
ALOGD("ISConfig%s", status.str().c_str());
if (status.str().empty()) {
ALOGD("ISConfig not changed");
} else {
ALOGD("ISConfig%s", status.str().c_str());
}
return err;
}

@ -224,7 +224,7 @@ CCodecBufferChannel::CCodecBufferChannel(
mFirstValidFrameIndex(0u),
mMetaMode(MODE_NONE),
mInputMetEos(false) {
mOutputSurface.lock()->maxDequeueBuffers = kSmoothnessFactor + kRenderingDepth;
mOutputSurface.lock()->maxDequeueBuffers = 2 * kSmoothnessFactor + kRenderingDepth;
{
Mutexed<Input>::Locked input(mInput);
input->buffers.reset(new DummyInputBuffers(""));
@ -948,7 +948,8 @@ status_t CCodecBufferChannel::start(
uint32_t outputGeneration;
{
Mutexed<OutputSurface>::Locked output(mOutputSurface);
output->maxDequeueBuffers = numOutputSlots + reorderDepth.value + kRenderingDepth;
output->maxDequeueBuffers = numOutputSlots + numInputSlots +
reorderDepth.value + kRenderingDepth;
outputSurface = output->surface ?
output->surface->getIGraphicBufferProducer() : nullptr;
if (outputSurface) {
@ -1332,9 +1333,10 @@ bool CCodecBufferChannel::handleWork(
ALOGV("[%s] onWorkDone: updated reorder depth to %u",
mName, reorderDepth.value);
size_t numOutputSlots = mOutput.lock()->numSlots;
size_t numInputSlots = mInput.lock()->numSlots;
Mutexed<OutputSurface>::Locked output(mOutputSurface);
output->maxDequeueBuffers =
numOutputSlots + reorderDepth.value + kRenderingDepth;
output->maxDequeueBuffers = numOutputSlots + numInputSlots +
reorderDepth.value + kRenderingDepth;
if (output->surface) {
output->surface->setMaxDequeuedBufferCount(output->maxDequeueBuffers);
}
@ -1382,6 +1384,7 @@ bool CCodecBufferChannel::handleWork(
bool outputBuffersChanged = false;
size_t numOutputSlots = 0;
size_t numInputSlots = mInput.lock()->numSlots;
{
Mutexed<Output>::Locked output(mOutput);
output->outputDelay = outputDelay.value;
@ -1406,7 +1409,8 @@ bool CCodecBufferChannel::handleWork(
uint32_t depth = mReorderStash.lock()->depth();
Mutexed<OutputSurface>::Locked output(mOutputSurface);
output->maxDequeueBuffers = numOutputSlots + depth + kRenderingDepth;
output->maxDequeueBuffers = numOutputSlots + numInputSlots +
depth + kRenderingDepth;
if (output->surface) {
output->surface->setMaxDequeuedBufferCount(output->maxDequeueBuffers);
}

@ -235,7 +235,10 @@ struct StandardParams {
const std::vector<ConfigMapper> &getConfigMappersForSdkKey(std::string key) const {
auto it = mConfigMappers.find(key);
if (it == mConfigMappers.end()) {
ALOGD("no c2 equivalents for %s", key.c_str());
if (mComplained.count(key) == 0) {
ALOGD("no c2 equivalents for %s", key.c_str());
mComplained.insert(key);
}
return NO_MAPPERS;
}
ALOGV("found %zu eqs for %s", it->second.size(), key.c_str());
@ -304,6 +307,7 @@ struct StandardParams {
private:
std::map<SdkKey, std::vector<ConfigMapper>> mConfigMappers;
mutable std::set<std::string> mComplained;
};
const std::vector<ConfigMapper> StandardParams::NO_MAPPERS;
@ -508,7 +512,8 @@ void CCodecConfig::initializeStandardParams() {
.limitTo(D::ENCODER & D::VIDEO));
// convert to timestamp base
add(ConfigMapper(KEY_I_FRAME_INTERVAL, C2_PARAMKEY_SYNC_FRAME_INTERVAL, "value")
.withMappers([](C2Value v) -> C2Value {
.limitTo(D::VIDEO & D::ENCODER & D::CONFIG)
.withMapper([](C2Value v) -> C2Value {
// convert from i32 to float
int32_t i32Value;
float fpValue;
@ -518,12 +523,6 @@ void CCodecConfig::initializeStandardParams() {
return int64_t(c2_min(1000000 * fpValue + 0.5, (double)INT64_MAX));
}
return C2Value();
}, [](C2Value v) -> C2Value {
int64_t i64;
if (v.get(&i64)) {
return float(i64) / 1000000;
}
return C2Value();
}));
// remove when codecs switch to proper coding.gop (add support for calculating gop)
deprecated(ConfigMapper("i-frame-period", "coding.gop", "intra-period")
@ -1033,7 +1032,25 @@ bool CCodecConfig::updateFormats(Domain domain) {
}
ReflectedParamUpdater::Dict reflected = mParamUpdater->getParams(paramPointers);
ALOGD("c2 config is %s", reflected.debugString().c_str());
std::string config = reflected.debugString();
std::set<std::string> configLines;
std::string diff;
for (size_t start = 0; start != std::string::npos; ) {
size_t end = config.find('\n', start);
size_t count = (end == std::string::npos)
? std::string::npos
: end - start + 1;
std::string line = config.substr(start, count);
configLines.insert(line);
if (mLastConfig.count(line) == 0) {
diff.append(line);
}
start = (end == std::string::npos) ? std::string::npos : end + 1;
}
if (!diff.empty()) {
ALOGD("c2 config diff is %s", diff.c_str());
}
mLastConfig.swap(configLines);
bool changed = false;
if (domain & mInputDomain) {

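Reviewer note: updateFormats() now logs only what changed between successive config dumps: the debug string is split into '\n'-terminated lines, the lines are kept in a std::set (mLastConfig), and anything absent from the previous snapshot is printed as the diff. The same logic as a standalone sketch with a hypothetical helper name:

#include <cstdio>
#include <set>
#include <string>

// Split |config| into '\n'-terminated lines, append lines missing from the
// previous snapshot to |diff|, and return the new snapshot.
std::set<std::string> diffConfigLines(const std::string& config,
                                      const std::set<std::string>& last,
                                      std::string* diff) {
    std::set<std::string> lines;
    for (size_t start = 0; start != std::string::npos; ) {
        size_t end = config.find('\n', start);
        size_t count = (end == std::string::npos) ? std::string::npos
                                                  : end - start + 1;
        std::string line = config.substr(start, count);
        lines.insert(line);
        if (last.count(line) == 0) {
            diff->append(line);
        }
        start = (end == std::string::npos) ? std::string::npos : end + 1;
    }
    return lines;
}

int main() {
    std::string diff;
    std::set<std::string> last = diffConfigLines("a 1\nb 2\n", {}, &diff);
    diff.clear();
    diffConfigLines("a 1\nb 3\n", last, &diff);
    std::printf("%s", diff.c_str());  // prints only the changed line: "b 3"
}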
@ -134,6 +134,8 @@ struct CCodecConfig {
/// For now support a validation function.
std::map<C2Param::Index, LocalParamValidator> mLocalParams;
std::set<std::string> mLastConfig;
CCodecConfig();
/// initializes the members required to manage the format: descriptors, reflector,

@ -146,7 +146,7 @@ PipelineWatcher::Clock::duration PipelineWatcher::elapsed(
std::chrono::duration_cast<std::chrono::milliseconds>(elapsed).count());
durations.push_back(elapsed);
}
std::nth_element(durations.begin(), durations.end(), durations.begin() + n,
std::nth_element(durations.begin(), durations.begin() + n, durations.end(),
std::greater<Clock::duration>());
return durations[n];
}

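Reviewer note: the PipelineWatcher change is a pure argument-order fix. std::nth_element takes (first, nth, last, comp); the old call passed the iterators as (first, last, nth). A minimal demonstration of the corrected call, where with std::greater the element at index n afterwards is the n-th largest:

#include <algorithm>
#include <cstdio>
#include <functional>
#include <vector>

int main() {
    std::vector<int> durations = {30, 10, 50, 20, 40};
    const size_t n = 1;  // index of the 2nd largest after partitioning
    std::nth_element(durations.begin(), durations.begin() + n, durations.end(),
                     std::greater<int>());
    std::printf("%d\n", durations[n]);  // prints 40
}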
@ -36,6 +36,7 @@
#include "binding/AAudioStreamConfiguration.h"
#include "binding/IAAudioService.h"
#include "binding/AAudioServiceMessage.h"
#include "core/AudioGlobal.h"
#include "core/AudioStreamBuilder.h"
#include "fifo/FifoBuffer.h"
#include "utility/AudioClock.h"

@ -89,7 +89,11 @@ aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t
if (mAudioEndpoint.isFreeRunning()) {
//ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
// Update data queue based on the timing model.
int64_t estimatedRemoteCounter = mClockModel.convertTimeToPosition(currentNanoTime);
// Jitter in the DSP can cause late writes to the FIFO.
// This might be caused by resampling.
// We want to read the FIFO after the latest possible time
// that the DSP could have written the data.
int64_t estimatedRemoteCounter = mClockModel.convertLatestTimeToPosition(currentNanoTime);
// TODO refactor, maybe use setRemoteCounter()
mAudioEndpoint.setDataWriteCounter(estimatedRemoteCounter);
}
@ -139,7 +143,7 @@ aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t
// the writeCounter might have just advanced in the background,
// causing us to sleep until a later burst.
int64_t nextPosition = mAudioEndpoint.getDataReadCounter() + mFramesPerBurst;
wakeTime = mClockModel.convertPositionToTime(nextPosition);
wakeTime = mClockModel.convertPositionToLatestTime(nextPosition);
}
break;
default:

@ -19,12 +19,11 @@
#include <log/log.h>
#include <stdint.h>
#include <algorithm>
#include "utility/AudioClock.h"
#include "IsochronousClockModel.h"
#define MIN_LATENESS_NANOS (10 * AAUDIO_NANOS_PER_MICROSECOND)
using namespace aaudio;
IsochronousClockModel::IsochronousClockModel()
@ -32,7 +31,7 @@ IsochronousClockModel::IsochronousClockModel()
, mMarkerNanoTime(0)
, mSampleRate(48000)
, mFramesPerBurst(64)
, mMaxLatenessInNanos(0)
, mMaxMeasuredLatenessNanos(0)
, mState(STATE_STOPPED)
{
}
@ -41,8 +40,7 @@ IsochronousClockModel::~IsochronousClockModel() {
}
void IsochronousClockModel::setPositionAndTime(int64_t framePosition, int64_t nanoTime) {
ALOGV("setPositionAndTime(%lld, %lld)",
(long long) framePosition, (long long) nanoTime);
ALOGV("setPositionAndTime, %lld, %lld", (long long) framePosition, (long long) nanoTime);
mMarkerFramePosition = framePosition;
mMarkerNanoTime = nanoTime;
}
@ -54,7 +52,9 @@ void IsochronousClockModel::start(int64_t nanoTime) {
}
void IsochronousClockModel::stop(int64_t nanoTime) {
ALOGV("stop(nanos = %lld)\n", (long long) nanoTime);
ALOGD("stop(nanos = %lld) max lateness = %d micros\n",
(long long) nanoTime,
(int) (mMaxMeasuredLatenessNanos / 1000));
setPositionAndTime(convertTimeToPosition(nanoTime), nanoTime);
// TODO should we set position?
mState = STATE_STOPPED;
@ -69,9 +69,10 @@ bool IsochronousClockModel::isRunning() const {
}
void IsochronousClockModel::processTimestamp(int64_t framePosition, int64_t nanoTime) {
// ALOGD("processTimestamp() - framePosition = %lld at nanoTime %llu",
// (long long)framePosition,
// (long long)nanoTime);
mTimestampCount++;
// Log position and time in CSV format so we can import it easily into spreadsheets.
//ALOGD("%s() CSV, %d, %lld, %lld", __func__,
//mTimestampCount, (long long)framePosition, (long long)nanoTime);
int64_t framesDelta = framePosition - mMarkerFramePosition;
int64_t nanosDelta = nanoTime - mMarkerNanoTime;
if (nanosDelta < 1000) {
@ -108,17 +109,56 @@ void IsochronousClockModel::processTimestamp(int64_t framePosition, int64_t nano
case STATE_RUNNING:
if (nanosDelta < expectedNanosDelta) {
// Earlier than expected timestamp.
// This data is probably more accurate so use it.
// or we may be drifting due to a slow HW clock.
// ALOGD("processTimestamp() - STATE_RUNNING - %d < %d micros - EARLY",
// (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000));
// This data is probably more accurate, so use it.
// Or we may be drifting due to a fast HW clock.
//int microsDelta = (int) (nanosDelta / 1000);
//int expectedMicrosDelta = (int) (expectedNanosDelta / 1000);
//ALOGD("%s() - STATE_RUNNING - #%d, %4d micros EARLY",
//__func__, mTimestampCount, expectedMicrosDelta - microsDelta);
setPositionAndTime(framePosition, nanoTime);
} else if (nanosDelta > (expectedNanosDelta + mMaxLatenessInNanos)) {
// Later than expected timestamp.
// ALOGD("processTimestamp() - STATE_RUNNING - %d > %d + %d micros - LATE",
// (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000),
// (int) (mMaxLatenessInNanos / 1000));
setPositionAndTime(framePosition - mFramesPerBurst, nanoTime - mMaxLatenessInNanos);
} else if (nanosDelta > (expectedNanosDelta + (2 * mBurstPeriodNanos))) {
// In this case we do not update mMaxMeasuredLatenessNanos because it
// would force it too high.
// mMaxMeasuredLatenessNanos should range from 1 to 2 * mBurstPeriodNanos
//int32_t measuredLatenessNanos = (int32_t)(nanosDelta - expectedNanosDelta);
//ALOGD("%s() - STATE_RUNNING - #%d, lateness %d - max %d = %4d micros VERY LATE",
//__func__,
//mTimestampCount,
//measuredLatenessNanos / 1000,
//mMaxMeasuredLatenessNanos / 1000,
//(measuredLatenessNanos - mMaxMeasuredLatenessNanos) / 1000
//);
// This typically happens when we are modelling a service instead of a DSP.
setPositionAndTime(framePosition, nanoTime - (2 * mBurstPeriodNanos));
} else if (nanosDelta > (expectedNanosDelta + mMaxMeasuredLatenessNanos)) {
//int32_t previousLatenessNanos = mMaxMeasuredLatenessNanos;
mMaxMeasuredLatenessNanos = (int32_t)(nanosDelta - expectedNanosDelta);
//ALOGD("%s() - STATE_RUNNING - #%d, newmax %d - oldmax %d = %4d micros LATE",
//__func__,
//mTimestampCount,
//mMaxMeasuredLatenessNanos / 1000,
//previousLatenessNanos / 1000,
//(mMaxMeasuredLatenessNanos - previousLatenessNanos) / 1000
//);
// When we are late, it may be because of preemption in the kernel,
// or timing jitter caused by resampling in the DSP,
// or we may be drifting due to a slow HW clock.
// We add a slight drift value just in case there is actual long-term drift
// forward caused by a slower clock.
// If the clock is faster, then the model will get pushed earlier
// by the code in the preceding branch.
// The two opposing forces should allow the model to track the real clock
// over a long time.
int64_t driftingTime = mMarkerNanoTime + expectedNanosDelta + kDriftNanos;
setPositionAndTime(framePosition, driftingTime);
//ALOGD("%s() - #%d, max lateness = %d micros",
//__func__,
//mTimestampCount,
//(int) (mMaxMeasuredLatenessNanos / 1000));
}
break;
default:
@ -138,9 +178,12 @@ void IsochronousClockModel::setFramesPerBurst(int32_t framesPerBurst) {
update();
}
// Update expected lateness based on sampleRate and framesPerBurst
void IsochronousClockModel::update() {
int64_t nanosLate = convertDeltaPositionToTime(mFramesPerBurst); // uses mSampleRate
mMaxLatenessInNanos = (nanosLate > MIN_LATENESS_NANOS) ? nanosLate : MIN_LATENESS_NANOS;
mBurstPeriodNanos = convertDeltaPositionToTime(mFramesPerBurst); // uses mSampleRate
// Timestamps may be late by up to a burst because we are randomly sampling the time period
// after the DSP position is actually updated.
mMaxMeasuredLatenessNanos = mBurstPeriodNanos;
}
int64_t IsochronousClockModel::convertDeltaPositionToTime(int64_t framesDelta) const {
@ -183,11 +226,25 @@ int64_t IsochronousClockModel::convertTimeToPosition(int64_t nanoTime) const {
return position;
}
int32_t IsochronousClockModel::getLateTimeOffsetNanos() const {
// This will never be < 0 because mMaxMeasuredLatenessNanos starts at
// mBurstPeriodNanos and only gets bigger.
return (mMaxMeasuredLatenessNanos - mBurstPeriodNanos) + kExtraLatenessNanos;
}
int64_t IsochronousClockModel::convertPositionToLatestTime(int64_t framePosition) const {
return convertPositionToTime(framePosition) + getLateTimeOffsetNanos();
}
int64_t IsochronousClockModel::convertLatestTimeToPosition(int64_t nanoTime) const {
return convertTimeToPosition(nanoTime - getLateTimeOffsetNanos());
}
void IsochronousClockModel::dump() const {
ALOGD("mMarkerFramePosition = %lld", (long long) mMarkerFramePosition);
ALOGD("mMarkerNanoTime = %lld", (long long) mMarkerNanoTime);
ALOGD("mSampleRate = %6d", mSampleRate);
ALOGD("mFramesPerBurst = %6d", mFramesPerBurst);
ALOGD("mMaxLatenessInNanos = %6d", mMaxLatenessInNanos);
ALOGD("mMaxMeasuredLatenessNanos = %6d", mMaxMeasuredLatenessNanos);
ALOGD("mState = %6d", mState);
}

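Reviewer note: convertPositionToLatestTime() and convertLatestTimeToPosition() are just the ideal conversions shifted by a lateness offset, and the offset stays non-negative because mMaxMeasuredLatenessNanos starts at the burst period and only grows. A sketch of the arithmetic using the constant from the header below, with the ideal conversions stubbed as identity (the real model scales by sample rate around the marker):

#include <cstdint>

struct ClockModelSketch {
    static constexpr int32_t kExtraLatenessNanos = 100 * 1000;

    int32_t burstPeriodNanos = 1333333;          // e.g. 64 frames at 48 kHz
    int32_t maxMeasuredLatenessNanos = 1333333;  // initialized to the burst period

    int32_t lateTimeOffsetNanos() const {
        // Never negative: maxMeasuredLatenessNanos >= burstPeriodNanos.
        return (maxMeasuredLatenessNanos - burstPeriodNanos) + kExtraLatenessNanos;
    }

    // Ideal conversions stubbed as identity for the sketch.
    int64_t positionToTime(int64_t position) const { return position; }
    int64_t timeToPosition(int64_t nanoTime) const { return nanoTime; }

    int64_t positionToLatestTime(int64_t position) const {
        return positionToTime(position) + lateTimeOffsetNanos();
    }
    int64_t latestTimeToPosition(int64_t nanoTime) const {
        return timeToPosition(nanoTime - lateTimeOffsetNanos());
    }
};

int main() {
    ClockModelSketch model;
    model.maxMeasuredLatenessNanos = 1600000;  // a late timestamp was observed
    // Offset = (1600000 - 1333333) + 100000 = 366667 ns.
    return model.positionToLatestTime(0) == 366667 ? 0 : 1;
}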
@ -18,6 +18,7 @@
#define ANDROID_AAUDIO_ISOCHRONOUS_CLOCK_MODEL_H
#include <stdint.h>
#include "utility/AudioClock.h"
namespace aaudio {
@ -78,6 +79,15 @@ public:
*/
int64_t convertPositionToTime(int64_t framePosition) const;
/**
* Calculate the latest estimated time that the stream will be at that position.
* The more jittery the clock is, the later this will be.
*
* @param framePosition
* @return time in nanoseconds
*/
int64_t convertPositionToLatestTime(int64_t framePosition) const;
/**
* Calculate an estimated position where the stream will be at the specified time.
*
@ -86,6 +96,18 @@ public:
*/
int64_t convertTimeToPosition(int64_t nanoTime) const;
/**
* Calculate the corresponding estimated position based on the specified time being
* the latest possible time.
*
* For the same nanoTime, this may return an earlier position than
* convertTimeToPosition().
*
* @param nanoTime
* @return position in frames
*/
int64_t convertLatestTimeToPosition(int64_t nanoTime) const;
/**
* @param framesDelta difference in frames
* @return duration in nanoseconds
@ -101,6 +123,9 @@ public:
void dump() const;
private:
int32_t getLateTimeOffsetNanos() const;
enum clock_model_state_t {
STATE_STOPPED,
STATE_STARTING,
@ -108,13 +133,23 @@ private:
STATE_RUNNING
};
// Amount of time to drift forward when we get a late timestamp.
// This value was calculated to allow tracking of a clock with 50 ppm error.
static constexpr int32_t kDriftNanos = 10 * 1000;
// TODO review value of kExtraLatenessNanos
static constexpr int32_t kExtraLatenessNanos = 100 * 1000;
int64_t mMarkerFramePosition;
int64_t mMarkerNanoTime;
int32_t mSampleRate;
int32_t mFramesPerBurst;
int32_t mMaxLatenessInNanos;
int32_t mBurstPeriodNanos;
// Includes mBurstPeriodNanos because we sample randomly over time.
int32_t mMaxMeasuredLatenessNanos;
clock_model_state_t mState;
int32_t mTimestampCount = 0;
void update();
};

@ -302,6 +302,8 @@ extern "C" int EffectCreate(const effect_uuid_t *uuid,
for (int i = 0; i < FIVEBAND_NUMBANDS; i++) {
pContext->pBundledContext->bandGaindB[i] = EQNB_5BandSoftPresets[i];
}
pContext->pBundledContext->effectProcessCalled = 0;
pContext->pBundledContext->effectInDrain = 0;
ALOGV("\tEffectCreate - Calling LvmBundle_init");
ret = LvmBundle_init(pContext);
@ -394,6 +396,8 @@ extern "C" int EffectRelease(effect_handle_t handle){
// Clear the instantiated flag for the effect
// protect against the case where an effect is un-instantiated without being disabled
int &effectInDrain = pContext->pBundledContext->effectInDrain;
if(pContext->EffectType == LVM_BASS_BOOST) {
ALOGV("\tEffectRelease LVM_BASS_BOOST Clearing global instantiated flag");
pSessionContext->bBassInstantiated = LVM_FALSE;
@ -418,12 +422,16 @@ extern "C" int EffectRelease(effect_handle_t handle){
} else if(pContext->EffectType == LVM_VOLUME) {
ALOGV("\tEffectRelease LVM_VOLUME Clearing global instantiated flag");
pSessionContext->bVolumeInstantiated = LVM_FALSE;
if (pContext->pBundledContext->bVolumeEnabled == LVM_TRUE){
// There is no samplesToExitCount for volume so we also use the drain flag to check
// if we should decrement the effects enabled.
if (pContext->pBundledContext->bVolumeEnabled == LVM_TRUE
|| (effectInDrain & 1 << LVM_VOLUME) != 0) {
pContext->pBundledContext->NumberEffectsEnabled--;
}
} else {
ALOGV("\tLVM_ERROR : EffectRelease : Unsupported effect\n\n\n\n\n\n\n");
}
effectInDrain &= ~(1 << pContext->EffectType); // no need to drain if released
// Disable effect, in this case ignore errors (return codes)
// if an effect has already been disabled
@ -3124,8 +3132,9 @@ LVM_INT16 LVC_ToDB_s32Tos16(LVM_INT32 Lin_fix)
int Effect_setEnabled(EffectContext *pContext, bool enabled)
{
ALOGV("\tEffect_setEnabled() type %d, enabled %d", pContext->EffectType, enabled);
ALOGV("%s effectType %d, enabled %d, currently enabled %d", __func__,
pContext->EffectType, enabled, pContext->pBundledContext->NumberEffectsEnabled);
int &effectInDrain = pContext->pBundledContext->effectInDrain;
if (enabled) {
// Bass boost or Virtualizer can be temporarily disabled if playing over device speaker due
// to their nature.
@ -3139,6 +3148,7 @@ int Effect_setEnabled(EffectContext *pContext, bool enabled)
if(pContext->pBundledContext->SamplesToExitCountBb <= 0){
pContext->pBundledContext->NumberEffectsEnabled++;
}
effectInDrain &= ~(1 << LVM_BASS_BOOST);
pContext->pBundledContext->SamplesToExitCountBb =
(LVM_INT32)(pContext->pBundledContext->SamplesPerSecond*0.1);
pContext->pBundledContext->bBassEnabled = LVM_TRUE;
@ -3152,6 +3162,7 @@ int Effect_setEnabled(EffectContext *pContext, bool enabled)
if(pContext->pBundledContext->SamplesToExitCountEq <= 0){
pContext->pBundledContext->NumberEffectsEnabled++;
}
effectInDrain &= ~(1 << LVM_EQUALIZER);
pContext->pBundledContext->SamplesToExitCountEq =
(LVM_INT32)(pContext->pBundledContext->SamplesPerSecond*0.1);
pContext->pBundledContext->bEqualizerEnabled = LVM_TRUE;
@ -3164,6 +3175,7 @@ int Effect_setEnabled(EffectContext *pContext, bool enabled)
if(pContext->pBundledContext->SamplesToExitCountVirt <= 0){
pContext->pBundledContext->NumberEffectsEnabled++;
}
effectInDrain &= ~(1 << LVM_VIRTUALIZER);
pContext->pBundledContext->SamplesToExitCountVirt =
(LVM_INT32)(pContext->pBundledContext->SamplesPerSecond*0.1);
pContext->pBundledContext->bVirtualizerEnabled = LVM_TRUE;
@ -3174,7 +3186,10 @@ int Effect_setEnabled(EffectContext *pContext, bool enabled)
ALOGV("\tEffect_setEnabled() LVM_VOLUME is already enabled");
return -EINVAL;
}
pContext->pBundledContext->NumberEffectsEnabled++;
if ((effectInDrain & 1 << LVM_VOLUME) == 0) {
pContext->pBundledContext->NumberEffectsEnabled++;
}
effectInDrain &= ~(1 << LVM_VOLUME);
pContext->pBundledContext->bVolumeEnabled = LVM_TRUE;
break;
default:
@ -3192,6 +3207,7 @@ int Effect_setEnabled(EffectContext *pContext, bool enabled)
return -EINVAL;
}
pContext->pBundledContext->bBassEnabled = LVM_FALSE;
effectInDrain |= 1 << LVM_BASS_BOOST;
break;
case LVM_EQUALIZER:
if (pContext->pBundledContext->bEqualizerEnabled == LVM_FALSE) {
@ -3199,6 +3215,7 @@ int Effect_setEnabled(EffectContext *pContext, bool enabled)
return -EINVAL;
}
pContext->pBundledContext->bEqualizerEnabled = LVM_FALSE;
effectInDrain |= 1 << LVM_EQUALIZER;
break;
case LVM_VIRTUALIZER:
if (pContext->pBundledContext->bVirtualizerEnabled == LVM_FALSE) {
@ -3206,6 +3223,7 @@ int Effect_setEnabled(EffectContext *pContext, bool enabled)
return -EINVAL;
}
pContext->pBundledContext->bVirtualizerEnabled = LVM_FALSE;
effectInDrain |= 1 << LVM_VIRTUALIZER;
break;
case LVM_VOLUME:
if (pContext->pBundledContext->bVolumeEnabled == LVM_FALSE) {
@ -3213,6 +3231,7 @@ int Effect_setEnabled(EffectContext *pContext, bool enabled)
return -EINVAL;
}
pContext->pBundledContext->bVolumeEnabled = LVM_FALSE;
effectInDrain |= 1 << LVM_VOLUME;
break;
default:
ALOGV("\tEffect_setEnabled() invalid effect type");
@ -3283,6 +3302,38 @@ int Effect_process(effect_handle_t self,
ALOGV("\tLVM_ERROR : Effect_process() ERROR NULL INPUT POINTER OR FRAME COUNT IS WRONG");
return -EINVAL;
}
int &effectProcessCalled = pContext->pBundledContext->effectProcessCalled;
int &effectInDrain = pContext->pBundledContext->effectInDrain;
if ((effectProcessCalled & 1 << pContext->EffectType) != 0) {
ALOGW("Effect %d already called", pContext->EffectType);
const int undrainedEffects = effectInDrain & ~effectProcessCalled;
if ((undrainedEffects & 1 << LVM_BASS_BOOST) != 0) {
ALOGW("Draining BASS_BOOST");
pContext->pBundledContext->SamplesToExitCountBb = 0;
--pContext->pBundledContext->NumberEffectsEnabled;
effectInDrain &= ~(1 << LVM_BASS_BOOST);
}
if ((undrainedEffects & 1 << LVM_EQUALIZER) != 0) {
ALOGW("Draining EQUALIZER");
pContext->pBundledContext->SamplesToExitCountEq = 0;
--pContext->pBundledContext->NumberEffectsEnabled;
effectInDrain &= ~(1 << LVM_EQUALIZER);
}
if ((undrainedEffects & 1 << LVM_VIRTUALIZER) != 0) {
ALOGW("Draining VIRTUALIZER");
pContext->pBundledContext->SamplesToExitCountVirt = 0;
--pContext->pBundledContext->NumberEffectsEnabled;
effectInDrain &= ~(1 << LVM_VIRTUALIZER);
}
if ((undrainedEffects & 1 << LVM_VOLUME) != 0) {
ALOGW("Draining VOLUME");
--pContext->pBundledContext->NumberEffectsEnabled;
effectInDrain &= ~(1 << LVM_VOLUME);
}
}
effectProcessCalled |= 1 << pContext->EffectType;
if ((pContext->pBundledContext->bBassEnabled == LVM_FALSE)&&
(pContext->EffectType == LVM_BASS_BOOST)){
//ALOGV("\tEffect_process() LVM_BASS_BOOST Effect is not enabled");
@ -3291,9 +3342,12 @@ int Effect_process(effect_handle_t self,
//ALOGV("\tEffect_process: Waiting to turn off BASS_BOOST, %d samples left",
// pContext->pBundledContext->SamplesToExitCountBb);
}
if(pContext->pBundledContext->SamplesToExitCountBb <= 0) {
if (pContext->pBundledContext->SamplesToExitCountBb <= 0) {
status = -ENODATA;
pContext->pBundledContext->NumberEffectsEnabled--;
if ((effectInDrain & 1 << LVM_BASS_BOOST) != 0) {
pContext->pBundledContext->NumberEffectsEnabled--;
effectInDrain &= ~(1 << LVM_BASS_BOOST);
}
ALOGV("\tEffect_process() this is the last frame for LVM_BASS_BOOST");
}
}
@ -3301,7 +3355,10 @@ int Effect_process(effect_handle_t self,
(pContext->EffectType == LVM_VOLUME)){
//ALOGV("\tEffect_process() LVM_VOLUME Effect is not enabled");
status = -ENODATA;
pContext->pBundledContext->NumberEffectsEnabled--;
if ((effectInDrain & 1 << LVM_VOLUME) != 0) {
pContext->pBundledContext->NumberEffectsEnabled--;
effectInDrain &= ~(1 << LVM_VOLUME);
}
}
if ((pContext->pBundledContext->bEqualizerEnabled == LVM_FALSE)&&
(pContext->EffectType == LVM_EQUALIZER)){
@ -3311,9 +3368,12 @@ int Effect_process(effect_handle_t self,
//ALOGV("\tEffect_process: Waiting to turn off EQUALIZER, %d samples left",
// pContext->pBundledContext->SamplesToExitCountEq);
}
if(pContext->pBundledContext->SamplesToExitCountEq <= 0) {
if (pContext->pBundledContext->SamplesToExitCountEq <= 0) {
status = -ENODATA;
pContext->pBundledContext->NumberEffectsEnabled--;
if ((effectInDrain & 1 << LVM_EQUALIZER) != 0) {
pContext->pBundledContext->NumberEffectsEnabled--;
effectInDrain &= ~(1 << LVM_EQUALIZER);
}
ALOGV("\tEffect_process() this is the last frame for LVM_EQUALIZER");
}
}
@ -3326,9 +3386,12 @@ int Effect_process(effect_handle_t self,
//ALOGV("\tEffect_process: Waiting to turn off VIRTUALIZER, %d samples left",
// pContext->pBundledContext->SamplesToExitCountVirt);
}
if(pContext->pBundledContext->SamplesToExitCountVirt <= 0) {
if (pContext->pBundledContext->SamplesToExitCountVirt <= 0) {
status = -ENODATA;
pContext->pBundledContext->NumberEffectsEnabled--;
if ((effectInDrain & 1 << LVM_VIRTUALIZER) != 0) {
pContext->pBundledContext->NumberEffectsEnabled--;
effectInDrain &= ~(1 << LVM_VIRTUALIZER);
}
ALOGV("\tEffect_process() this is the last frame for LVM_VIRTUALIZER");
}
}
@ -3337,8 +3400,18 @@ int Effect_process(effect_handle_t self,
pContext->pBundledContext->NumberEffectsCalled++;
}
if(pContext->pBundledContext->NumberEffectsCalled ==
pContext->pBundledContext->NumberEffectsEnabled){
if (pContext->pBundledContext->NumberEffectsCalled >=
pContext->pBundledContext->NumberEffectsEnabled) {
// We expect the # effects called to be equal to # effects enabled in sequence (including
// draining effects). Warn if this is not the case due to inconsistent calls.
ALOGW_IF(pContext->pBundledContext->NumberEffectsCalled >
pContext->pBundledContext->NumberEffectsEnabled,
"%s Number of effects called %d is greater than number of effects enabled %d",
__func__, pContext->pBundledContext->NumberEffectsCalled,
pContext->pBundledContext->NumberEffectsEnabled);
effectProcessCalled = 0; // reset our consistency check.
//ALOGV("\tEffect_process Calling process with %d effects enabled, %d called: Effect %d",
//pContext->pBundledContext->NumberEffectsEnabled,
//pContext->pBundledContext->NumberEffectsCalled, pContext->EffectType);

@ -110,6 +110,14 @@ struct BundledEffectContext{
#ifdef SUPPORT_MC
LVM_INT32 ChMask;
#endif
/* Bitmask of effects for which a drain is in progress after the effect was disabled.
The bit corresponding to an effect is set by 1 << lvm_effect_en. */
int effectInDrain;
/* Bitmask of effects for which process() was called in the current round.
The bit corresponding to an effect is set by 1 << lvm_effect_en. */
int effectProcessCalled;
};
/* SessionContext : One session */

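Reviewer note: both new fields are plain bitmasks indexed by the effect type. Disabling an effect sets its bit in effectInDrain; re-enabling, releasing, or finishing the drain clears it, so NumberEffectsEnabled is decremented exactly once per disable. A hypothetical standalone sketch of the mask handling (enum values illustrative only):

#include <cstdio>

// Follows the 1 << lvm_effect_en convention described above.
enum lvm_effect_en { LVM_BASS_BOOST, LVM_VIRTUALIZER, LVM_EQUALIZER, LVM_VOLUME };

int main() {
    int effectInDrain = 0;
    int numberEffectsEnabled = 1;

    // Disable: the effect starts draining.
    effectInDrain |= 1 << LVM_EQUALIZER;

    // Drain completion (or re-enable, or release): decrement only if the
    // drain bit is still set, then clear it, so the count drops only once.
    if ((effectInDrain & (1 << LVM_EQUALIZER)) != 0) {
        --numberEffectsEnabled;
        effectInDrain &= ~(1 << LVM_EQUALIZER);
    }
    std::printf("enabled=%d drain=0x%x\n", numberEffectsEnabled, effectInDrain);
}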
@ -77,10 +77,13 @@ status_t Visualizer::setEnabled(bool enabled)
if (t != 0) {
if (enabled) {
if (t->exitPending()) {
mCaptureLock.unlock();
if (t->requestExitAndWait() == WOULD_BLOCK) {
mCaptureLock.lock();
ALOGE("Visualizer::enable() called from thread");
return INVALID_OPERATION;
}
mCaptureLock.lock();
}
}
t->mLock.lock();

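Reviewer note: the Visualizer fix releases mCaptureLock around requestExitAndWait() so the capture thread can reach its exit point (it may need the lock itself), then reacquires it. The same unlock-around-join shape, sketched with std::mutex and std::thread rather than the android::Thread API:

#include <mutex>
#include <thread>

std::mutex gLock;

// Joining while holding gLock would deadlock if the worker needs gLock
// to finish, so release it across the blocking wait.
void stopWorker(std::thread& worker) {
    std::unique_lock<std::mutex> guard(gLock);
    // ... state checks under the lock ...
    guard.unlock();
    if (worker.joinable()) {
        worker.join();
    }
    guard.lock();
    // ... post-join cleanup under the lock ...
}

int main() {
    std::thread worker([] {
        std::lock_guard<std::mutex> guard(gLock);  // worker briefly needs the lock
    });
    stopWorker(worker);
}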
@ -1635,8 +1635,13 @@ status_t MPEG4Writer::setCaptureRate(float captureFps) {
return BAD_VALUE;
}
// Increase moovExtraSize once only irrespective of how many times
// setCaptureRate is called.
bool containsCaptureFps = mMetaKeys->contains(kMetaKey_CaptureFps);
mMetaKeys->setFloat(kMetaKey_CaptureFps, captureFps);
mMoovExtraSize += sizeof(kMetaKey_CaptureFps) + 4 + 32;
if (!containsCaptureFps) {
mMoovExtraSize += sizeof(kMetaKey_CaptureFps) + 4 + 32;
}
return OK;
}

@ -96,10 +96,18 @@ ssize_t MediaMuxer::addTrack(const sp<AMessage> &format) {
sp<MediaAdapter> newTrack = new MediaAdapter(trackMeta);
status_t result = mWriter->addSource(newTrack);
if (result == OK) {
return mTrackList.add(newTrack);
if (result != OK) {
return -1;
}
float captureFps = -1.0;
if (format->findAsFloat("time-lapse-fps", &captureFps)) {
ALOGV("addTrack() time-lapse-fps: %f", captureFps);
result = mWriter->setCaptureRate(captureFps);
if (result != OK) {
ALOGW("addTrack() setCaptureRate failed :%d", result);
}
}
return -1;
return mTrackList.add(newTrack);
}
status_t MediaMuxer::setOrientationHint(int degrees) {

@ -1355,6 +1355,14 @@ PV_STATUS DecodeShortHeader(VideoDecData *video, Vop *currVop)
int tmpHeight = (tmpDisplayHeight + 15) & -16;
int tmpWidth = (tmpDisplayWidth + 15) & -16;
if (tmpWidth > video->width)
{
// while allowed by the spec, this decoder does not actually
// support an increase in size.
ALOGE("width increase not supported");
status = PV_FAIL;
goto return_point;
}
if (tmpHeight * tmpWidth > video->size)
{
// This is just possibly "b/37079296".

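Reviewer note, for readers of the (x + 15) & -16 expressions above: they round the display dimensions up to the next multiple of 16 (macroblock alignment), since -16 is ~15 in two's complement and the AND clears the low four bits after the +15 carry:

#include <cstdio>

// Round up to the next multiple of 16, as in tmpWidth/tmpHeight above.
int align16(int x) { return (x + 15) & -16; }

int main() {
    std::printf("%d %d %d\n", align16(1), align16(176), align16(177));
    // prints: 16 176 192
}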
@ -35,6 +35,10 @@ struct MediaWriter : public RefBase {
virtual status_t start(MetaData *params = NULL) = 0;
virtual status_t stop() = 0;
virtual status_t pause() = 0;
virtual status_t setCaptureRate(float /* captureFps */) {
ALOGW("setCaptureRate unsupported");
return ERROR_UNSUPPORTED;
}
virtual void setMaxFileSize(int64_t bytes) { mMaxFileSizeLimitBytes = bytes; }
virtual void setMaxFileDuration(int64_t durationUs) { mMaxFileDurationLimitUs = durationUs; }

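Reviewer note: making setCaptureRate() a base-class virtual with an "unsupported" default means only writers that care (MPEG4Writer above) override it, and MediaMuxer can call it unconditionally and merely warn on the error. A generic sketch of that shape with hypothetical names and an illustrative error code:

#include <cstdio>

constexpr int OK = 0;
constexpr int ERROR_UNSUPPORTED = -1010;  // illustrative value only

struct Writer {
    virtual ~Writer() = default;
    // Optional capability: the default keeps every existing subclass valid;
    // only writers that support a capture rate override it.
    virtual int setCaptureRate(float /* captureFps */) {
        std::printf("setCaptureRate unsupported\n");
        return ERROR_UNSUPPORTED;
    }
};

struct TimeLapseWriter : Writer {
    int setCaptureRate(float fps) override {
        std::printf("capture fps %.2f recorded\n", fps);
        return OK;
    }
};

int main() {
    TimeLapseWriter timeLapse;
    Writer plain;
    timeLapse.setCaptureRate(0.5f);  // supported
    plain.setCaptureRate(0.5f);      // falls back to the default
}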
@ -35,6 +35,7 @@ AImage::AImage(AImageReader* reader, int32_t format, uint64_t usage, BufferItem*
int64_t timestamp, int32_t width, int32_t height, int32_t numPlanes) :
mReader(reader), mFormat(format), mUsage(usage), mBuffer(buffer), mLockedBuffer(nullptr),
mTimestamp(timestamp), mWidth(width), mHeight(height), mNumPlanes(numPlanes) {
LOG_FATAL_IF(reader == nullptr, "AImageReader shouldn't be null while creating AImage");
}
AImage::~AImage() {
@ -57,14 +58,9 @@ AImage::close(int releaseFenceFd) {
if (mIsClosed) {
return;
}
sp<AImageReader> reader = mReader.promote();
if (reader != nullptr) {
reader->releaseImageLocked(this, releaseFenceFd);
} else if (mBuffer != nullptr) {
LOG_ALWAYS_FATAL("%s: parent AImageReader closed without releasing image %p",
__FUNCTION__, this);
if (!mReader->mIsClosed) {
mReader->releaseImageLocked(this, releaseFenceFd);
}
// Should have been set to nullptr in releaseImageLocked
// Set to nullptr here for extra safety only
mBuffer = nullptr;
@ -83,22 +79,12 @@ AImage::free() {
void
AImage::lockReader() const {
sp<AImageReader> reader = mReader.promote();
if (reader == nullptr) {
// Reader has been closed
return;
}
reader->mLock.lock();
mReader->mLock.lock();
}
void
AImage::unlockReader() const {
sp<AImageReader> reader = mReader.promote();
if (reader == nullptr) {
// Reader has been closed
return;
}
reader->mLock.unlock();
mReader->mLock.unlock();
}
media_status_t

@ -72,7 +72,7 @@ struct AImage {
uint32_t getJpegSize() const;
// When the reader is closed, AImage will only accept the close API call
wp<AImageReader> mReader;
const sp<AImageReader> mReader;
const int32_t mFormat;
const uint64_t mUsage; // AHARDWAREBUFFER_USAGE_* flags.
BufferItem* mBuffer;

@ -113,12 +113,12 @@ AImageReader::getNumPlanesForFormat(int32_t format) {
void
AImageReader::FrameListener::onFrameAvailable(const BufferItem& /*item*/) {
Mutex::Autolock _l(mLock);
sp<AImageReader> reader = mReader.promote();
if (reader == nullptr) {
ALOGW("A frame is available after AImageReader closed!");
return; // reader has been closed
}
Mutex::Autolock _l(mLock);
if (mListener.onImageAvailable == nullptr) {
return; // No callback registered
}
@ -143,12 +143,12 @@ AImageReader::FrameListener::setImageListener(AImageReader_ImageListener* listen
void
AImageReader::BufferRemovedListener::onBufferFreed(const wp<GraphicBuffer>& graphicBuffer) {
Mutex::Autolock _l(mLock);
sp<AImageReader> reader = mReader.promote();
if (reader == nullptr) {
ALOGW("A frame is available after AImageReader closed!");
return; // reader has been closed
}
Mutex::Autolock _l(mLock);
if (mListener.onBufferRemoved == nullptr) {
return; // No callback registered
}
@ -272,6 +272,11 @@ AImageReader::AImageReader(int32_t width,
mFrameListener(new FrameListener(this)),
mBufferRemovedListener(new BufferRemovedListener(this)) {}
AImageReader::~AImageReader() {
Mutex::Autolock _l(mLock);
LOG_FATAL_IF(mIsClosed != true, "AImageReader not closed before destruction");
}
media_status_t
AImageReader::init() {
PublicFormat publicFormat = static_cast<PublicFormat>(mFormat);
@ -347,8 +352,12 @@ AImageReader::init() {
return AMEDIA_OK;
}
AImageReader::~AImageReader() {
void AImageReader::close() {
Mutex::Autolock _l(mLock);
if (mIsClosed) {
return;
}
mIsClosed = true;
AImageReader_ImageListener nullListener = {nullptr, nullptr};
setImageListenerLocked(&nullListener);
@ -741,6 +750,7 @@ EXPORT
void AImageReader_delete(AImageReader* reader) {
ALOGV("%s", __FUNCTION__);
if (reader != nullptr) {
reader->close();
reader->decStrong((void*) AImageReader_delete);
}
return;

@ -76,6 +76,7 @@ struct AImageReader : public RefBase {
int32_t getHeight() const { return mHeight; };
int32_t getFormat() const { return mFormat; };
int32_t getMaxImages() const { return mMaxImages; };
void close();
private:
@ -134,7 +135,7 @@ struct AImageReader : public RefBase {
private:
AImageReader_ImageListener mListener = {nullptr, nullptr};
wp<AImageReader> mReader;
const wp<AImageReader> mReader;
Mutex mLock;
};
sp<FrameListener> mFrameListener;
@ -149,7 +150,7 @@ struct AImageReader : public RefBase {
private:
AImageReader_BufferRemovedListener mListener = {nullptr, nullptr};
wp<AImageReader> mReader;
const wp<AImageReader> mReader;
Mutex mLock;
};
sp<BufferRemovedListener> mBufferRemovedListener;
@ -165,6 +166,7 @@ struct AImageReader : public RefBase {
native_handle_t* mWindowHandle = nullptr;
List<AImage*> mAcquiredImages;
bool mIsClosed = false;
Mutex mLock;
};

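Reviewer note: AImageReader_delete() now calls close() before dropping the strong reference, close() is made idempotent behind mIsClosed, and the destructor only asserts that the close already happened. The same close-before-destroy contract in a generic sketch:

#include <cassert>
#include <mutex>

class Resource {
public:
    ~Resource() {
        std::lock_guard<std::mutex> guard(mLock);
        // Destruction is only legal after an explicit close().
        assert(mIsClosed && "Resource not closed before destruction");
    }
    // Idempotent: only the first call tears anything down.
    void close() {
        std::lock_guard<std::mutex> guard(mLock);
        if (mIsClosed) {
            return;
        }
        mIsClosed = true;
        // ... detach listeners, release buffers ...
    }

private:
    std::mutex mLock;
    bool mIsClosed = false;
};

int main() {
    Resource r;
    r.close();
    r.close();  // no-op
}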
@ -24,6 +24,7 @@
#include "Configuration.h"
#include <utils/Log.h>
#include <system/audio_effects/effect_aec.h>
#include <system/audio_effects/effect_dynamicsprocessing.h>
#include <system/audio_effects/effect_ns.h>
#include <system/audio_effects/effect_visualizer.h>
#include <audio_utils/channels.h>
@ -2569,7 +2570,8 @@ bool AudioFlinger::EffectChain::isEffectEligibleForSuspend(const effect_descript
if ((mSessionId == AUDIO_SESSION_OUTPUT_MIX) &&
(((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) ||
(memcmp(&desc.type, SL_IID_VISUALIZATION, sizeof(effect_uuid_t)) == 0) ||
(memcmp(&desc.type, SL_IID_VOLUME, sizeof(effect_uuid_t)) == 0))) {
(memcmp(&desc.type, SL_IID_VOLUME, sizeof(effect_uuid_t)) == 0) ||
(memcmp(&desc.type, SL_IID_DYNAMICSPROCESSING, sizeof(effect_uuid_t)) == 0))) {
return false;
}
return true;

@ -3956,6 +3956,32 @@ status_t AudioFlinger::PlaybackThread::getTimestamp_l(AudioTimestamp& timestamp)
return INVALID_OPERATION;
}
// For dedicated VoIP outputs, let the HAL apply the stream volume. Track volume is
// still applied by the mixer.
// All tracks attached to a mixer with flag VOIP_RX are tied to the same
// stream type STREAM_VOICE_CALL so this will only change the HAL volume once even
// if more than one track is active
status_t AudioFlinger::PlaybackThread::handleVoipVolume_l(float *volume)
{
status_t result = NO_ERROR;
if ((mOutput->flags & AUDIO_OUTPUT_FLAG_VOIP_RX) != 0) {
if (*volume != mLeftVolFloat) {
result = mOutput->stream->setVolume(*volume, *volume);
ALOGE_IF(result != OK,
"Error when setting output stream volume: %d", result);
if (result == NO_ERROR) {
mLeftVolFloat = *volume;
}
}
// if stream volume was successfully sent to the HAL, mLeftVolFloat == v here and we
// remove stream volume contribution from software volume.
if (mLeftVolFloat == *volume) {
*volume = 1.0f;
}
}
return result;
}
status_t AudioFlinger::MixerThread::createAudioPatch_l(const struct audio_patch *patch,
audio_patch_handle_t *handle)
{
@ -4758,22 +4784,25 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac
// no acknowledgement required for newly active tracks
}
sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
// cache the combined master volume and stream type volume for fast mixer; this
// lacks any synchronization or barrier so VolumeProvider may read a stale value
const float vh = track->getVolumeHandler()->getVolume(
proxy->framesReleased()).first;
float volume;
if (track->isPlaybackRestricted()) {
if (track->isPlaybackRestricted() || mStreamTypes[track->streamType()].mute) {
volume = 0.f;
} else {
volume = masterVolume
* mStreamTypes[track->streamType()].volume
* vh;
volume = masterVolume * mStreamTypes[track->streamType()].volume;
}
handleVoipVolume_l(&volume);
// cache the combined master volume and stream type volume for fast mixer; this
// lacks any synchronization or barrier so VolumeProvider may read a stale value
const float vh = track->getVolumeHandler()->getVolume(
proxy->framesReleased()).first;
volume *= vh;
track->mCachedVolume = volume;
gain_minifloat_packed_t vlr = proxy->getVolumeLR();
float vlf = volume * float_from_gain(gain_minifloat_unpack_left(vlr));
float vrf = volume * float_from_gain(gain_minifloat_unpack_right(vlr));
track->setFinalVolume((vlf + vrf) / 2.f);
++fastTracks;
} else {
@ -4916,20 +4945,22 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac
uint32_t vl, vr; // in U8.24 integer format
float vlf, vrf, vaf; // in [0.0, 1.0] float format
// read original volumes with volume control
float typeVolume = mStreamTypes[track->streamType()].volume;
float v = masterVolume * typeVolume;
float v = masterVolume * mStreamTypes[track->streamType()].volume;
// Always fetch volumeshaper volume to ensure state is updated.
const sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
const float vh = track->getVolumeHandler()->getVolume(
track->mAudioTrackServerProxy->framesReleased()).first;
if (track->isPausing() || mStreamTypes[track->streamType()].mute
|| track->isPlaybackRestricted()) {
if (mStreamTypes[track->streamType()].mute || track->isPlaybackRestricted()) {
v = 0;
}
handleVoipVolume_l(&v);
if (track->isPausing()) {
vl = vr = 0;
vlf = vrf = vaf = 0.;
if (track->isPausing()) {
track->setPaused();
}
track->setPaused();
} else {
gain_minifloat_packed_t vlr = proxy->getVolumeLR();
vlf = float_from_gain(gain_minifloat_unpack_left(vlr));
@ -4981,25 +5012,6 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac
track->mHasVolumeController = false;
}
// For dedicated VoIP outputs, let the HAL apply the stream volume. Track volume is
// still applied by the mixer.
if ((mOutput->flags & AUDIO_OUTPUT_FLAG_VOIP_RX) != 0) {
v = mStreamTypes[track->streamType()].mute ? 0.0f : v;
if (v != mLeftVolFloat) {
status_t result = mOutput->stream->setVolume(v, v);
ALOGE_IF(result != OK, "Error when setting output stream volume: %d", result);
if (result == OK) {
mLeftVolFloat = v;
}
}
// if stream volume was successfully sent to the HAL, mLeftVolFloat == v here and we
// remove stream volume contribution from software volume.
if (v != 0.0f && mLeftVolFloat == v) {
vlf = min(1.0f, vlf / v);
vrf = min(1.0f, vrf / v);
vaf = min(1.0f, vaf / v);
}
}
// XXX: these things DON'T need to be done each time
mAudioMixer->setBufferProvider(trackId, track);
mAudioMixer->enable(trackId);

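Reviewer note: handleVoipVolume_l() centralizes logic both mixer paths need: push the stream volume to the HAL only when it changed, and once the HAL owns it, report 1.0 so software gain does not apply the same volume twice. A sketch of that normalization with the HAL call stubbed (the float equality compare mirrors the diff's own mLeftVolFloat check):

#include <cstdio>

struct VoipVolumeSketch {
    float lastVolumeSentToHal = -1.0f;  // mirrors mLeftVolFloat

    // Stub standing in for the output stream's setVolume().
    bool halSetVolume(float v) {
        std::printf("HAL stream volume -> %.2f\n", v);
        return true;
    }

    void handleVoipVolume(float* volume) {
        if (*volume != lastVolumeSentToHal && halSetVolume(*volume)) {
            lastVolumeSentToHal = *volume;
        }
        // If the HAL accepted this volume, strip it from software gain.
        if (lastVolumeSentToHal == *volume) {
            *volume = 1.0f;
        }
    }
};

int main() {
    VoipVolumeSketch v;
    float volume = 0.25f;
    v.handleVoipVolume(&volume);  // HAL gets 0.25, software now sees 1.0
    std::printf("software volume: %.2f\n", volume);
}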
@ -747,6 +747,7 @@ protected:
// is safe to do so. That will drop the final ref count and destroy the tracks.
virtual mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove) = 0;
void removeTracks_l(const Vector< sp<Track> >& tracksToRemove);
status_t handleVoipVolume_l(float *volume);
// StreamOutHalInterfaceCallback implementation
virtual void onWriteReady();

@ -333,9 +333,10 @@ sp<DeviceDescriptor> HwModuleCollection::getDeviceDescriptor(const audio_devices
if (encodedFormat != AUDIO_FORMAT_DEFAULT) {
moduleDevice->setEncodedFormat(encodedFormat);
}
moduleDevice->setAddress(devAddress);
if (allowToCreate) {
moduleDevice->attach(hwModule);
moduleDevice->setAddress(devAddress);
moduleDevice->setName(String8(name));
}
return moduleDevice;
}

@ -5690,8 +5690,9 @@ float AudioPolicyManager::computeVolume(IVolumeCurves &curves,
const auto ringVolumeSrc = toVolumeSource(AUDIO_STREAM_RING);
const auto musicVolumeSrc = toVolumeSource(AUDIO_STREAM_MUSIC);
const auto alarmVolumeSrc = toVolumeSource(AUDIO_STREAM_ALARM);
const auto a11yVolumeSrc = toVolumeSource(AUDIO_STREAM_ACCESSIBILITY);
if (volumeSource == toVolumeSource(AUDIO_STREAM_ACCESSIBILITY)
if (volumeSource == a11yVolumeSrc
&& (AUDIO_MODE_RINGTONE == mEngine->getPhoneState()) &&
mOutputs.isActive(ringVolumeSrc, 0)) {
auto &ringCurves = getVolumeCurves(AUDIO_STREAM_RING);
@ -5708,7 +5709,7 @@ float AudioPolicyManager::computeVolume(IVolumeCurves &curves,
volumeSource == toVolumeSource(AUDIO_STREAM_NOTIFICATION) ||
volumeSource == toVolumeSource(AUDIO_STREAM_ENFORCED_AUDIBLE) ||
volumeSource == toVolumeSource(AUDIO_STREAM_DTMF) ||
volumeSource == toVolumeSource(AUDIO_STREAM_ACCESSIBILITY))) {
volumeSource == a11yVolumeSrc)) {
auto &voiceCurves = getVolumeCurves(callVolumeSrc);
int voiceVolumeIndex = voiceCurves.getVolumeIndex(device);
const float maxVoiceVolDb =
@ -5720,7 +5721,9 @@ float AudioPolicyManager::computeVolume(IVolumeCurves &curves,
// VOICE_CALL stream has minVolumeIndex > 0 : Users cannot set the volume of voice calls to
// 0. We don't want to cap volume when the system has programmatically muted the voice call
// stream. See setVolumeCurveIndex() for more information.
bool exemptFromCapping = (volumeSource == ringVolumeSrc) && (voiceVolumeIndex == 0);
bool exemptFromCapping =
((volumeSource == ringVolumeSrc) || (volumeSource == a11yVolumeSrc))
&& (voiceVolumeIndex == 0);
ALOGV_IF(exemptFromCapping, "%s volume source %d at vol=%f not capped", __func__,
volumeSource, volumeDb);
if ((volumeDb > maxVoiceVolDb) && !exemptFromCapping) {

@ -409,12 +409,17 @@ void AudioPolicyService::updateUidStates_l()
// Another client in the same UID has already been allowed to capture
// OR The client is the assistant
// AND an accessibility service is on TOP or a RTT call is active
// AND the source is VOICE_RECOGNITION or HOTWORD
// OR uses VOICE_RECOGNITION AND is on TOP
// OR uses HOTWORD
// AND the source is VOICE_RECOGNITION or HOTWORD
// OR uses VOICE_RECOGNITION AND is on TOP
// OR uses HOTWORD
// AND there is no active privacy sensitive capture or call
// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
// OR The client is an accessibility service
// AND Is on TOP
// AND the source is VOICE_RECOGNITION or HOTWORD
// OR The assistant is not on TOP
// AND there is no active privacy sensitive capture or call
// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
// AND is on TOP
// AND the source is VOICE_RECOGNITION or HOTWORD
// OR the client source is virtual (remote submix, call audio TX or RX...)
@ -422,7 +427,7 @@ void AudioPolicyService::updateUidStates_l()
// AND The assistant is not on TOP
// AND is on TOP or latest started
// AND there is no active privacy sensitive capture or call
// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
sp<AudioRecordClient> topActive;
sp<AudioRecordClient> latestActive;
@ -458,16 +463,24 @@ void AudioPolicyService::updateUidStates_l()
continue;
}
if (appState == APP_STATE_TOP) {
bool isAssistant = mUidPolicy->isAssistantUid(current->uid);
bool isAccessibility = mUidPolicy->isA11yUid(current->uid);
if (appState == APP_STATE_TOP && !isAccessibility) {
if (current->startTimeNs > topStartNs) {
topActive = current;
topStartNs = current->startTimeNs;
}
if (mUidPolicy->isAssistantUid(current->uid)) {
if (isAssistant) {
isAssistantOnTop = true;
}
}
if (current->startTimeNs > latestStartNs) {
// Assistant capture for HOTWORD, and capture by Accessibility services, are not
// considered for latest active, to avoid masking regular clients started before
if (current->startTimeNs > latestStartNs
&& !((current->attributes.source == AUDIO_SOURCE_HOTWORD
|| isA11yOnTop || rttCallActive)
&& isAssistant)
&& !isAccessibility) {
latestActive = current;
latestStartNs = current->startTimeNs;
}
@ -540,10 +553,20 @@ void AudioPolicyService::updateUidStates_l()
} else if (mUidPolicy->isA11yUid(current->uid)) {
// For accessibility service allow capture if:
// Is on TOP
// AND the source is VOICE_RECOGNITION or HOTWORD
if (isA11yOnTop &&
(source == AUDIO_SOURCE_VOICE_RECOGNITION || source == AUDIO_SOURCE_HOTWORD)) {
allowCapture = true;
// AND the source is VOICE_RECOGNITION or HOTWORD
// Or
// The assistant is not on TOP
// AND there is no active privacy sensitive capture or call
// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
if (isA11yOnTop) {
if (source == AUDIO_SOURCE_VOICE_RECOGNITION || source == AUDIO_SOURCE_HOTWORD) {
allowCapture = true;
}
} else {
if (!isAssistantOnTop
&& (!(isSensitiveActive || isInCall) || current->canCaptureOutput)) {
allowCapture = true;
}
}
}
setAppState_l(current->uid,

@ -1149,6 +1149,8 @@ status_t CameraService::handleEvictionsLocked(const String8& cameraId, int clien
clientPid,
states[states.size() - 1]);
resource_policy::ClientPriority clientPriority = clientDescriptor->getPriority();
// Find clients that would be evicted
auto evicted = mActiveClientManager.wouldEvict(clientDescriptor);
@ -1166,8 +1168,7 @@ status_t CameraService::handleEvictionsLocked(const String8& cameraId, int clien
String8 msg = String8::format("%s : DENIED connect device %s client for package %s "
"(PID %d, score %d state %d) due to eviction policy", curTime.string(),
cameraId.string(), packageName.string(), clientPid,
priorityScores[priorityScores.size() - 1],
states[states.size() - 1]);
clientPriority.getScore(), clientPriority.getState());
for (auto& i : incompatibleClients) {
msg.appendFormat("\n - Blocked by existing device %s client for package %s"
@ -1212,9 +1213,8 @@ status_t CameraService::handleEvictionsLocked(const String8& cameraId, int clien
i->getKey().string(), String8{clientSp->getPackageName()}.string(),
i->getOwnerId(), i->getPriority().getScore(),
i->getPriority().getState(), cameraId.string(),
packageName.string(), clientPid,
priorityScores[priorityScores.size() - 1],
states[states.size() - 1]));
packageName.string(), clientPid, clientPriority.getScore(),
clientPriority.getState()));
// Notify the client of disconnection
clientSp->notifyError(hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
@ -1348,14 +1348,19 @@ Status CameraService::connectDevice(
Status ret = Status::ok();
String8 id = String8(cameraId);
sp<CameraDeviceClient> client = nullptr;
String16 clientPackageNameAdj = clientPackageName;
if (hardware::IPCThreadState::self()->isServingCall()) {
std::string vendorClient =
StringPrintf("vendor.client.pid<%d>", CameraThreadState::getCallingPid());
clientPackageNameAdj = String16(vendorClient.c_str());
}
ret = connectHelper<hardware::camera2::ICameraDeviceCallbacks,CameraDeviceClient>(cameraCb, id,
/*api1CameraId*/-1,
CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName,
CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageNameAdj,
clientUid, USE_CALLING_PID, API_2, /*shimUpdateOnly*/ false, /*out*/client);
if(!ret.isOk()) {
logRejected(id, CameraThreadState::getCallingPid(), String8(clientPackageName),
logRejected(id, CameraThreadState::getCallingPid(), String8(clientPackageNameAdj),
ret.toString8());
return ret;
}
@ -2368,11 +2373,7 @@ CameraService::BasicClient::BasicClient(const sp<CameraService>& cameraService,
}
mClientPackageName = packages[0];
}
if (hardware::IPCThreadState::self()->isServingCall()) {
std::string vendorClient =
StringPrintf("vendor.client.pid<%d>", CameraThreadState::getCallingPid());
mClientPackageName = String16(vendorClient.c_str());
} else {
if (!hardware::IPCThreadState::self()->isServingCall()) {
mAppOpsManager = std::make_unique<AppOpsManager>();
}
}

@ -2058,6 +2058,13 @@ status_t CameraProviderManager::ProviderInfo::DeviceInfo3::getCameraInfo(
return OK;
}
bool CameraProviderManager::ProviderInfo::DeviceInfo3::isAPI1Compatible() const {
// Do not advertise NIR cameras to API1 camera app.
camera_metadata_ro_entry cfa = mCameraCharacteristics.find(
ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT);
if (cfa.count == 1 && cfa.data.u8[0] == ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_NIR) {
return false;
}
bool isBackwardCompatible = false;
camera_metadata_ro_entry_t caps = mCameraCharacteristics.find(
ANDROID_REQUEST_AVAILABLE_CAPABILITIES);

@ -419,7 +419,7 @@ extern "C" int processDepthPhotoFrame(DepthPhotoInputFrame inputFrame, size_t de
std::vector<std::unique_ptr<Item>> items;
std::vector<std::unique_ptr<Camera>> cameraList;
auto image = Image::FromDataForPrimaryImage("android/mainimage", &items);
auto image = Image::FromDataForPrimaryImage("image/jpeg", &items);
std::unique_ptr<CameraParams> cameraParams(new CameraParams(std::move(image)));
if (cameraParams == nullptr) {
ALOGE("%s: Failed to initialize camera parameters", __FUNCTION__);

@ -29,6 +29,9 @@
#define CLOGE(fmt, ...) ALOGE("Camera %s: %s: " fmt, mId.string(), __FUNCTION__, \
##__VA_ARGS__)
#define CLOGW(fmt, ...) ALOGW("Camera %s: %s: " fmt, mId.string(), __FUNCTION__, \
##__VA_ARGS__)
// Convenience macros for transitioning to the error state
#define SET_ERR(fmt, ...) setErrorState( \
"%s: " fmt, __FUNCTION__, \
@ -3267,14 +3270,19 @@ void Camera3Device::removeInFlightRequestIfReadyLocked(int idx) {
ALOGVV("%s: removed frame %d from InFlightMap", __FUNCTION__, frameNumber);
}
// Sanity check - if we have too many in-flight frames, something has
// likely gone wrong
if (!mIsConstrainedHighSpeedConfiguration && mInFlightMap.size() > kInFlightWarnLimit) {
CLOGE("In-flight list too large: %zu", mInFlightMap.size());
} else if (mIsConstrainedHighSpeedConfiguration && mInFlightMap.size() >
kInFlightWarnLimitHighSpeed) {
CLOGE("In-flight list too large for high speed configuration: %zu",
mInFlightMap.size());
// Sanity check - if we have too many in-flight frames with a long total inflight duration,
// something has likely gone wrong. This might still be legit only if the application sends
// in a long burst of long-exposure requests.
if (mExpectedInflightDuration > kMinWarnInflightDuration) {
if (!mIsConstrainedHighSpeedConfiguration && mInFlightMap.size() > kInFlightWarnLimit) {
CLOGW("In-flight list too large: %zu, total inflight duration %" PRIu64,
mInFlightMap.size(), mExpectedInflightDuration);
} else if (mIsConstrainedHighSpeedConfiguration && mInFlightMap.size() >
kInFlightWarnLimitHighSpeed) {
CLOGW("In-flight list too large for high speed configuration: %zu, "
"total inflight duration %" PRIu64,
mInFlightMap.size(), mExpectedInflightDuration);
}
}
}

@ -227,6 +227,7 @@ class Camera3Device :
static const size_t kDumpLockAttempts = 10;
static const size_t kDumpSleepDuration = 100000; // 0.10 sec
static const nsecs_t kActiveTimeout = 500000000; // 500 ms
static const nsecs_t kMinWarnInflightDuration = 5000000000; // 5 s
static const size_t kInFlightWarnLimit = 30;
static const size_t kInFlightWarnLimitHighSpeed = 256; // batch size 32 * pipe depth 8
static const nsecs_t kDefaultExpectedDuration = 100000000; // 100 ms

@ -21,6 +21,7 @@
#include "minijail.h"
#include <binder/ProcessState.h>
#include <cutils/properties.h>
#include <hidl/HidlTransportSupport.h>
#include <media/stagefright/omx/1.0/Omx.h>
#include <media/stagefright/omx/1.0/OmxStore.h>
@ -57,7 +58,8 @@ int main(int argc __unused, char** argv)
} else {
LOG(INFO) << "IOmx HAL service created.";
}
sp<IOmxStore> omxStore = new implementation::OmxStore(omx);
sp<IOmxStore> omxStore = new implementation::OmxStore(
property_get_int64("vendor.media.omx", 1) ? omx : nullptr);
if (omxStore == nullptr) {
LOG(ERROR) << "Cannot create IOmxStore HAL service.";
} else if (omxStore->registerAsService() != OK) {

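Reviewer note: the OmxStore change gates the passed-in IOmx on a system property that defaults to on, so existing builds keep today's behavior while a device can set vendor.media.omx to 0 to hide the OMX instance from the store. A sketch of the gate with an environment variable standing in for the property (property_get_int64 itself is the cutils API used above):

#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Env-var stand-in for property_get_int64(name, default_value).
int64_t getInt64Flag(const char* name, int64_t defaultValue) {
    const char* value = std::getenv(name);
    return value ? std::atoll(value) : defaultValue;
}

struct Omx {};

int main() {
    Omx omx;
    // Default 1 preserves current behavior; 0 passes nullptr to the store.
    Omx* forStore = getInt64Flag("VENDOR_MEDIA_OMX", 1) ? &omx : nullptr;
    std::printf("store sees omx: %s\n", forStore ? "yes" : "no");
}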