Merge master@5406228 into git_qt-dev-plus-aosp.

Change-Id: I84559829bbaa69321df4581ff5b18599066a4e1c
Bug: 129345239
gugelfrei / Bill Rassieur, commit fc1aaa9277

@ -287,3 +287,16 @@ camera_status_t ACameraDevice_createCaptureSessionWithSessionParameters(
}
return device->createCaptureSession(outputs, sessionParameters, callbacks, session);
}
EXPORT
camera_status_t ACameraDevice_isSessionConfigurationSupported(
const ACameraDevice* device,
const ACaptureSessionOutputContainer* sessionOutputContainer) {
ATRACE_CALL();
if (device == nullptr || sessionOutputContainer == nullptr) {
ALOGE("%s: Error: invalid input: device %p, sessionOutputContainer %p",
__FUNCTION__, device, sessionOutputContainer);
return ACAMERA_ERROR_INVALID_PARAMETER;
}
return device->isSessionConfigurationSupported(sessionOutputContainer);
}

@ -227,6 +227,55 @@ CameraDevice::createCaptureSession(
return ACAMERA_OK;
}
camera_status_t CameraDevice::isSessionConfigurationSupported(
const ACaptureSessionOutputContainer* sessionOutputContainer) const {
Mutex::Autolock _l(mDeviceLock);
camera_status_t ret = checkCameraClosedOrErrorLocked();
if (ret != ACAMERA_OK) {
return ret;
}
SessionConfiguration sessionConfiguration(0 /*inputWidth*/, 0 /*inputHeight*/,
-1 /*inputFormat*/, CAMERA3_STREAM_CONFIGURATION_NORMAL_MODE);
for (const auto& output : sessionOutputContainer->mOutputs) {
sp<IGraphicBufferProducer> iGBP(nullptr);
ret = getIGBPfromAnw(output.mWindow, iGBP);
if (ret != ACAMERA_OK) {
ALOGE("Camera device %s failed to extract graphic producer from native window",
getId());
return ret;
}
String16 physicalId16(output.mPhysicalCameraId.c_str());
OutputConfiguration outConfig(iGBP, output.mRotation, physicalId16,
OutputConfiguration::INVALID_SET_ID, true);
for (auto& anw : output.mSharedWindows) {
ret = getIGBPfromAnw(anw, iGBP);
if (ret != ACAMERA_OK) {
ALOGE("Camera device %s failed to extract graphic producer from native window",
getId());
return ret;
}
outConfig.addGraphicProducer(iGBP);
}
sessionConfiguration.addOutputConfiguration(outConfig);
}
bool supported = false;
binder::Status remoteRet = mRemote->isSessionConfigurationSupported(
sessionConfiguration, &supported);
if (remoteRet.serviceSpecificErrorCode() ==
hardware::ICameraService::ERROR_INVALID_OPERATION) {
return ACAMERA_ERROR_UNSUPPORTED_OPERATION;
} else if (!remoteRet.isOk()) {
return ACAMERA_ERROR_UNKNOWN;
} else {
return supported ? ACAMERA_OK : ACAMERA_ERROR_STREAM_CONFIGURE_FAIL;
}
}
camera_status_t CameraDevice::updateOutputConfigurationLocked(ACaptureSessionOutput *output) {
camera_status_t ret = checkCameraClosedOrErrorLocked();
if (ret != ACAMERA_OK) {

@ -35,6 +35,7 @@
#include <media/stagefright/foundation/AMessage.h>
#include <camera/CaptureResult.h>
#include <camera/camera2/OutputConfiguration.h>
#include <camera/camera2/SessionConfiguration.h>
#include <camera/camera2/CaptureRequest.h>
#include <camera/NdkCameraManager.h>
@ -77,6 +78,9 @@ class CameraDevice final : public RefBase {
const ACameraCaptureSession_stateCallbacks* callbacks,
/*out*/ACameraCaptureSession** session);
camera_status_t isSessionConfigurationSupported(
const ACaptureSessionOutputContainer* sessionOutputContainer) const;
// Callbacks from camera service
class ServiceCallback : public hardware::camera2::BnCameraDeviceCallbacks {
public:
@ -369,6 +373,11 @@ struct ACameraDevice {
return mDevice->createCaptureSession(outputs, sessionParameters, callbacks, session);
}
camera_status_t isSessionConfigurationSupported(
const ACaptureSessionOutputContainer* sessionOutputContainer) const {
return mDevice->isSessionConfigurationSupported(sessionOutputContainer);
}
/***********************
* Device internal APIs *
***********************/

@ -845,6 +845,43 @@ camera_status_t ACameraDevice_createCaptureRequest_withPhysicalIds(
const ACameraIdList* physicalIdList,
/*out*/ACaptureRequest** request) __INTRODUCED_IN(29);
/**
* Check whether a particular {@link ACaptureSessionOutputContainer} is supported by
* the camera device.
*
* <p>This method performs a runtime check of a given {@link
* ACaptureSessionOutputContainer}. The result confirms whether or not the
* passed CaptureSession outputs can be successfully used to create a camera
* capture session using {@link ACameraDevice_createCaptureSession}.</p>
*
* <p>This method can be called at any point before, during, or after an active
* capture session. It must not impact normal camera behavior in any way and
* must complete significantly faster than creating a capture session.</p>
*
* <p>Although this method is faster than creating a new capture session, it is not intended
* to be used for exploring the entire space of supported stream combinations.</p>
*
* @param device the camera device of interest
* @param sessionOutputContainer the {@link ACaptureSessionOutputContainer} of
* interest.
*
* @return <ul>
* <li>{@link ACAMERA_OK} if the given {@link ACaptureSessionOutputContainer}
* is supported by the camera device.</li>
* <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if device or sessionOutputContainer
* is NULL.</li>
* <li>{@link ACAMERA_ERROR_STREAM_CONFIGURE_FAIL} if the given
* {@link ACaptureSessionOutputContainer} is not supported by the camera device.</li>
* <li>{@link ACAMERA_ERROR_UNSUPPORTED_OPERATION} if the query operation is not
* supported by the camera device.</li></ul>
*/
camera_status_t ACameraDevice_isSessionConfigurationSupported(
const ACameraDevice* device,
const ACaptureSessionOutputContainer* sessionOutputContainer) __INTRODUCED_IN(29);
#endif /* __ANDROID_API__ >= 29 */
__END_DECLS
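
A typical caller probes an output combination with the new entry point before committing to it. The helper below is a hypothetical usage sketch, not part of this change; the device, window, and callback arguments are assumed to come from the app:

#include <camera/NdkCameraDevice.h>

// Hypothetical helper: query support first, then create the session with the
// same container. Falls through to creation when the query is unsupported.
static camera_status_t probeThenCreate(ACameraDevice *device, ANativeWindow *window,
        const ACameraCaptureSession_stateCallbacks *stateCallbacks,
        ACameraCaptureSession **session) {
    ACaptureSessionOutputContainer *container = nullptr;
    ACaptureSessionOutputContainer_create(&container);
    ACaptureSessionOutput *output = nullptr;
    ACaptureSessionOutput_create(window, &output);
    ACaptureSessionOutputContainer_add(container, output);
    camera_status_t status = ACameraDevice_isSessionConfigurationSupported(device, container);
    if (status == ACAMERA_OK || status == ACAMERA_ERROR_UNSUPPORTED_OPERATION) {
        // Supported, or the device cannot answer the query; attempt the session.
        status = ACameraDevice_createCaptureSession(device, container, stateCallbacks, session);
    }
    ACaptureSessionOutput_free(output);
    ACaptureSessionOutputContainer_free(container);
    return status;
}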

@ -106,7 +106,8 @@ typedef enum {
/**
* Camera device does not support the stream configuration provided by application in
* {@link ACameraDevice_createCaptureSession} or {@link
* ACameraDevice_isSessionConfigurationSupported}.
*/
ACAMERA_ERROR_STREAM_CONFIGURE_FAIL = ACAMERA_ERROR_BASE - 9,
@ -130,6 +131,11 @@ typedef enum {
* The application does not have permission to open camera.
*/
ACAMERA_ERROR_PERMISSION_DENIED = ACAMERA_ERROR_BASE - 13,
/**
* The operation is not supported by the camera device.
*/
ACAMERA_ERROR_UNSUPPORTED_OPERATION = ACAMERA_ERROR_BASE - 14,
} camera_status_t;
#endif /* __ANDROID_API__ >= 24 */

@ -14,6 +14,7 @@ LIBCAMERA2NDK {
ACameraDevice_createCaptureRequest_withPhysicalIds; # introduced=29
ACameraDevice_createCaptureSession;
ACameraDevice_createCaptureSessionWithSessionParameters; # introduced=28
ACameraDevice_isSessionConfigurationSupported; # introduced=29
ACameraDevice_getId;
ACameraManager_create;
ACameraManager_delete;

@ -38,6 +38,7 @@ namespace acam {
using HCameraMetadata = frameworks::cameraservice::device::V2_0::CameraMetadata;
using OutputConfiguration = frameworks::cameraservice::device::V2_0::OutputConfiguration;
using SessionConfiguration = frameworks::cameraservice::device::V2_0::SessionConfiguration;
using hardware::Void;
// Static member definitions
@ -216,6 +217,47 @@ CameraDevice::createCaptureSession(
return ACAMERA_OK;
}
camera_status_t CameraDevice::isSessionConfigurationSupported(
const ACaptureSessionOutputContainer* sessionOutputContainer) const {
Mutex::Autolock _l(mDeviceLock);
camera_status_t ret = checkCameraClosedOrErrorLocked();
if (ret != ACAMERA_OK) {
return ret;
}
SessionConfiguration sessionConfig;
sessionConfig.inputWidth = 0;
sessionConfig.inputHeight = 0;
sessionConfig.inputFormat = -1;
sessionConfig.operationMode = StreamConfigurationMode::NORMAL_MODE;
sessionConfig.outputStreams.resize(sessionOutputContainer->mOutputs.size());
size_t index = 0;
for (const auto& output : sessionOutputContainer->mOutputs) {
sessionConfig.outputStreams[index].rotation = utils::convertToHidl(output.mRotation);
sessionConfig.outputStreams[index].windowGroupId = -1;
sessionConfig.outputStreams[index].windowHandles.resize(output.mSharedWindows.size() + 1);
sessionConfig.outputStreams[index].windowHandles[0] = output.mWindow;
sessionConfig.outputStreams[index].physicalCameraId = output.mPhysicalCameraId;
index++;
}
bool configSupported = false;
Status status = Status::NO_ERROR;
auto remoteRet = mRemote->isSessionConfigurationSupported(sessionConfig,
[&status, &configSupported](auto s, auto supported) {
status = s;
configSupported = supported;
});
if (status == Status::INVALID_OPERATION) {
return ACAMERA_ERROR_UNSUPPORTED_OPERATION;
} else if (!remoteRet.isOk()) {
return ACAMERA_ERROR_UNKNOWN;
} else {
return configSupported ? ACAMERA_OK : ACAMERA_ERROR_STREAM_CONFIGURE_FAIL;
}
}
void CameraDevice::addRequestSettingsMetadata(ACaptureRequest *aCaptureRequest,
sp<CaptureRequest> &req) {
CameraMetadata metadataCopy = aCaptureRequest->settings->getInternalData();

@ -101,6 +101,9 @@ class CameraDevice final : public RefBase {
const ACameraCaptureSession_stateCallbacks* callbacks,
/*out*/ACameraCaptureSession** session);
camera_status_t isSessionConfigurationSupported(
const ACaptureSessionOutputContainer* sessionOutputContainer) const;
// Callbacks from camera service
class ServiceCallback : public ICameraDeviceCallback {
public:
@ -397,6 +400,11 @@ struct ACameraDevice {
return mDevice->createCaptureSession(outputs, sessionParameters, callbacks, session);
}
camera_status_t isSessionConfigurationSupported(
const ACaptureSessionOutputContainer* sessionOutputContainer) const {
return mDevice->isSessionConfigurationSupported(sessionOutputContainer);
}
/***********************
* Device internal APIs *
***********************/

@ -121,6 +121,12 @@ class CameraHelper {
cameraIdList.numCameras = idPointerList.size();
cameraIdList.cameraIds = idPointerList.data();
ret = ACameraDevice_isSessionConfigurationSupported(mDevice, mOutputs);
if (ret != ACAMERA_OK && ret != ACAMERA_ERROR_UNSUPPORTED_OPERATION) {
ALOGE("ACameraDevice_isSessionConfigurationSupported failed, ret=%d", ret);
return ret;
}
ret = ACameraDevice_createCaptureSession(mDevice, mOutputs, &mSessionCb, &mSession);
if (ret != ACAMERA_OK) {
ALOGE("ACameraDevice_createCaptureSession failed, ret=%d", ret);

@ -36,13 +36,18 @@ cc_library_shared {
ldflags: ["-Wl,-Bsymbolic"],
}
filegroup {
name: "codec2_soft_exports",
srcs: [ "exports.lds" ],
}
// public dependency for software codec implementation
// to be used by code under media/codecs/* only as its stability is not guaranteed
cc_defaults {
name: "libcodec2_soft-defaults",
defaults: ["libcodec2-impl-defaults"],
vendor_available: true,
version_script: ":codec2_soft_exports",
export_shared_lib_headers: [
"libsfplugin_ccodec_utils",
],

@ -0,0 +1,7 @@
{
global:
CreateCodec2Factory;
DestroyCodec2Factory;
local: *;
};
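
The script keeps only the two Codec 2.0 factory entry points visible and hides everything else behind "local: *;". For reference, a soft codec library defines them with the conventional signatures sketched below; the exact declarations live in the individual component sources:

#include <C2Component.h>

extern "C" ::C2ComponentFactory* CreateCodec2Factory();        // stays exported
extern "C" void DestroyCodec2Factory(::C2ComponentFactory* f); // stays exported
// Every other symbol in the library is forced to local visibility.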

@ -589,6 +589,21 @@ private:
bool mIsHdr10Plus;
};
struct Av1ProfileLevelMapper : ProfileLevelMapperHelper {
virtual bool simpleMap(C2Config::level_t from, int32_t *to) {
return sAv1Levels.map(from, to);
}
virtual bool simpleMap(int32_t from, C2Config::level_t *to) {
return sAv1Levels.map(from, to);
}
virtual bool simpleMap(C2Config::profile_t from, int32_t *to) {
return sAv1Profiles.map(from, to);
}
virtual bool simpleMap(int32_t from, C2Config::profile_t *to) {
return sAv1Profiles.map(from, to);
}
};
} // namespace
// static
@ -613,6 +628,8 @@ C2Mapper::GetProfileLevelMapper(std::string mediaType) {
return std::make_shared<Vp8ProfileLevelMapper>();
} else if (mediaType == MIMETYPE_VIDEO_VP9) {
return std::make_shared<Vp9ProfileLevelMapper>();
} else if (mediaType == MIMETYPE_VIDEO_AV1) {
return std::make_shared<Av1ProfileLevelMapper>();
}
return nullptr;
}
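
With the mapper registered, callers can translate between SDK constants and C2Config values in either direction. A hypothetical sketch; mapLevel follows the existing ProfileLevelMapper interface, and AV1Level53 is assumed to be the SDK-side constant from MediaCodecConstants.h:

std::shared_ptr<C2Mapper::ProfileLevelMapper> mapper =
        C2Mapper::GetProfileLevelMapper(MIMETYPE_VIDEO_AV1);
C2Config::level_t c2Level;
if (mapper != nullptr && mapper->mapLevel((int32_t)AV1Level53, &c2Level)) {
    // c2Level now holds the Codec 2.0 equivalent of the SDK AV1 level constant.
}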

@ -1336,6 +1336,13 @@ MatroskaExtractor::~MatroskaExtractor() {
mReader = NULL;
delete mDataSource;
for (size_t i = 0; i < mTracks.size(); ++i) {
TrackInfo *info = &mTracks.editItemAt(i);
if (info->mMeta) {
AMediaFormat_delete(info->mMeta);
}
}
}
size_t MatroskaExtractor::countTracks() {
@ -1808,6 +1815,8 @@ status_t MatroskaExtractor::initTrackInfo(
void MatroskaExtractor::addTracks() {
const mkvparser::Tracks *tracks = mSegment->GetTracks();
AMediaFormat *meta = nullptr;
for (size_t index = 0; index < tracks->GetTracksCount(); ++index) {
const mkvparser::Track *track = tracks->GetTrackByIndex(index);
@ -1832,7 +1841,11 @@ void MatroskaExtractor::addTracks() {
enum { VIDEO_TRACK = 1, AUDIO_TRACK = 2 };
if (meta) {
AMediaFormat_clear(meta);
} else {
meta = AMediaFormat_new();
}
status_t err = OK;
int32_t nalSize = -1;
@ -2067,21 +2080,26 @@ void MatroskaExtractor::addTracks() {
long long durationNs = mSegment->GetDuration();
AMediaFormat_setInt64(meta, AMEDIAFORMAT_KEY_DURATION, (durationNs + 500) / 1000);
const char *mimetype = "";
if (!AMediaFormat_getString(meta, AMEDIAFORMAT_KEY_MIME, &mimetype)) {
// do not add this track to the track list
ALOGW("ignoring track with unknown mime");
continue;
}
mTracks.push();
size_t n = mTracks.size() - 1;
TrackInfo *trackInfo = &mTracks.editItemAt(n);
initTrackInfo(track, meta, trackInfo);
trackInfo->mNalLengthSize = nalSize;
if ((!strcmp("V_MPEG4/ISO/AVC", codecID) && codecPrivateSize == 0) ||
(!strcmp(mimetype, MEDIA_MIMETYPE_VIDEO_AVC) && isSetCsdFrom1stFrame)) {
// Attempt to recover from AVC track without codec private data
err = synthesizeAVCC(trackInfo, n);
if (err != OK) {
mTracks.pop();
continue;
}
} else if ((!strcmp("V_MPEG2", codecID) && codecPrivateSize == 0) ||
(!strcmp(mimetype, MEDIA_MIMETYPE_VIDEO_MPEG2) && isSetCsdFrom1stFrame)) {
@ -2089,6 +2107,7 @@ void MatroskaExtractor::addTracks() {
err = synthesizeMPEG2(trackInfo, n);
if (err != OK) {
mTracks.pop();
continue;
}
} else if ((!strcmp("V_MPEG4/ISO/ASP", codecID) && codecPrivateSize == 0) ||
(!strcmp(mimetype, MEDIA_MIMETYPE_VIDEO_MPEG4) && isSetCsdFrom1stFrame) ||
@ -2099,9 +2118,14 @@ void MatroskaExtractor::addTracks() {
err = synthesizeMPEG4(trackInfo, n);
if (err != OK) {
mTracks.pop();
continue;
}
}
// the TrackInfo owns the metadata now
meta = nullptr;
}
if (meta) {
AMediaFormat_delete(meta);
}
}

@ -61,10 +61,8 @@ private:
TrackInfo() {
mMeta = NULL;
}
~TrackInfo() {
if (mMeta) {
AMediaFormat_delete(mMeta);
}
}
unsigned long mTrackNum;
bool mEncrypted;

@ -3675,8 +3675,10 @@ status_t MPEG4Extractor::parseITunesMetaData(off64_t offset, size_t size) {
void *tmpData;
size_t tmpDataSize;
const char *s;
if (size >= 8 && metadataKey &&
!AMediaFormat_getBuffer(mFileMetaData, metadataKey, &tmpData, &tmpDataSize) &&
!AMediaFormat_getString(mFileMetaData, metadataKey, &s)) {
if (!strcmp(metadataKey, "albumart")) {
AMediaFormat_setBuffer(mFileMetaData, metadataKey,
buffer + 8, size - 8);
@ -3918,10 +3920,9 @@ void MPEG4Extractor::parseID3v2MetaData(off64_t offset) {
};
static const size_t kNumMapEntries = sizeof(kMap) / sizeof(kMap[0]);
for (size_t i = 0; i < kNumMapEntries; ++i) {
const char *ss;
if (!AMediaFormat_getString(mFileMetaData, kMap[i].key, &ss)) {
ID3::Iterator *it = new ID3::Iterator(id3, kMap[i].tag1);
if (it->done()) {
delete it;
@ -5318,7 +5319,9 @@ size_t MPEG4Source::parseNALSize(const uint8_t *data) const {
}
int32_t MPEG4Source::parseHEVCLayerId(const uint8_t *data, size_t size) {
if (data == nullptr || size < mNALLengthSize + 2) {
return -1;
}
// HEVC NAL-header (16-bit)
// 1 6 6 3

@ -1280,7 +1280,7 @@ void MyOggExtractor::parseFileMetaData() {
//ALOGI("comment #%d: '%s'", i + 1, mVc.user_comments[i]);
}
AMediaFormat_getInt32(mFileMeta, "haptic", &mHapticChannelCount);
AMediaFormat_getInt32(mFileMeta, AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT, &mHapticChannelCount);
}
void MyOggExtractor::setChannelMask(int channelCount) {
@ -1297,6 +1297,8 @@ void MyOggExtractor::setChannelMask(int channelCount) {
const audio_channel_mask_t channelMask = audio_channel_out_mask_from_count(
audioChannelCount) | hapticChannelMask;
AMediaFormat_setInt32(mMeta, AMEDIAFORMAT_KEY_CHANNEL_MASK, channelMask);
AMediaFormat_setInt32(
mMeta, AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT, mHapticChannelCount);
}
} else {
AMediaFormat_setInt32(mMeta, AMEDIAFORMAT_KEY_CHANNEL_MASK,

@ -410,8 +410,8 @@ status_t AudioRecord::start(AudioSystem::sync_event_t event, audio_session_t tri
}
// Call these directly because we are already holding the lock.
mAudioRecord->setPreferredMicrophoneDirection(mSelectedMicDirection);
mAudioRecord->setPreferredMicrophoneFieldDimension(mSelectedMicFieldDimension);
if (status != NO_ERROR) {
mActive = false;
@ -1381,7 +1381,7 @@ status_t AudioRecord::getActiveMicrophones(std::vector<media::MicrophoneInfo>* a
return mAudioRecord->getActiveMicrophones(activeMicrophones).transactionError();
}
status_t AudioRecord::setPreferredMicrophoneDirection(audio_microphone_direction_t direction)
{
AutoMutex lock(mLock);
if (mSelectedMicDirection == direction) {
@ -1394,11 +1394,11 @@ status_t AudioRecord::setMicrophoneDirection(audio_microphone_direction_t direct
// the internal AudioRecord hasn't been created yet, so just stash the attribute.
return OK;
} else {
return mAudioRecord->setPreferredMicrophoneDirection(direction).transactionError();
}
}
status_t AudioRecord::setPreferredMicrophoneFieldDimension(float zoom) {
AutoMutex lock(mLock);
if (mSelectedMicFieldDimension == zoom) {
// NOP
@ -1410,7 +1410,7 @@ status_t AudioRecord::setMicrophoneFieldDimension(float zoom) {
// the internal AudioRecord hasn't been created yet, so just stash the attribute.
return OK;
} else {
return mAudioRecord->setPreferredMicrophoneFieldDimension(zoom).transactionError();
}
}

@ -39,9 +39,9 @@ interface IAudioRecord {
/* Set the microphone direction (for processing).
*/
void setPreferredMicrophoneDirection(int /*audio_microphone_direction_t*/ direction);
/* Set the microphone zoom (for processing).
*/
void setPreferredMicrophoneFieldDimension(float zoom);
}
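
Client code picks up the rename unchanged apart from the method names. A hypothetical client-side sketch; "record" is assumed to be an already-initialized android::AudioRecord, and MIC_DIRECTION_FRONT comes from audio_microphone_direction_t in system/audio.h:

record->setPreferredMicrophoneDirection(MIC_DIRECTION_FRONT);
record->setPreferredMicrophoneFieldDimension(1.0f); // positive values narrow ("zoom in") the capture field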

@ -542,11 +542,11 @@ public:
/* Set the Microphone direction (for processing purposes).
*/
status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
/* Set the Microphone zoom factor (for processing purposes).
*/
status_t setPreferredMicrophoneFieldDimension(float zoom);
/* Get the unique port ID assigned to this AudioRecord instance by audio policy manager.
* The ID is unique across all audioserver clients and can change during the life cycle

@ -855,25 +855,26 @@ status_t StreamInHalHidl::updateSinkMetadata(const
#endif
#if MAJOR_VERSION < 5
status_t StreamInHalHidl::setPreferredMicrophoneDirection(
audio_microphone_direction_t direction __unused) {
if (mStream == 0) return NO_INIT;
return INVALID_OPERATION;
}
status_t StreamInHalHidl::setPreferredMicrophoneFieldDimension(float zoom __unused) {
if (mStream == 0) return NO_INIT;
return INVALID_OPERATION;
}
#else
status_t StreamInHalHidl::setPreferredMicrophoneDirection(audio_microphone_direction_t direction) {
if (!mStream) return NO_INIT;
return processReturn("setMicrophoneDirection",
mStream->setMicrophoneDirection(static_cast<MicrophoneDirection>(direction)));
return processReturn("setPreferredMicrophoneDirection",
mStream->setMicrophoneDirection(static_cast<MicrophoneDirection>(direction)));
}
status_t StreamInHalHidl::setPreferredMicrophoneFieldDimension(float zoom) {
if (!mStream) return NO_INIT;
return processReturn("setMicrophoneFieldDimension",
return processReturn("setPreferredMicrophoneFieldDimension",
mStream->setMicrophoneFieldDimension(zoom));
}
#endif

@ -221,10 +221,11 @@ class StreamInHalHidl : public StreamInHalInterface, public StreamHalHidl {
virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
// Set microphone direction (for processing)
virtual status_t setPreferredMicrophoneDirection(
audio_microphone_direction_t direction) override;
// Set microphone zoom (for processing)
virtual status_t setPreferredMicrophoneFieldDimension(float zoom) override;
// Called when the metadata of the stream's sink has been changed.
status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;

@ -369,20 +369,21 @@ status_t StreamInHalLocal::getActiveMicrophones(std::vector<media::MicrophoneInf
#endif
#if MAJOR_VERSION < 5
status_t StreamInHalLocal::setPreferredMicrophoneDirection(
audio_microphone_direction_t direction __unused) {
return INVALID_OPERATION;
}
status_t StreamInHalLocal::setPreferredMicrophoneFieldDimension(float zoom __unused) {
return INVALID_OPERATION;
}
#else
status_t StreamInHalLocal::setPreferredMicrophoneDirection(audio_microphone_direction_t direction) {
if (mStream->set_microphone_direction == NULL) return INVALID_OPERATION;
return mStream->set_microphone_direction(mStream, direction);
}
status_t StreamInHalLocal::setPreferredMicrophoneFieldDimension(float zoom) {
if (mStream->set_microphone_field_dimension == NULL) return INVALID_OPERATION;
return mStream->set_microphone_field_dimension(mStream, zoom);
@ -391,3 +392,5 @@ status_t StreamInHalLocal::setMicrophoneFieldDimension(float zoom) {
} // namespace CPP_VERSION
} // namespace android

@ -205,10 +205,10 @@ class StreamInHalLocal : public StreamInHalInterface, public StreamHalLocal {
virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
// Sets microphone direction (for processing)
virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
// Sets microphone zoom (for processing)
virtual status_t setPreferredMicrophoneFieldDimension(float zoom);
// Called when the metadata of the stream's sink has been changed.
status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;

@ -180,10 +180,10 @@ class StreamInHalInterface : public virtual StreamHalInterface {
virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones) = 0;
// Set direction for capture processing
virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t) = 0;
// Set zoom factor for capture stream
virtual status_t setPreferredMicrophoneFieldDimension(float zoom) = 0;
struct SinkMetadata {
std::vector<record_track_metadata_t> tracks;

@ -132,6 +132,9 @@ cc_library_static {
shared_libs: [
"liblog",
],
header_libs: [
"libhardware_headers"
],
cflags: [
"-fvisibility=hidden",
"-DBUILD_FLOAT",

@ -298,6 +298,7 @@ typedef struct
LVM_PSA_DecaySpeed_en PSA_PeakDecayRate; /* Peak value decay rate*/
#ifdef SUPPORT_MC
LVM_INT32 NrChannels;
LVM_INT32 ChMask;
#endif
} LVM_ControlParams_t;

@ -93,6 +93,7 @@ LVM_ReturnStatus_en LVM_SetControlParameters(LVM_Handle_t hInstance,
#ifdef SUPPORT_MC
pInstance->Params.NrChannels = pParams->NrChannels;
pInstance->Params.ChMask = pParams->ChMask;
#endif
/*
* Cinema Sound parameters
@ -584,6 +585,7 @@ LVM_ReturnStatus_en LVM_ApplyNewSettings(LVM_Handle_t hInstance)
#ifdef SUPPORT_MC
pInstance->NrChannels = LocalParams.NrChannels;
pInstance->ChMask = LocalParams.ChMask;
#endif
/* Clear all internal data if format changes */

@ -291,6 +291,7 @@ typedef struct
#ifdef SUPPORT_MC
LVM_INT16 NrChannels;
LVM_INT32 ChMask;
#endif
} LVM_Instance_t;

@ -21,6 +21,7 @@
/* Includes */
/* */
/****************************************************************************************/
#include <system/audio.h>
#include "LVM_Private.h"
#include "VectorArithmetic.h"
@ -67,6 +68,7 @@ LVM_ReturnStatus_en LVM_Process(LVM_Handle_t hInstance,
LVM_ReturnStatus_en Status;
#ifdef SUPPORT_MC
LVM_INT32 NrChannels = pInstance->NrChannels;
LVM_INT32 ChMask = pInstance->ChMask;
#define NrFrames SampleCount // alias for clarity
#endif
@ -119,6 +121,7 @@ LVM_ReturnStatus_en LVM_Process(LVM_Handle_t hInstance,
#ifdef SUPPORT_MC
/* Update the local variable NrChannels from pInstance->NrChannels value */
NrChannels = pInstance->NrChannels;
ChMask = pInstance->ChMask;
#endif
if(Status != LVM_SUCCESS)
@ -140,6 +143,7 @@ LVM_ReturnStatus_en LVM_Process(LVM_Handle_t hInstance,
pToProcess = pOutData;
#ifdef SUPPORT_MC
NrChannels = 2;
ChMask = AUDIO_CHANNEL_OUT_STEREO;
#endif
}
@ -254,18 +258,24 @@ LVM_ReturnStatus_en LVM_Process(LVM_Handle_t hInstance,
}
#ifdef SUPPORT_MC
/*
* Volume balance
*/
LVC_MixSoft_1St_MC_float_SAT(&pInstance->VC_BalanceMix,
pProcessed,
pProcessed,
NrFrames,
NrChannels,
ChMask);
#else
/*
* Volume balance
*/
LVC_MixSoft_1St_2i_D16C31_SAT(&pInstance->VC_BalanceMix,
pProcessed,
pProcessed,
SampleCount);
#endif
/*
* Perform Parametric Spectrum Analysis

@ -59,6 +59,31 @@ void LVC_Core_MixHard_1St_2i_D16C31_SAT( LVMixer3_FLOAT_st *ptrInstance1,
}
#ifdef SUPPORT_MC
void LVC_Core_MixHard_1St_MC_float_SAT (Mix_Private_FLOAT_st **ptrInstance,
const LVM_FLOAT *src,
LVM_FLOAT *dst,
LVM_INT16 NrFrames,
LVM_INT16 NrChannels)
{
LVM_FLOAT Temp;
LVM_INT16 ii, jj;
for (ii = NrFrames; ii != 0; ii--)
{
for (jj = 0; jj < NrChannels; jj++)
{
Mix_Private_FLOAT_st *pInstance1 = (Mix_Private_FLOAT_st *)(ptrInstance[jj]);
Temp = ((LVM_FLOAT)*(src++) * (LVM_FLOAT)pInstance1->Current);
if (Temp > 1.0f)
*dst++ = 1.0f;
else if (Temp < -1.0f)
*dst++ = -1.0f;
else
*dst++ = (LVM_FLOAT)Temp;
}
}
}
#endif
#else
void LVC_Core_MixHard_1St_2i_D16C31_SAT( LVMixer3_st *ptrInstance1,
LVMixer3_st *ptrInstance2,

@ -146,6 +146,51 @@ void LVC_Core_MixSoft_1St_2i_D16C31_WRA( LVMixer3_FLOAT_st *ptrInstance1,
pInstanceR->Current = CurrentR;
}
#ifdef SUPPORT_MC
void LVC_Core_MixSoft_1St_MC_float_WRA (Mix_Private_FLOAT_st **ptrInstance,
const LVM_FLOAT *src,
LVM_FLOAT *dst,
LVM_INT16 NrFrames,
LVM_INT16 NrChannels)
{
LVM_INT32 ii, ch;
LVM_FLOAT Temp = 0.0f;
LVM_FLOAT tempCurrent[NrChannels];
for (ch = 0; ch < NrChannels; ch++)
{
tempCurrent[ch] = ptrInstance[ch]->Current;
}
for (ii = NrFrames; ii > 0; ii--)
{
for (ch = 0; ch < NrChannels; ch++)
{
Mix_Private_FLOAT_st *pInstance = ptrInstance[ch];
const LVM_FLOAT Delta = pInstance->Delta;
LVM_FLOAT Current = tempCurrent[ch];
const LVM_FLOAT Target = pInstance->Target;
if (Current < Target)
{
ADD2_SAT_FLOAT(Current, Delta, Temp);
Current = Temp;
if (Current > Target)
Current = Target;
}
else
{
Current -= Delta;
if (Current < Target)
Current = Target;
}
*dst++ = *src++ * Current;
tempCurrent[ch] = Current;
}
}
for (ch = 0; ch < NrChannels; ch++)
{
ptrInstance[ch]->Current = tempCurrent[ch];
}
}
#endif
#else
void LVC_Core_MixSoft_1St_2i_D16C31_WRA( LVMixer3_st *ptrInstance1,
LVMixer3_st *ptrInstance2,

@ -19,6 +19,8 @@
INCLUDE FILES
***********************************************************************************/
#include <system/audio.h>
#include "LVC_Mixer_Private.h"
#include "VectorArithmetic.h"
#include "ScalarArithmetic.h"
@ -30,10 +32,207 @@
#define TRUE 1
#define FALSE 0
#define ARRAY_SIZE(a) ((sizeof(a)) / (sizeof(*(a))))
/**********************************************************************************
FUNCTION LVC_MixSoft_1St_2i_D16C31_SAT
***********************************************************************************/
#ifdef BUILD_FLOAT
#ifdef SUPPORT_MC
/* This threshold is used to decide on the processing to be applied on
* front center and back center channels
*/
#define LVM_VOL_BAL_THR (0.000016f)
void LVC_MixSoft_1St_MC_float_SAT (LVMixer3_2St_FLOAT_st *ptrInstance,
const LVM_FLOAT *src,
LVM_FLOAT *dst,
LVM_INT16 NrFrames,
LVM_INT32 NrChannels,
LVM_INT32 ChMask)
{
char HardMixing = TRUE;
LVM_FLOAT TargetGain;
Mix_Private_FLOAT_st Target_lfe = {LVM_MAXFLOAT, LVM_MAXFLOAT, LVM_MAXFLOAT};
Mix_Private_FLOAT_st Target_ctr = {LVM_MAXFLOAT, LVM_MAXFLOAT, LVM_MAXFLOAT};
Mix_Private_FLOAT_st *pInstance1 = \
(Mix_Private_FLOAT_st *)(ptrInstance->MixerStream[0].PrivateParams);
Mix_Private_FLOAT_st *pInstance2 = \
(Mix_Private_FLOAT_st *)(ptrInstance->MixerStream[1].PrivateParams);
Mix_Private_FLOAT_st *pMixPrivInst[4] = {pInstance1, pInstance2, &Target_ctr, &Target_lfe};
Mix_Private_FLOAT_st *pInstance[NrChannels];
if (audio_channel_mask_get_representation(ChMask)
== AUDIO_CHANNEL_REPRESENTATION_INDEX)
{
for (int i = 0; i < 2; i++)
{
pInstance[i] = pMixPrivInst[i];
}
for (int i = 2; i < NrChannels; i++)
{
pInstance[i] = pMixPrivInst[2];
}
}
else
{
// TODO: Combine with system/media/audio_utils/Balance.cpp
// Constants in system/media/audio/include/system/audio-base.h
// 'mixInstIdx' is used to map the appropriate mixer instance for each channel.
const int mixInstIdx[] = {
0, // AUDIO_CHANNEL_OUT_FRONT_LEFT = 0x1u,
1, // AUDIO_CHANNEL_OUT_FRONT_RIGHT = 0x2u,
2, // AUDIO_CHANNEL_OUT_FRONT_CENTER = 0x4u,
3, // AUDIO_CHANNEL_OUT_LOW_FREQUENCY = 0x8u,
0, // AUDIO_CHANNEL_OUT_BACK_LEFT = 0x10u,
1, // AUDIO_CHANNEL_OUT_BACK_RIGHT = 0x20u,
0, // AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER = 0x40u,
1, // AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x80u,
2, // AUDIO_CHANNEL_OUT_BACK_CENTER = 0x100u,
0, // AUDIO_CHANNEL_OUT_SIDE_LEFT = 0x200u,
1, // AUDIO_CHANNEL_OUT_SIDE_RIGHT = 0x400u,
2, // AUDIO_CHANNEL_OUT_TOP_CENTER = 0x800u,
0, // AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT = 0x1000u,
2, // AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER = 0x2000u,
1, // AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT = 0x4000u,
0, // AUDIO_CHANNEL_OUT_TOP_BACK_LEFT = 0x8000u,
2, // AUDIO_CHANNEL_OUT_TOP_BACK_CENTER = 0x10000u,
1, // AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT = 0x20000u,
0, // AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT = 0x40000u,
1, // AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT = 0x80000u
};
if (pInstance1->Target <= LVM_VOL_BAL_THR ||
pInstance2->Target <= LVM_VOL_BAL_THR)
{
Target_ctr.Target = 0.0f;
Target_ctr.Current = 0.0f;
Target_ctr.Delta = 0.0f;
}
const unsigned int idxArrSize = ARRAY_SIZE(mixInstIdx);
for (unsigned int i = 0, channel = ChMask; channel != 0; ++i)
{
const unsigned int idx = __builtin_ctz(channel);
if (idx < idxArrSize)
{
pInstance[i] = pMixPrivInst[mixInstIdx[idx]];
}
else
{
pInstance[i] = pMixPrivInst[2];
}
channel &= ~(1 << idx);
}
}
if (NrFrames <= 0) return;
/******************************************************************************
SOFT MIXING
*******************************************************************************/
if ((pInstance1->Current != pInstance1->Target) ||
(pInstance2->Current != pInstance2->Target))
{
// TODO: combine similar checks below.
if (pInstance1->Delta == LVM_MAXFLOAT
|| Abs_Float(pInstance1->Current - pInstance1->Target) < pInstance1->Delta)
{
/* Difference is not significant anymore. Make them equal. */
pInstance1->Current = pInstance1->Target;
TargetGain = pInstance1->Target;
LVC_Mixer_SetTarget(&(ptrInstance->MixerStream[0]), TargetGain);
}
else
{
/* Soft mixing has to be applied */
HardMixing = FALSE;
}
if (HardMixing == TRUE)
{
if (pInstance2->Delta == LVM_MAXFLOAT
|| Abs_Float(pInstance2->Current - pInstance2->Target) < pInstance2->Delta)
{
/* Difference is not significant anymore. Make them equal. */
pInstance2->Current = pInstance2->Target;
TargetGain = pInstance2->Target;
LVC_Mixer_SetTarget(&(ptrInstance->MixerStream[1]), TargetGain);
}
else
{
/* Soft mixing has to be applied */
HardMixing = FALSE;
}
}
if (HardMixing == FALSE)
{
LVC_Core_MixSoft_1St_MC_float_WRA (&pInstance[0],
src, dst, NrFrames, NrChannels);
}
}
/******************************************************************************
HARD MIXING
*******************************************************************************/
if (HardMixing == TRUE)
{
if ((pInstance1->Target == LVM_MAXFLOAT) && (pInstance2->Target == LVM_MAXFLOAT))
{
if (src != dst)
{
Copy_Float(src, dst, NrFrames*NrChannels);
}
}
else
{
LVC_Core_MixHard_1St_MC_float_SAT(&(pInstance[0]),
src, dst, NrFrames, NrChannels);
}
}
/******************************************************************************
CALL BACK
*******************************************************************************/
if (ptrInstance->MixerStream[0].CallbackSet)
{
if (Abs_Float(pInstance1->Current - pInstance1->Target) < pInstance1->Delta)
{
pInstance1->Current = pInstance1->Target; /* Difference is not significant anymore. \
Make them equal. */
TargetGain = pInstance1->Target;
LVC_Mixer_SetTarget(&ptrInstance->MixerStream[0], TargetGain);
ptrInstance->MixerStream[0].CallbackSet = FALSE;
if (ptrInstance->MixerStream[0].pCallBack != 0)
{
(*ptrInstance->MixerStream[0].pCallBack) (\
ptrInstance->MixerStream[0].pCallbackHandle,
ptrInstance->MixerStream[0].pGeneralPurpose,
ptrInstance->MixerStream[0].CallbackParam);
}
}
}
if (ptrInstance->MixerStream[1].CallbackSet)
{
if (Abs_Float(pInstance2->Current - pInstance2->Target) < pInstance2->Delta)
{
pInstance2->Current = pInstance2->Target; /* Difference is not significant anymore.
Make them equal. */
TargetGain = pInstance2->Target;
LVC_Mixer_SetTarget(&ptrInstance->MixerStream[1], TargetGain);
ptrInstance->MixerStream[1].CallbackSet = FALSE;
if (ptrInstance->MixerStream[1].pCallBack != 0)
{
(*ptrInstance->MixerStream[1].pCallBack) (\
ptrInstance->MixerStream[1].pCallbackHandle,
ptrInstance->MixerStream[1].pGeneralPurpose,
ptrInstance->MixerStream[1].CallbackParam);
}
}
}
}
#endif
void LVC_MixSoft_1St_2i_D16C31_SAT( LVMixer3_2St_FLOAT_st *ptrInstance,
const LVM_FLOAT *src,
LVM_FLOAT *dst,

@ -224,6 +224,14 @@ void LVC_MixSoft_2St_D16C31_SAT( LVMixer3_2St_st *pInstance,
/* Gain values should not be more that 1.0 */
/**********************************************************************************/
#ifdef BUILD_FLOAT
#ifdef SUPPORT_MC
void LVC_MixSoft_1St_MC_float_SAT(LVMixer3_2St_FLOAT_st *pInstance,
const LVM_FLOAT *src,
LVM_FLOAT *dst, /* dst can be equal to src */
LVM_INT16 NrFrames,
LVM_INT32 NrChannels,
LVM_INT32 ChMask);
#endif
void LVC_MixSoft_1St_2i_D16C31_SAT(LVMixer3_2St_FLOAT_st *pInstance,
const LVM_FLOAT *src,
LVM_FLOAT *dst, /* dst can be equal to src */

@ -116,6 +116,13 @@ void LVC_Core_MixHard_2St_D16C31_SAT( LVMixer3_st *pInstance1,
/* Gain values should not be more that 1.0 */
/**********************************************************************************/
#ifdef BUILD_FLOAT
#ifdef SUPPORT_MC
void LVC_Core_MixSoft_1St_MC_float_WRA(Mix_Private_FLOAT_st **ptrInstance,
const LVM_FLOAT *src,
LVM_FLOAT *dst,
LVM_INT16 NrFrames,
LVM_INT16 NrChannels);
#endif
void LVC_Core_MixSoft_1St_2i_D16C31_WRA( LVMixer3_FLOAT_st *ptrInstance1,
LVMixer3_FLOAT_st *ptrInstance2,
const LVM_FLOAT *src,
@ -136,6 +143,13 @@ void LVC_Core_MixSoft_1St_2i_D16C31_WRA( LVMixer3_st *ptrInstance1,
/* Gain values should not be more that 1.0 */
/**********************************************************************************/
#ifdef BUILD_FLOAT
#ifdef SUPPORT_MC
void LVC_Core_MixHard_1St_MC_float_SAT(Mix_Private_FLOAT_st **ptrInstance,
const LVM_FLOAT *src,
LVM_FLOAT *dst,
LVM_INT16 NrFrames,
LVM_INT16 NrChannels);
#endif
void LVC_Core_MixHard_1St_2i_D16C31_SAT( LVMixer3_FLOAT_st *ptrInstance1,
LVMixer3_FLOAT_st *ptrInstance2,
const LVM_FLOAT *src,

@ -36,6 +36,10 @@ flags_arr=(
"-csE -tE"
"-csE -eqE" "-tE -eqE"
"-csE -tE -bE -M -eqE"
"-tE -eqE -vcBal:96 -M"
"-tE -eqE -vcBal:-96 -M"
"-tE -eqE -vcBal:0 -M"
"-tE -eqE -bE -vcBal:30 -M"
)
fs_arr=(
@ -56,26 +60,41 @@ fs_arr=(
# run multichannel effects at different configs, saving only the stereo channel
# pair.
error_count=0
for flags in "${flags_arr[@]}"
do
for fs in ${fs_arr[*]}
do
for chMask in {0..22}
do
adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw \
-o:$testdir/sinesweep_$((chMask))_$((fs)).raw -chMask:$chMask -fs:$fs $flags
shell_ret=$?
if [ $shell_ret -ne 0 ]; then
echo "error: $shell_ret"
((++error_count))
fi
# two channel files should be identical to higher channel
# computation (first 2 channels).
# Do not compare cases where -bE is in flags (due to mono computation)
if [[ $flags != *"-bE"* ]] && [ "$ch" -gt 2 ]
if [[ $flags != *"-bE"* ]] && [[ "$chMask" -gt 1 ]]
then
adb shell cmp $testdir/sinesweep_1_$((fs)).raw \
$testdir/sinesweep_$((chMask))_$((fs)).raw
elif [[ $flags == *"-bE"* ]] && [[ "$chMask" -gt 1 ]]
then
adb shell $testdir/snr $testdir/sinesweep_1_$((fs)).raw \
$testdir/sinesweep_$((chMask))_$((fs)).raw -thr:90.308998
fi
# both cmp and snr return EXIT_FAILURE on mismatch.
shell_ret=$?
if [ $shell_ret -ne 0 ]; then
echo "error: $shell_ret"
((++error_count))
fi
done
@ -83,3 +102,5 @@ do
done
adb shell rm -r $testdir
echo "$error_count errors"
exit $error_count

@ -24,6 +24,7 @@
#include <audio_utils/channels.h>
#include <audio_utils/primitives.h>
#include <log/log.h>
#include <system/audio.h>
#include "EffectBundle.h"
#include "LVM_Private.h"
@ -76,6 +77,8 @@
struct lvmConfigParams_t {
int samplingFreq = 44100;
int nrChannels = 2;
int chMask = AUDIO_CHANNEL_OUT_STEREO;
int vcBal = 0;
int fChannels = 2;
bool monoMode = false;
int bassEffectLevel = 0;
@ -87,9 +90,36 @@ struct lvmConfigParams_t {
LVM_Mode_en csEnable = LVM_MODE_OFF;
};
constexpr audio_channel_mask_t lvmConfigChMask[] = {
AUDIO_CHANNEL_OUT_MONO,
AUDIO_CHANNEL_OUT_STEREO,
AUDIO_CHANNEL_OUT_2POINT1,
AUDIO_CHANNEL_OUT_2POINT0POINT2,
AUDIO_CHANNEL_OUT_QUAD,
AUDIO_CHANNEL_OUT_QUAD_BACK,
AUDIO_CHANNEL_OUT_QUAD_SIDE,
AUDIO_CHANNEL_OUT_SURROUND,
(1 << 4) - 1,
AUDIO_CHANNEL_OUT_2POINT1POINT2,
AUDIO_CHANNEL_OUT_3POINT0POINT2,
AUDIO_CHANNEL_OUT_PENTA,
(1 << 5) - 1,
AUDIO_CHANNEL_OUT_3POINT1POINT2,
AUDIO_CHANNEL_OUT_5POINT1,
AUDIO_CHANNEL_OUT_5POINT1_BACK,
AUDIO_CHANNEL_OUT_5POINT1_SIDE,
(1 << 6) - 1,
AUDIO_CHANNEL_OUT_6POINT1,
(1 << 7) - 1,
AUDIO_CHANNEL_OUT_5POINT1POINT2,
AUDIO_CHANNEL_OUT_7POINT1,
(1 << 8) - 1,
};
void printUsage() {
printf("\nUsage: ");
printf("\n <exceutable> -i:<input_file> -o:<out_file> [options]\n");
printf("\n <executable> -i:<input_file> -o:<out_file> [options]\n");
printf("\nwhere, \n <inputfile> is the input file name");
printf("\n on which LVM effects are applied");
printf("\n <outputfile> processed output file");
@ -98,7 +128,34 @@ void printUsage() {
printf("\n -help (or) -h");
printf("\n Prints this usage information");
printf("\n");
printf("\n -ch:<process_channels> (1 through 8)\n\n");
printf("\n -chMask:<channel_mask>\n");
printf("\n 0 - AUDIO_CHANNEL_OUT_MONO");
printf("\n 1 - AUDIO_CHANNEL_OUT_STEREO");
printf("\n 2 - AUDIO_CHANNEL_OUT_2POINT1");
printf("\n 3 - AUDIO_CHANNEL_OUT_2POINT0POINT2");
printf("\n 4 - AUDIO_CHANNEL_OUT_QUAD");
printf("\n 5 - AUDIO_CHANNEL_OUT_QUAD_BACK");
printf("\n 6 - AUDIO_CHANNEL_OUT_QUAD_SIDE");
printf("\n 7 - AUDIO_CHANNEL_OUT_SURROUND");
printf("\n 8 - canonical channel index mask for 4 ch: (1 << 4) - 1");
printf("\n 9 - AUDIO_CHANNEL_OUT_2POINT1POINT2");
printf("\n 10 - AUDIO_CHANNEL_OUT_3POINT0POINT2");
printf("\n 11 - AUDIO_CHANNEL_OUT_PENTA");
printf("\n 12 - canonical channel index mask for 5 ch: (1 << 5) - 1");
printf("\n 13 - AUDIO_CHANNEL_OUT_3POINT1POINT2");
printf("\n 14 - AUDIO_CHANNEL_OUT_5POINT1");
printf("\n 15 - AUDIO_CHANNEL_OUT_5POINT1_BACK");
printf("\n 16 - AUDIO_CHANNEL_OUT_5POINT1_SIDE");
printf("\n 17 - canonical channel index mask for 6 ch: (1 << 6) - 1");
printf("\n 18 - AUDIO_CHANNEL_OUT_6POINT1");
printf("\n 19 - canonical channel index mask for 7 ch: (1 << 7) - 1");
printf("\n 20 - AUDIO_CHANNEL_OUT_5POINT1POINT2");
printf("\n 21 - AUDIO_CHANNEL_OUT_7POINT1");
printf("\n 22 - canonical channel index mask for 8 ch: (1 << 8) - 1");
printf("\n default 0");
printf("\n -vcBal:<Left Right Balance control in dB [-96 to 96 dB]>");
printf("\n -ve values reduce Right channel while +ve value reduces Left channel");
printf("\n default 0");
printf("\n -fch:<file_channels> (1 through 8)\n\n");
printf("\n -M");
printf("\n Mono mode (force all input audio channels to be identical)");
@ -298,6 +355,7 @@ int LvmBundle_init(struct EffectContext *pContext, LVM_ControlParams_t *params)
params->OperatingMode = LVM_MODE_ON;
params->SampleRate = LVM_FS_44100;
params->SourceFormat = LVM_STEREO;
params->ChMask = AUDIO_CHANNEL_OUT_STEREO;
params->SpeakerType = LVM_HEADPHONES;
pContext->pBundledContext->SampleRate = LVM_FS_44100;
@ -452,13 +510,13 @@ int lvmControl(struct EffectContext *pContext,
params->OperatingMode = LVM_MODE_ON;
params->SpeakerType = LVM_HEADPHONES;
params->ChMask = plvmConfigParams->chMask;
params->NrChannels = plvmConfigParams->nrChannels;
if (params->NrChannels == 1) {
params->SourceFormat = LVM_MONO;
} else if (params->NrChannels == 2) {
params->SourceFormat = LVM_STEREO;
} else if (params->NrChannels > 2 && params->NrChannels <= 8) { // FCC_2 FCC_8
params->SourceFormat = LVM_MULTICHANNEL;
} else {
return -EINVAL;
@ -531,7 +589,7 @@ int lvmControl(struct EffectContext *pContext,
/* Volume Control parameters */
params->VC_EffectLevel = 0;
params->VC_Balance = plvmConfigParams->vcBal;
/* Treble Enhancement parameters */
params->TE_OperatingMode = plvmConfigParams->trebleEnable;
@ -667,13 +725,21 @@ int main(int argc, const char *argv[]) {
return -1;
}
lvmConfigParams.samplingFreq = samplingFreq;
} else if (!strncmp(argv[i], "-ch:", 4)) {
const int nrChannels = atoi(argv[i] + 4);
if (nrChannels > 8 || nrChannels < 1) {
printf("Error: Unsupported number of channels : %d\n", nrChannels);
} else if (!strncmp(argv[i], "-chMask:", 8)) {
const int chMaskConfigIdx = atoi(argv[i] + 8);
if (chMaskConfigIdx < 0 || (size_t)chMaskConfigIdx >= std::size(lvmConfigChMask)) {
ALOGE("\nError: Unsupported Channel Mask : %d\n", chMaskConfigIdx);
return -1;
}
lvmConfigParams.nrChannels = nrChannels;
const audio_channel_mask_t chMask = lvmConfigChMask[chMaskConfigIdx];
lvmConfigParams.chMask = chMask;
lvmConfigParams.nrChannels = audio_channel_count_from_out_mask(chMask);
} else if (!strncmp(argv[i], "-vcBal:", 7)) {
const int vcBalance = atoi(argv[i] + 7);
if (vcBalance > 96 || vcBalance < -96) {
ALOGE("\nError: Unsupported volume balance value: %d\n", vcBalance);
return -1;
}
lvmConfigParams.vcBal = vcBalance;
} else if (!strncmp(argv[i], "-fch:", 5)) {
const int fChannels = atoi(argv[i] + 5);
if (fChannels > 8 || fChannels < 1) {

@ -84,6 +84,7 @@ int main(int argc, const char *argv[]) {
printf("\nError: missing input/reference files\n");
return -1;
}
int ret = EXIT_SUCCESS;
auto sn = pcm_format == 0
? getSignalNoise<short>(finp, fref)
: getSignalNoise<float>(finp, fref);
@ -92,6 +93,7 @@ int main(int argc, const char *argv[]) {
// compare the measured snr value with threshold
if (snr < thr) {
printf("%.6f less than threshold %.6f\n", snr, thr);
ret = EXIT_FAILURE;
} else {
printf("%.6f\n", snr);
}
@ -99,5 +101,5 @@ int main(int argc, const char *argv[]) {
fclose(finp);
fclose(fref);
return ret;
}

@ -1315,6 +1315,7 @@ int Effect_setConfig(EffectContext *pContext, effect_config_t *pConfig){
#ifdef SUPPORT_MC
ActiveParams.NrChannels = NrChannels;
ActiveParams.ChMask = pConfig->inputCfg.channels;
#endif
LvmStatus = LVM_SetControlParameters(pContext->pBundledContext->hInstance, &ActiveParams);

@ -66,8 +66,8 @@ enum {
ENABLE_AUDIO_DEVICE_CALLBACK,
GET_ACTIVE_MICROPHONES,
GET_PORT_ID,
SET_PREFERRED_MICROPHONE_DIRECTION,
SET_PREFERRED_MICROPHONE_FIELD_DIMENSION
};
class BpMediaRecorder: public BpInterface<IMediaRecorder>
@ -409,21 +409,21 @@ public:
return status;
}
status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction) {
ALOGV("setPreferredMicrophoneDirection(%d)", direction);
Parcel data, reply;
data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
data.writeInt32(direction);
status_t status = remote()->transact(SET_PREFERRED_MICROPHONE_DIRECTION, data, &reply);
return status == NO_ERROR ? (status_t)reply.readInt32() : status;
}
status_t setPreferredMicrophoneFieldDimension(float zoom) {
ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
Parcel data, reply;
data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
data.writeFloat(zoom);
status_t status = remote()->transact(SET_PREFERRED_MICROPHONE_FIELD_DIMENSION, data, &reply);
return status == NO_ERROR ? (status_t)reply.readInt32() : status;
}
@ -709,20 +709,20 @@ status_t BnMediaRecorder::onTransact(
}
return NO_ERROR;
}
case SET_PREFERRED_MICROPHONE_DIRECTION: {
ALOGV("SET_PREFERRED_MICROPHONE_DIRECTION");
CHECK_INTERFACE(IMediaRecorder, data, reply);
int direction = data.readInt32();
status_t status = setPreferredMicrophoneDirection(
static_cast<audio_microphone_direction_t>(direction));
reply->writeInt32(status);
return NO_ERROR;
}
case SET_PREFERRED_MICROPHONE_FIELD_DIMENSION: {
ALOGV("SET_PREFERRED_MICROPHONE_FIELD_DIMENSION");
CHECK_INTERFACE(IMediaRecorder, data, reply);
float zoom = data.readFloat();
status_t status = setPreferredMicrophoneFieldDimension(zoom);
reply->writeInt32(status);
return NO_ERROR;
}

@ -65,6 +65,7 @@ static const char *AMediaFormatKeyGroupInt32[] = {
AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL,
AMEDIAFORMAT_KEY_GRID_COLUMNS,
AMEDIAFORMAT_KEY_GRID_ROWS,
AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT,
AMEDIAFORMAT_KEY_HEIGHT,
AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD,
AMEDIAFORMAT_KEY_IS_ADTS,

@ -73,8 +73,8 @@ public:
virtual status_t enableAudioDeviceCallback(bool enabled) = 0;
virtual status_t getActiveMicrophones(
std::vector<media::MicrophoneInfo>* activeMicrophones) = 0;
virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction) = 0;
virtual status_t setPreferredMicrophoneFieldDimension(float zoom) = 0;
virtual status_t getPortId(audio_port_handle_t *portId) = 0;
};

@ -72,8 +72,8 @@ struct MediaRecorderBase {
virtual status_t enableAudioDeviceCallback(bool enabled) = 0;
virtual status_t getActiveMicrophones(
std::vector<media::MicrophoneInfo>* activeMicrophones) = 0;
virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction) = 0;
virtual status_t setPreferredMicrophoneFieldDimension(float zoom) = 0;
virtual status_t getPortId(audio_port_handle_t *portId) const = 0;

@ -264,8 +264,8 @@ public:
status_t getRoutedDeviceId(audio_port_handle_t *deviceId);
status_t enableAudioDeviceCallback(bool enabled);
status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
status_t setPreferredMicrophoneFieldDimension(float zoom);
status_t getPortId(audio_port_handle_t *portId) const;

@ -842,14 +842,14 @@ status_t MediaRecorder::getActiveMicrophones(std::vector<media::MicrophoneInfo>*
return mMediaRecorder->getActiveMicrophones(activeMicrophones);
}
status_t MediaRecorder::setPreferredMicrophoneDirection(audio_microphone_direction_t direction) {
ALOGV("setPreferredMicrophoneDirection(%d)", direction);
return mMediaRecorder->setPreferredMicrophoneDirection(direction);
}
status_t MediaRecorder::setPreferredMicrophoneFieldDimension(float zoom) {
ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
return mMediaRecorder->setPreferredMicrophoneFieldDimension(zoom);
}
status_t MediaRecorder::getPortId(audio_port_handle_t *portId) const

@ -45,10 +45,17 @@ package media.profiles {
ctor public CamcorderProfiles();
method public int getCameraId();
method public java.util.List<media.profiles.EncoderProfile> getEncoderProfile();
method public java.util.List<media.profiles.CamcorderProfiles.ImageDecoding> getImageDecoding();
method public java.util.List<media.profiles.CamcorderProfiles.ImageEncoding> getImageEncoding();
method public void setCameraId(int);
}
public static class CamcorderProfiles.ImageDecoding {
ctor public CamcorderProfiles.ImageDecoding();
method public int getMemCap();
method public void setMemCap(int);
}
public static class CamcorderProfiles.ImageEncoding {
ctor public CamcorderProfiles.ImageEncoding();
method public int getQuality();

@ -42,6 +42,11 @@
<xs:attribute name="quality" type="xs:int"/>
</xs:complexType>
</xs:element>
<xs:element name="ImageDecoding" minOccurs="0" maxOccurs="unbounded">
<xs:complexType>
<xs:attribute name="memCap" type="xs:int"/>
</xs:complexType>
</xs:element>
</xs:sequence>
<xs:attribute name="cameraId" type="xs:int"/>
</xs:complexType>

@ -538,18 +538,19 @@ status_t MediaRecorderClient::getActiveMicrophones(
return NO_INIT;
}
status_t MediaRecorderClient::setPreferredMicrophoneDirection(
audio_microphone_direction_t direction) {
ALOGV("setPreferredMicrophoneDirection(%d)", direction);
if (mRecorder != NULL) {
return mRecorder->setPreferredMicrophoneDirection(direction);
}
return NO_INIT;
}
status_t MediaRecorderClient::setPreferredMicrophoneFieldDimension(float zoom) {
ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
if (mRecorder != NULL) {
return mRecorder->setPreferredMicrophoneFieldDimension(zoom);
}
return NO_INIT;
}

@ -109,8 +109,8 @@ public:
virtual status_t enableAudioDeviceCallback(bool enabled);
virtual status_t getActiveMicrophones(
std::vector<media::MicrophoneInfo>* activeMicrophones);
virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
virtual status_t setPreferredMicrophoneFieldDimension(float zoom);
status_t getPortId(audio_port_handle_t *portId) override;
private:

@ -164,9 +164,12 @@ void StagefrightRecorder::updateMetrics() {
mAnalyticsItem->setInt32(kRecorderVideoIframeInterval, mIFramesIntervalSec);
// TBD mAudioSourceNode = 0;
// TBD mUse64BitFileOffset = false;
if (mMovieTimeScale != -1)
mAnalyticsItem->setInt32(kRecorderMovieTimescale, mMovieTimeScale);
if (mAudioTimeScale != -1)
mAnalyticsItem->setInt32(kRecorderAudioTimescale, mAudioTimeScale);
if (mVideoTimeScale != -1)
mAnalyticsItem->setInt32(kRecorderVideoTimescale, mVideoTimeScale);
// TBD mCameraId = 0;
// TBD mStartTimeOffsetMs = -1;
mAnalyticsItem->setInt32(kRecorderVideoProfile, mVideoEncoderProfile);
@ -2210,7 +2213,7 @@ status_t StagefrightRecorder::getMaxAmplitude(int *max) {
}
status_t StagefrightRecorder::getMetrics(Parcel *reply) {
ALOGD("StagefrightRecorder::getMetrics");
ALOGV("StagefrightRecorder::getMetrics");
if (reply == NULL) {
ALOGE("Null pointer argument");
@ -2274,20 +2277,20 @@ status_t StagefrightRecorder::getActiveMicrophones(
return NO_INIT;
}
status_t StagefrightRecorder::setPreferredMicrophoneDirection(audio_microphone_direction_t direction) {
ALOGV("setPreferredMicrophoneDirection(%d)", direction);
mSelectedMicDirection = direction;
if (mAudioSourceNode != 0) {
return mAudioSourceNode->setPreferredMicrophoneDirection(direction);
}
return NO_INIT;
}
status_t StagefrightRecorder::setPreferredMicrophoneFieldDimension(float zoom) {
ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
mSelectedMicFieldDimension = zoom;
if (mAudioSourceNode != 0) {
return mAudioSourceNode->setPreferredMicrophoneFieldDimension(zoom);
}
return NO_INIT;
}

@ -77,8 +77,8 @@ struct StagefrightRecorder : public MediaRecorderBase {
virtual void setAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback);
virtual status_t enableAudioDeviceCallback(bool enabled);
virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
virtual status_t setPreferredMicrophoneFieldDimension(float zoom);
status_t getPortId(audio_port_handle_t *portId) const override;
private:

@ -510,18 +510,18 @@ status_t AudioSource::getActiveMicrophones(
return NO_INIT;
}
status_t AudioSource::setPreferredMicrophoneDirection(audio_microphone_direction_t direction) {
ALOGV("setPreferredMicrophoneDirection(%d)", direction);
if (mRecord != 0) {
return mRecord->setPreferredMicrophoneDirection(direction);
}
return NO_INIT;
}
status_t AudioSource::setPreferredMicrophoneFieldDimension(float zoom) {
ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
if (mRecord != 0) {
return mRecord->setPreferredMicrophoneFieldDimension(zoom);
}
return NO_INIT;
}

@ -309,7 +309,6 @@ static void extractAlbumArt(
void parseVorbisComment(
AMediaFormat *fileMeta, const char *comment, size_t commentLength) {
// Haptic tag is only kept here as it will only be used in extractor to generate channel mask.
struct {
const char *const mTag;
const char *mKey;
@ -330,7 +329,7 @@ void parseVorbisComment(
{ "LYRICIST", AMEDIAFORMAT_KEY_LYRICIST },
{ "METADATA_BLOCK_PICTURE", AMEDIAFORMAT_KEY_ALBUMART },
{ "ANDROID_LOOP", AMEDIAFORMAT_KEY_LOOP },
{ "ANDROID_HAPTIC", haptic },
{ "ANDROID_HAPTIC", AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT },
};
for (size_t j = 0; j < sizeof(kMap) / sizeof(kMap[0]); ++j) {
@ -346,12 +345,12 @@ void parseVorbisComment(
if (!strcasecmp(&comment[tagLen + 1], "true")) {
AMediaFormat_setInt32(fileMeta, AMEDIAFORMAT_KEY_LOOP, 1);
}
} else if (kMap[j].mKey == haptic) {
} else if (kMap[j].mKey == AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT) {
char *end;
errno = 0;
const int hapticChannelCount = strtol(&comment[tagLen + 1], &end, 10);
if (errno == 0) {
AMediaFormat_setInt32(fileMeta, haptic, hapticChannelCount);
AMediaFormat_setInt32(fileMeta, kMap[j].mKey, hapticChannelCount);
} else {
ALOGE("Error(%d) when parsing haptic channel count", errno);
}
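
For reference, a standalone sketch of the strtol/errno pattern above applied to a comment value; the extra end-pointer check is assumed hardening, not in the patch, and also rejects values with no digits, which errno alone does not flag:

#include <cerrno>
#include <cstdio>
#include <cstdlib>

int main() {
    const char* value = "2";                  // text following "ANDROID_HAPTIC="
    char* end = nullptr;
    errno = 0;
    const long hapticChannelCount = strtol(value, &end, 10);
    if (errno == 0 && end != value) {         // end != value: assumed hardening
        printf("haptic-channel-count = %ld\n", hapticChannelCount);
    } else {
        fprintf(stderr, "Error(%d) when parsing haptic channel count\n", errno);
    }
    return 0;
}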

@ -18,6 +18,7 @@
#define LOG_TAG "RemoteMediaExtractor"
#include <utils/Log.h>
#include <binder/IPCThreadState.h>
#include <media/stagefright/InterfaceUtils.h>
#include <media/MediaAnalyticsItem.h>
#include <media/MediaSource.h>
@ -51,6 +52,11 @@ RemoteMediaExtractor::RemoteMediaExtractor(
if (MEDIA_LOG) {
mAnalyticsItem = MediaAnalyticsItem::create(kKeyExtractor);
// we're in the extractor service; attribute the work to the app
// that invoked us.
int uid = IPCThreadState::self()->getCallingUid();
mAnalyticsItem->setUid(uid);
// track the container format (mpeg, aac, wvm, etc)
size_t ntracks = extractor->countTracks();
mAnalyticsItem->setCString(kExtractorFormat, extractor->name());

@ -967,6 +967,11 @@ status_t convertMetaDataToMessage(
if (meta->findInt32(kKeyPcmEncoding, &pcmEncoding)) {
msg->setInt32("pcm-encoding", pcmEncoding);
}
int32_t hapticChannelCount;
if (meta->findInt32(kKeyHapticChannelCount, &hapticChannelCount)) {
msg->setInt32("haptic-channel-count", hapticChannelCount);
}
}
int32_t maxInputSize;
@ -1708,6 +1713,11 @@ void convertMessageToMetaData(const sp<AMessage> &msg, sp<MetaData> &meta) {
if (msg->findInt32("pcm-encoding", &pcmEncoding)) {
meta->setInt32(kKeyPcmEncoding, pcmEncoding);
}
int32_t hapticChannelCount;
if (msg->findInt32("haptic-channel-count", &hapticChannelCount)) {
meta->setInt32(kKeyHapticChannelCount, hapticChannelCount);
}
}
int32_t maxInputSize;
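
The two hunks above are mirror images. A quick round-trip sketch, assuming the stagefright Utils.h and MetaData headers, shows the invariant they are meant to preserve:

// The AMessage key "haptic-channel-count" and the MetaData key
// kKeyHapticChannelCount must stay in sync across both conversions.
sp<AMessage> msg = new AMessage;
msg->setInt32("haptic-channel-count", 2);
sp<MetaData> meta = new MetaData;
convertMessageToMetaData(msg, meta);
int32_t hapticChannelCount = 0;
CHECK(meta->findInt32(kKeyHapticChannelCount, &hapticChannelCount));
CHECK_EQ(hapticChannelCount, 2);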

@ -173,7 +173,7 @@ Word16 lsp_init(lspState **st)
*st = NULL;
/* allocate memory */
if ((s = (lspState *) malloc(sizeof(lspState))) == NULL)
if ((s = (lspState *) calloc(sizeof(lspState), 1)) == NULL)
{
/* fprintf(stderr, "lsp_init: can not malloc state structure\n"); */
return -1;
@ -182,11 +182,13 @@ Word16 lsp_init(lspState **st)
/* Initialize quantization state */
if (0 != Q_plsf_init(&s->qSt))
{
lsp_exit(&s);
return -1;
}
if (0 != lsp_reset(s))
{
lsp_exit(&s);
return -1;
}
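
Both failure paths now call lsp_exit(&s) so the allocated state is not leaked, and calloc additionally zero-fills it. The shape of the fix, with stand-in names (widget, widget_subsystem_init, widget_exit are hypothetical):

#include <stdlib.h>

/* Hypothetical stand-ins mirroring lspState / Q_plsf_init / lsp_exit. */
typedef struct { int sub; } widget;
static int widget_subsystem_init(int *sub) { *sub = 1; return 0; }
static void widget_exit(widget **w) { free(*w); *w = NULL; }

/* An init path that can fail after allocating must release what it
 * allocated before returning; the old code returned -1 and leaked. */
static int widget_init(widget **out) {
    widget *w = (widget *) calloc(1, sizeof(widget));
    if (w == NULL) return -1;
    if (widget_subsystem_init(&w->sub) != 0) {
        widget_exit(&w);
        return -1;
    }
    *out = w;
    return 0;
}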

@ -268,13 +268,7 @@ Word16 GSMInitDecode(void **state_data,
if (Decoder_amr_init(&s->decoder_amrState)
|| Post_Process_reset(&s->postHP_state))
{
Speech_Decode_FrameState *tmp = s;
/*
* dereferencing type-punned pointer avoid
* breaking strict-aliasing rules
*/
void** tempVoid = (void**) tmp;
GSMDecodeFrameExit(tempVoid);
free(s);
return (-1);
}
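
The deleted workaround existed because writing through `(void**) tmp` type-puns the pointer; freeing the partially initialized state directly sidesteps the problem. If a void**-taking exit helper were genuinely needed, the well-defined form goes through a real void* temporary (sketch, stand-in names):

#include <stdlib.h>
static void generic_exit(void **p) { free(*p); *p = NULL; }
int main(void) {
    int *typed = (int *) malloc(sizeof(int));
    void *erased = typed;   /* well-defined: convert, then pass a real void* */
    generic_exit(&erased);
    typed = NULL;           /* keep the typed pointer in sync */
    return 0;
}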

@ -2125,7 +2125,10 @@ status_t PlaylistFetcher::extractAndQueueAccessUnits(
size_t offset = 0;
while (offset < buffer->size()) {
const uint8_t *adtsHeader = buffer->data() + offset;
CHECK_LT(offset + 5, buffer->size());
if (buffer->size() <= offset+5) {
ALOGV("buffer does not contain a complete header");
return ERROR_MALFORMED;
}
// non-const pointer for decryption if needed
uint8_t *adtsFrame = buffer->data() + offset;
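
The `offset + 5` bound guarantees six readable header bytes, which is exactly what decoding the 13-bit ADTS frame length needs. A sketch of that read, with the field layout taken from the ADTS framing spec:

#include <cstdint>

// aac_frame_length is 13 bits spanning ADTS header bytes 3..5:
// low 2 bits of byte 3, all 8 bits of byte 4, top 3 bits of byte 5.
static inline uint32_t adtsFrameLength(const uint8_t* h) {
    return ((h[3] & 0x03u) << 11) | (static_cast<uint32_t>(h[4]) << 3) | (h[5] >> 5);
}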

@ -70,8 +70,8 @@ struct AudioSource : public MediaSource, public MediaBufferObserver {
status_t removeAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback);
status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
status_t setMicrophoneDirection(audio_microphone_direction_t direction);
status_t setMicrophoneFieldDimension(float zoom);
status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
status_t setPreferredMicrophoneFieldDimension(float zoom);
status_t getPortId(audio_port_handle_t *portId) const;

@ -62,8 +62,6 @@ enum {
kKeyAV1C = 'av1c', // raw data
kKeyThumbnailHVCC = 'thvc', // raw data
kKeyD263 = 'd263', // raw data
kKeyVorbisInfo = 'vinf', // raw data
kKeyVorbisBooks = 'vboo', // raw data
kKeyOpusHeader = 'ohdr', // raw data
kKeyOpusCodecDelay = 'ocod', // uint64_t (codec delay in ns)
kKeyOpusSeekPreRoll = 'ospr', // uint64_t (seek preroll in ns)
@ -238,6 +236,8 @@ enum {
kKeyOpaqueCSD0 = 'csd0',
kKeyOpaqueCSD1 = 'csd1',
kKeyOpaqueCSD2 = 'csd2',
kKeyHapticChannelCount = 'hapC',
};
enum {

@ -149,6 +149,11 @@ void HlsSampleDecryptor::processAAC(size_t adtsHdrSize, uint8_t *data, size_t si
}
// ADTS header is included in the size
if (size < adtsHdrSize) {
ALOGV("processAAC: size (%zu) < adtsHdrSize (%zu)", size, adtsHdrSize);
android_errorWriteLog(0x534e4554, "128433933");
return;
}
size_t offset = adtsHdrSize;
size_t remainingBytes = size - adtsHdrSize;
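
The `0x534e4554` tag passed to android_errorWriteLog is ASCII "SNET", which marks the event as a potential security issue for platform logging, keyed by the bug id string. A one-line check of the encoding:

static_assert((('S' << 24) | ('N' << 16) | ('E' << 8) | 'T') == 0x534e4554,
              "android_errorWriteLog security tag spells SNET");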

@ -177,8 +177,8 @@ sp<WebmElement> WebmWriter::audioTrack(const sp<MetaData>& md) {
const void *headerData3;
size_t headerSize1, headerSize2 = sizeof(headerData2), headerSize3;
if (!md->findData(kKeyVorbisInfo, &type, &headerData1, &headerSize1)
|| !md->findData(kKeyVorbisBooks, &type, &headerData3, &headerSize3)) {
if (!md->findData(kKeyOpaqueCSD0, &type, &headerData1, &headerSize1)
|| !md->findData(kKeyOpaqueCSD1, &type, &headerData3, &headerSize3)) {
ALOGE("Missing header format keys for vorbis track");
md->dumpToLog();
return NULL;
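
This matches extractors that now publish the Vorbis identification and setup (codebook) headers under the generic opaque-CSD keys instead of the removed kKeyVorbisInfo/kKeyVorbisBooks. A producer-side sketch; identHdr/setupHdr and their sizes are placeholders:

sp<MetaData> md = new MetaData;
md->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_VORBIS);
md->setData(kKeyOpaqueCSD0, 0 /*type*/, identHdr, identHdrSize);  // identification header
md->setData(kKeyOpaqueCSD1, 0 /*type*/, setupHdr, setupHdrSize);  // setup (codebooks) header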

@ -1,6 +1,12 @@
// Signature format: 2.0
package media.codecs {
public class Alias {
ctor public Alias();
method public String getName();
method public void setName(String);
}
public class Decoders {
ctor public Decoders();
method public java.util.List<media.codecs.MediaCodec> getMediaCodec();
@ -23,6 +29,23 @@ package media.codecs {
method public void setValue(String);
}
public class Include {
ctor public Include();
method public String getHref();
method public void setHref(String);
}
public class Included {
ctor public Included();
method public media.codecs.Decoders getDecoders_optional();
method public media.codecs.Encoders getEncoders_optional();
method public java.util.List<media.codecs.Include> getInclude_optional();
method public media.codecs.Settings getSettings_optional();
method public void setDecoders_optional(media.codecs.Decoders);
method public void setEncoders_optional(media.codecs.Encoders);
method public void setSettings_optional(media.codecs.Settings);
}
public class Limit {
ctor public Limit();
method public String getIn();
@ -47,12 +70,13 @@ package media.codecs {
public class MediaCodec {
ctor public MediaCodec();
method public java.util.List<media.codecs.Feature> getFeature();
method public java.util.List<media.codecs.Limit> getLimit();
method public java.util.List<media.codecs.Alias> getAlias_optional();
method public java.util.List<media.codecs.Feature> getFeature_optional();
method public java.util.List<media.codecs.Limit> getLimit_optional();
method public String getName();
method public java.util.List<media.codecs.Quirk> getQuirk();
method public java.util.List<media.codecs.Type> getType();
method public java.util.List<media.codecs.Quirk> getQuirk_optional();
method public String getType();
method public java.util.List<media.codecs.Type> getType_optional();
method public String getUpdate();
method public void setName(String);
method public void setType(String);
@ -61,9 +85,13 @@ package media.codecs {
public class MediaCodecs {
ctor public MediaCodecs();
method public java.util.List<media.codecs.Decoders> getDecoders();
method public java.util.List<media.codecs.Encoders> getEncoders();
method public java.util.List<media.codecs.Settings> getSettings();
method public media.codecs.Decoders getDecoders_optional();
method public media.codecs.Encoders getEncoders_optional();
method public java.util.List<media.codecs.Include> getInclude_optional();
method public media.codecs.Settings getSettings_optional();
method public void setDecoders_optional(media.codecs.Decoders);
method public void setEncoders_optional(media.codecs.Encoders);
method public void setSettings_optional(media.codecs.Settings);
}
public class Quirk {
@ -89,6 +117,7 @@ package media.codecs {
public class Type {
ctor public Type();
method public java.util.List<media.codecs.Alias> getAlias();
method public java.util.List<media.codecs.Feature> getFeature();
method public java.util.List<media.codecs.Limit> getLimit();
method public String getName();
@ -99,7 +128,8 @@ package media.codecs {
public class XmlParser {
ctor public XmlParser();
method public static media.codecs.MediaCodecs read(java.io.InputStream) throws javax.xml.datatype.DatatypeConfigurationException, java.io.IOException, org.xmlpull.v1.XmlPullParserException;
method public static media.codecs.Included readIncluded(java.io.InputStream) throws javax.xml.datatype.DatatypeConfigurationException, java.io.IOException, org.xmlpull.v1.XmlPullParserException;
method public static media.codecs.MediaCodecs readMediaCodecs(java.io.InputStream) throws javax.xml.datatype.DatatypeConfigurationException, java.io.IOException, org.xmlpull.v1.XmlPullParserException;
method public static String readText(org.xmlpull.v1.XmlPullParser) throws java.io.IOException, org.xmlpull.v1.XmlPullParserException;
method public static void skip(org.xmlpull.v1.XmlPullParser) throws java.io.IOException, org.xmlpull.v1.XmlPullParserException;
}

@ -20,11 +20,22 @@
xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="MediaCodecs">
<xs:complexType>
<xs:sequence>
<xs:element name="Decoders" type="Decoders" maxOccurs="unbounded"/>
<xs:element name="Encoders" type="Encoders" maxOccurs="unbounded"/>
<xs:element name="Settings" type="Settings" maxOccurs="unbounded"/>
</xs:sequence>
<xs:choice minOccurs="0" maxOccurs="unbounded">
<xs:element name="Include" type="Include" maxOccurs="unbounded"/>
<xs:element name="Settings" type="Settings"/>
<xs:element name="Decoders" type="Decoders"/>
<xs:element name="Encoders" type="Encoders"/>
</xs:choice>
</xs:complexType>
</xs:element>
<xs:element name="Included">
<xs:complexType>
<xs:choice minOccurs="0" maxOccurs="unbounded">
<xs:element name="Include" type="Include" maxOccurs="unbounded"/>
<xs:element name="Settings" type="Settings"/>
<xs:element name="Decoders" type="Decoders"/>
<xs:element name="Encoders" type="Encoders"/>
</xs:choice>
</xs:complexType>
</xs:element>
<xs:complexType name="Decoders">
@ -43,12 +54,13 @@
</xs:sequence>
</xs:complexType>
<xs:complexType name="MediaCodec">
<xs:sequence>
<xs:element name="Quirk" type="Quirk" maxOccurs="unbounded"/>
<xs:element name="Type" type="Type" maxOccurs="unbounded"/>
<xs:element name="Limit" type="Limit" maxOccurs="unbounded"/>
<xs:element name="Feature" type="Feature" maxOccurs="unbounded"/>
</xs:sequence>
<xs:choice minOccurs="0" maxOccurs="unbounded">
<xs:element name="Quirk" type="Quirk" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="Type" type="Type" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="Alias" type="Alias" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="Limit" type="Limit" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="Feature" type="Feature" minOccurs="0" maxOccurs="unbounded"/>
</xs:choice>
<xs:attribute name="name" type="xs:string"/>
<xs:attribute name="type" type="xs:string"/>
<xs:attribute name="update" type="xs:string"/>
@ -58,12 +70,16 @@
</xs:complexType>
<xs:complexType name="Type">
<xs:sequence>
<xs:element name="Limit" type="Limit" maxOccurs="unbounded"/>
<xs:element name="Feature" type="Feature" maxOccurs="unbounded"/>
<xs:element name="Alias" type="Alias" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="Limit" type="Limit" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="Feature" type="Feature" minOccurs="0" maxOccurs="unbounded"/>
</xs:sequence>
<xs:attribute name="name" type="xs:string"/>
<xs:attribute name="update" type="xs:string"/>
</xs:complexType>
<xs:complexType name="Alias">
<xs:attribute name="name" type="xs:string"/>
</xs:complexType>
<xs:complexType name="Limit">
<xs:attribute name="name" type="xs:string"/>
<xs:attribute name="default" type="xs:string"/>
@ -86,4 +102,7 @@
<xs:attribute name="value" type="xs:string"/>
<xs:attribute name="update" type="xs:string"/>
</xs:complexType>
<xs:complexType name="Include">
<xs:attribute name="href" type="xs:string"/>
</xs:complexType>
</xs:schema>

@ -324,6 +324,7 @@ EXPORT const char* AMEDIAFORMAT_KEY_FRAME_RATE = "frame-rate";
EXPORT const char* AMEDIAFORMAT_KEY_GENRE = "genre";
EXPORT const char* AMEDIAFORMAT_KEY_GRID_COLUMNS = "grid-cols";
EXPORT const char* AMEDIAFORMAT_KEY_GRID_ROWS = "grid-rows";
EXPORT const char* AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT = "haptic-channel-count";
EXPORT const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO = "hdr-static-info";
EXPORT const char* AMEDIAFORMAT_KEY_HDR10_PLUS_INFO = "hdr10-plus-info";
EXPORT const char* AMEDIAFORMAT_KEY_HEIGHT = "height";
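
A hypothetical NDK consumer of the new key; the AMediaFormat is assumed to come from an extractor track:

int32_t hapticChannels = 0;
if (AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT, &hapticChannels)) {
    // The track carries hapticChannels haptic channels alongside its audio.
}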

@ -209,6 +209,7 @@ extern const char* AMEDIAFORMAT_KEY_EXIF_OFFSET __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_EXIF_SIZE __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_FRAME_COUNT __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_GENRE __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_ICC_PROFILE __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_IS_SYNC_FRAME __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_LOCATION __INTRODUCED_IN(29);

@ -89,6 +89,7 @@ LIBMEDIANDK {
AMEDIAFORMAT_KEY_GENRE; # var introduced=29
AMEDIAFORMAT_KEY_GRID_COLUMNS; # var introduced=28
AMEDIAFORMAT_KEY_GRID_ROWS; # var introduced=28
AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT; # var introduced=29
AMEDIAFORMAT_KEY_HDR_STATIC_INFO; # var introduced=28
AMEDIAFORMAT_KEY_HEIGHT; # var introduced=21
AMEDIAFORMAT_KEY_ICC_PROFILE; # var introduced=29

@ -615,9 +615,9 @@ using effect_buffer_t = int16_t;
virtual binder::Status stop();
virtual binder::Status getActiveMicrophones(
std::vector<media::MicrophoneInfo>* activeMicrophones);
virtual binder::Status setMicrophoneDirection(
virtual binder::Status setPreferredMicrophoneDirection(
int /*audio_microphone_direction_t*/ direction);
virtual binder::Status setMicrophoneFieldDimension(float zoom);
virtual binder::Status setPreferredMicrophoneFieldDimension(float zoom);
private:
const sp<RecordThread::RecordTrack> mRecordTrack;

@ -71,8 +71,8 @@ public:
status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
status_t setMicrophoneDirection(audio_microphone_direction_t direction);
status_t setMicrophoneFieldDimension(float zoom);
status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
status_t setPreferredMicrophoneFieldDimension(float zoom);
static bool checkServerLatencySupported(
audio_format_t format, audio_input_flags_t flags) {

@ -7718,18 +7718,19 @@ status_t AudioFlinger::RecordThread::getActiveMicrophones(
return status;
}
status_t AudioFlinger::RecordThread::setMicrophoneDirection(audio_microphone_direction_t direction)
status_t AudioFlinger::RecordThread::setPreferredMicrophoneDirection(
audio_microphone_direction_t direction)
{
ALOGV("setMicrophoneDirection(%d)", direction);
ALOGV("setPreferredMicrophoneDirection(%d)", direction);
AutoMutex _l(mLock);
return mInput->stream->setMicrophoneDirection(direction);
return mInput->stream->setPreferredMicrophoneDirection(direction);
}
status_t AudioFlinger::RecordThread::setMicrophoneFieldDimension(float zoom)
status_t AudioFlinger::RecordThread::setPreferredMicrophoneFieldDimension(float zoom)
{
ALOGV("setMicrophoneFieldDimension(%f)", zoom);
ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
AutoMutex _l(mLock);
return mInput->stream->setMicrophoneFieldDimension(zoom);
return mInput->stream->setPreferredMicrophoneFieldDimension(zoom);
}
void AudioFlinger::RecordThread::updateMetadata_l()

@ -1607,8 +1607,8 @@ public:
status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
status_t setMicrophoneDirection(audio_microphone_direction_t direction);
status_t setMicrophoneFieldDimension(float zoom);
status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
status_t setPreferredMicrophoneFieldDimension(float zoom);
void updateMetadata_l() override;

@ -1838,16 +1838,16 @@ binder::Status AudioFlinger::RecordHandle::getActiveMicrophones(
mRecordTrack->getActiveMicrophones(activeMicrophones));
}
binder::Status AudioFlinger::RecordHandle::setMicrophoneDirection(
binder::Status AudioFlinger::RecordHandle::setPreferredMicrophoneDirection(
int /*audio_microphone_direction_t*/ direction) {
ALOGV("%s()", __func__);
return binder::Status::fromStatusT(mRecordTrack->setMicrophoneDirection(
return binder::Status::fromStatusT(mRecordTrack->setPreferredMicrophoneDirection(
static_cast<audio_microphone_direction_t>(direction)));
}
binder::Status AudioFlinger::RecordHandle::setMicrophoneFieldDimension(float zoom) {
binder::Status AudioFlinger::RecordHandle::setPreferredMicrophoneFieldDimension(float zoom) {
ALOGV("%s()", __func__);
return binder::Status::fromStatusT(mRecordTrack->setMicrophoneFieldDimension(zoom));
return binder::Status::fromStatusT(mRecordTrack->setPreferredMicrophoneFieldDimension(zoom));
}
// ----------------------------------------------------------------------------
@ -2144,22 +2144,22 @@ status_t AudioFlinger::RecordThread::RecordTrack::getActiveMicrophones(
}
}
status_t AudioFlinger::RecordThread::RecordTrack::setMicrophoneDirection(
status_t AudioFlinger::RecordThread::RecordTrack::setPreferredMicrophoneDirection(
audio_microphone_direction_t direction) {
sp<ThreadBase> thread = mThread.promote();
if (thread != 0) {
RecordThread *recordThread = (RecordThread *)thread.get();
return recordThread->setMicrophoneDirection(direction);
return recordThread->setPreferredMicrophoneDirection(direction);
} else {
return BAD_VALUE;
}
}
status_t AudioFlinger::RecordThread::RecordTrack::setMicrophoneFieldDimension(float zoom) {
status_t AudioFlinger::RecordThread::RecordTrack::setPreferredMicrophoneFieldDimension(float zoom) {
sp<ThreadBase> thread = mThread.promote();
if (thread != 0) {
RecordThread *recordThread = (RecordThread *)thread.get();
return recordThread->setMicrophoneFieldDimension(zoom);
return recordThread->setPreferredMicrophoneFieldDimension(zoom);
} else {
return BAD_VALUE;
}

@ -4828,6 +4828,7 @@ void AudioPolicyManager::closeOutput(audio_io_handle_t output)
ALOGW("closeOutput() unknown output %d", output);
return;
}
const bool closingOutputWasActive = closingOutput->isActive();
mPolicyMixes.closeOutput(closingOutput);
// look for duplicated outputs connected to the output being removed.
@ -4867,6 +4868,9 @@ void AudioPolicyManager::closeOutput(audio_io_handle_t output)
mpClientInterface->onAudioPatchListUpdate();
}
if (closingOutputWasActive) {
closingOutput->stop();
}
closingOutput->close();
removeOutput(output);
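
The fix snapshots activity before teardown starts, because the policy-mix and patch updates in between can change it, and stops an active stream before closing it. A minimal standalone sketch of that ordering (the Output type is a stand-in):

#include <cstdio>
struct Output {
    bool active = true;
    bool isActive() const { return active; }
    void stop()  { active = false; puts("stop"); }
    void close() { puts("close"); }
};
int main() {
    Output out;
    const bool closingOutputWasActive = out.isActive();  // snapshot before teardown
    // ... detach policy mixes, duplicated outputs, and patches here ...
    if (closingOutputWasActive) out.stop();              // stop an active stream first
    out.close();
}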

@ -376,15 +376,17 @@ status_t AudioPolicyService::getInputForAttr(const audio_attributes_t *attr,
return PERMISSION_DENIED;
}
bool canCaptureOutput = captureAudioOutputAllowed(pid, uid);
if ((attr->source == AUDIO_SOURCE_VOICE_UPLINK ||
attr->source == AUDIO_SOURCE_VOICE_DOWNLINK ||
attr->source == AUDIO_SOURCE_VOICE_CALL ||
attr->source == AUDIO_SOURCE_ECHO_REFERENCE) &&
!captureAudioOutputAllowed(pid, uid)) {
!canCaptureOutput) {
return PERMISSION_DENIED;
}
if ((attr->source == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed(pid, uid)) {
bool canCaptureHotword = captureHotwordAllowed(pid, uid);
if ((attr->source == AUDIO_SOURCE_HOTWORD) && !canCaptureHotword) {
return BAD_VALUE;
}
@ -415,7 +417,7 @@ status_t AudioPolicyService::getInputForAttr(const audio_attributes_t *attr,
case AudioPolicyInterface::API_INPUT_TELEPHONY_RX:
// FIXME: use the same permission as for remote submix for now.
case AudioPolicyInterface::API_INPUT_MIX_CAPTURE:
if (!captureAudioOutputAllowed(pid, uid)) {
if (!canCaptureOutput) {
ALOGE("getInputForAttr() permission denied: capture not allowed");
status = PERMISSION_DENIED;
}
@ -442,7 +444,8 @@ status_t AudioPolicyService::getInputForAttr(const audio_attributes_t *attr,
}
sp<AudioRecordClient> client = new AudioRecordClient(*attr, *input, uid, pid, session,
*selectedDeviceId, opPackageName);
*selectedDeviceId, opPackageName,
canCaptureOutput, canCaptureHotword);
mAudioRecordClients.add(*portId, client);
}

@ -414,32 +414,35 @@ void AudioPolicyService::updateUidStates_l()
{
// Go over all active clients and allow capture (does not force silence) in the
// following cases:
// The client is the assistant
// Another client in the same UID has already been allowed to capture
// OR The client is the assistant
// AND an accessibility service is on TOP
// AND the source is VOICE_RECOGNITION or HOTWORD
// OR uses VOICE_RECOGNITION AND is on TOP OR latest started
// OR uses HOTWORD
// AND there is no privacy sensitive active capture
// AND there is no active privacy sensitive capture or call
// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
// OR The client is an accessibility service
// AND is on TOP OR latest started
// AND the source is VOICE_RECOGNITION or HOTWORD
// OR the source is one of: AUDIO_SOURCE_VOICE_DOWNLINK, AUDIO_SOURCE_VOICE_UPLINK,
// AUDIO_SOURCE_VOICE_CALL
// OR the client source is virtual (remote submix, call audio TX or RX...)
// OR Any other client
// AND The assistant is not on TOP
// AND is on TOP OR latest started
// AND there is no privacy sensitive active capture
// AND there is no active privacy sensitive capture or call
// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
// TODO: manage pre-processing effects according to use case priority
sp<AudioRecordClient> topActive;
sp<AudioRecordClient> latestActive;
sp<AudioRecordClient> latestSensitiveActive;
nsecs_t topStartNs = 0;
nsecs_t latestStartNs = 0;
nsecs_t latestSensitiveStartNs = 0;
bool isA11yOnTop = mUidPolicy->isA11yOnTop();
bool isAssistantOnTop = false;
bool isSensitiveActive = false;
bool isInCall = mPhoneState == AUDIO_MODE_IN_CALL;
// if Sensor Privacy is enabled then all recordings should be silenced.
if (mSensorPrivacyPolicy->isSensorPrivacyEnabled()) {
@ -449,15 +452,18 @@ void AudioPolicyService::updateUidStates_l()
for (size_t i =0; i < mAudioRecordClients.size(); i++) {
sp<AudioRecordClient> current = mAudioRecordClients[i];
if (!current->active) continue;
if (isPrivacySensitiveSource(current->attributes.source)) {
if (current->startTimeNs > latestSensitiveStartNs) {
latestSensitiveActive = current;
latestSensitiveStartNs = current->startTimeNs;
}
isSensitiveActive = true;
if (!current->active) {
continue;
}
app_state_t appState = apmStatFromAmState(mUidPolicy->getUidState(current->uid));
// clients whose app is in the IDLE state are not eligible for top active or
// latest active
if (appState == APP_STATE_IDLE) {
continue;
}
if (mUidPolicy->getUidState(current->uid) == ActivityManager::PROCESS_STATE_TOP) {
if (appState == APP_STATE_TOP) {
if (current->startTimeNs > topStartNs) {
topActive = current;
topStartNs = current->startTimeNs;
@ -470,72 +476,105 @@ void AudioPolicyService::updateUidStates_l()
latestActive = current;
latestStartNs = current->startTimeNs;
}
if (isPrivacySensitiveSource(current->attributes.source)) {
if (current->startTimeNs > latestSensitiveStartNs) {
latestSensitiveActive = current;
latestSensitiveStartNs = current->startTimeNs;
}
isSensitiveActive = true;
}
}
if (topActive == nullptr && latestActive == nullptr) {
return;
// if no active client with UI on Top, consider latest active as top
if (topActive == nullptr) {
topActive = latestActive;
}
if (topActive != nullptr) {
latestActive = nullptr;
}
std::vector<uid_t> enabledUids;
for (size_t i =0; i < mAudioRecordClients.size(); i++) {
sp<AudioRecordClient> current = mAudioRecordClients[i];
if (!current->active) continue;
if (!current->active) {
continue;
}
// keep capture allowed if another client with the same UID has already
// been allowed to capture
if (std::find(enabledUids.begin(), enabledUids.end(), current->uid)
!= enabledUids.end()) {
continue;
}
audio_source_t source = current->attributes.source;
bool isOnTop = current == topActive;
bool isLatest = current == latestActive;
bool isLatestSensitive = current == latestSensitiveActive;
bool forceIdle = true;
bool isTopOrLatestActive = topActive == nullptr ? false : current->uid == topActive->uid;
bool isLatestSensitive = latestSensitiveActive == nullptr ?
false : current->uid == latestSensitiveActive->uid;
// By default allow capture if:
// The assistant is not on TOP
// AND there is no active privacy sensitive capture or call
// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
bool allowCapture = !isAssistantOnTop
&& !(isSensitiveActive && !(isLatestSensitive || current->canCaptureOutput))
&& !(isInCall && !current->canCaptureOutput);
if (isVirtualSource(source)) {
forceIdle = false;
// Allow capture for virtual (remote submix, call audio TX or RX...) sources
allowCapture = true;
} else if (mUidPolicy->isAssistantUid(current->uid)) {
// For assistant allow capture if:
// An accessibility service is on TOP
// AND the source is VOICE_RECOGNITION or HOTWORD
// OR is on TOP OR latest started AND uses VOICE_RECOGNITION
// OR uses HOTWORD
// AND there is no active privacy sensitive capture or call
// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
if (isA11yOnTop) {
if (source == AUDIO_SOURCE_HOTWORD || source == AUDIO_SOURCE_VOICE_RECOGNITION) {
forceIdle = false;
allowCapture = true;
}
} else {
if ((((isOnTop || isLatest) && source == AUDIO_SOURCE_VOICE_RECOGNITION) ||
source == AUDIO_SOURCE_HOTWORD) && !isSensitiveActive) {
forceIdle = false;
if (((isTopOrLatestActive && source == AUDIO_SOURCE_VOICE_RECOGNITION) ||
source == AUDIO_SOURCE_HOTWORD) &&
(!(isSensitiveActive || isInCall) || current->canCaptureOutput)) {
allowCapture = true;
}
}
} else if (mUidPolicy->isA11yUid(current->uid)) {
if ((isOnTop || isLatest) &&
(source == AUDIO_SOURCE_VOICE_RECOGNITION || source == AUDIO_SOURCE_HOTWORD)) {
forceIdle = false;
}
} else {
if (!isAssistantOnTop && (isOnTop || isLatest) &&
(!isSensitiveActive || isLatestSensitive)) {
forceIdle = false;
// For accessibility service allow capture if:
// Is on TOP OR latest started
// AND the source is VOICE_RECOGNITION or HOTWORD
if (isTopOrLatestActive &&
(source == AUDIO_SOURCE_VOICE_RECOGNITION || source == AUDIO_SOURCE_HOTWORD)) {
allowCapture = true;
}
}
setAppState_l(current->uid,
forceIdle ? APP_STATE_IDLE :
apmStatFromAmState(mUidPolicy->getUidState(current->uid)));
allowCapture ? apmStatFromAmState(mUidPolicy->getUidState(current->uid)) :
APP_STATE_IDLE);
if (allowCapture) {
enabledUids.push_back(current->uid);
}
}
}
void AudioPolicyService::silenceAllRecordings_l() {
for (size_t i = 0; i < mAudioRecordClients.size(); i++) {
sp<AudioRecordClient> current = mAudioRecordClients[i];
setAppState_l(current->uid, APP_STATE_IDLE);
if (!isVirtualSource(current->attributes.source)) {
setAppState_l(current->uid, APP_STATE_IDLE);
}
}
}
/* static */
app_state_t AudioPolicyService::apmStatFromAmState(int amState) {
switch (amState) {
case ActivityManager::PROCESS_STATE_UNKNOWN:
if (amState == ActivityManager::PROCESS_STATE_UNKNOWN) {
return APP_STATE_IDLE;
case ActivityManager::PROCESS_STATE_TOP:
return APP_STATE_TOP;
default:
break;
} else if (amState <= ActivityManager::PROCESS_STATE_TOP) {
// include persistent services
return APP_STATE_TOP;
}
return APP_STATE_FOREGROUND;
}
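
The rewrite collapses every process state at or above TOP into APP_STATE_TOP; ActivityManager orders states so that smaller values are more visible, which is what pulls in persistent processes. A standalone check with stubbed constants (real values differ; only the ordering matters):

#include <cassert>
enum app_state_t { APP_STATE_IDLE, APP_STATE_FOREGROUND, APP_STATE_TOP };
namespace ActivityManager {
    constexpr int PROCESS_STATE_UNKNOWN = -1;
    constexpr int PROCESS_STATE_PERSISTENT = 0;  // stubbed value
    constexpr int PROCESS_STATE_TOP = 2;         // stubbed value
    constexpr int PROCESS_STATE_SERVICE = 10;    // stubbed value
}
app_state_t apmStatFromAmState(int amState) {
    if (amState == ActivityManager::PROCESS_STATE_UNKNOWN) return APP_STATE_IDLE;
    if (amState <= ActivityManager::PROCESS_STATE_TOP) return APP_STATE_TOP;  // incl. persistent
    return APP_STATE_FOREGROUND;
}
int main() {
    assert(apmStatFromAmState(ActivityManager::PROCESS_STATE_UNKNOWN) == APP_STATE_IDLE);
    assert(apmStatFromAmState(ActivityManager::PROCESS_STATE_PERSISTENT) == APP_STATE_TOP);
    assert(apmStatFromAmState(ActivityManager::PROCESS_STATE_SERVICE) == APP_STATE_FOREGROUND);
}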

@ -753,13 +753,17 @@ private:
AudioRecordClient(const audio_attributes_t attributes,
const audio_io_handle_t io, uid_t uid, pid_t pid,
const audio_session_t session, const audio_port_handle_t deviceId,
const String16& opPackageName) :
const String16& opPackageName,
bool canCaptureOutput, bool canCaptureHotword) :
AudioClient(attributes, io, uid, pid, session, deviceId),
opPackageName(opPackageName), startTimeNs(0) {}
opPackageName(opPackageName), startTimeNs(0),
canCaptureOutput(canCaptureOutput), canCaptureHotword(canCaptureHotword) {}
~AudioRecordClient() override = default;
const String16 opPackageName; // client package name
nsecs_t startTimeNs;
const bool canCaptureOutput;
const bool canCaptureHotword;
};
// --- AudioPlaybackClient ---

@ -321,7 +321,7 @@ status_t Camera3Stream::finishConfiguration() {
// so. As documented in hardware/camera3.h:configure_streams().
if (mState == STATE_IN_RECONFIG &&
mOldUsage == mUsage &&
mOldMaxBuffers == camera3_stream::max_buffers) {
mOldMaxBuffers == camera3_stream::max_buffers && !mDataSpaceOverridden) {
mState = STATE_CONFIGURED;
return OK;
}

@ -97,6 +97,21 @@ hardware::camera2::params::OutputConfiguration convertFromHidl(
return outputConfiguration;
}
hardware::camera2::params::SessionConfiguration convertFromHidl(
const HSessionConfiguration &hSessionConfiguration) {
hardware::camera2::params::SessionConfiguration sessionConfig(
hSessionConfiguration.inputWidth, hSessionConfiguration.inputHeight,
hSessionConfiguration.inputFormat,
static_cast<int>(hSessionConfiguration.operationMode));
for (const auto& hConfig : hSessionConfiguration.outputStreams) {
hardware::camera2::params::OutputConfiguration config = convertFromHidl(hConfig);
sessionConfig.addOutputConfiguration(config);
}
return sessionConfig;
}
// The camera metadata here is cloned. Since we're reading metadata over
// hwbinder we would need to clone it in order to avoid alignment issues.
bool convertFromHidl(const HCameraMetadata &src, CameraMetadata *dst) {

@ -53,6 +53,7 @@ using HGraphicBufferProducer = hardware::graphics::bufferqueue::V1_0::IGraphicBu
using HOutputConfiguration = frameworks::cameraservice::device::V2_0::OutputConfiguration;
using HPhysicalCameraSettings = frameworks::cameraservice::device::V2_0::PhysicalCameraSettings;
using HPhysicalCaptureResultInfo = frameworks::cameraservice::device::V2_0::PhysicalCaptureResultInfo;
using HSessionConfiguration = frameworks::cameraservice::device::V2_0::SessionConfiguration;
using HSubmitInfo = frameworks::cameraservice::device::V2_0::SubmitInfo;
using HStatus = frameworks::cameraservice::common::V2_0::Status;
using HStreamConfigurationMode = frameworks::cameraservice::device::V2_0::StreamConfigurationMode;
@ -70,6 +71,9 @@ bool convertFromHidl(const HCameraMetadata &src, CameraMetadata *dst);
hardware::camera2::params::OutputConfiguration convertFromHidl(
const HOutputConfiguration &hOutputConfiguration);
hardware::camera2::params::SessionConfiguration convertFromHidl(
const HSessionConfiguration &hSessionConfiguration);
HCameraDeviceStatus convertToHidlCameraDeviceStatus(int32_t status);
void convertToHidl(const std::vector<hardware::CameraStatus> &src,

@ -41,6 +41,7 @@ using hardware::Return;
using hardware::Void;
using HSubmitInfo = device::V2_0::SubmitInfo;
using hardware::camera2::params::OutputConfiguration;
using hardware::camera2::params::SessionConfiguration;
static constexpr int32_t CAMERA_REQUEST_METADATA_QUEUE_SIZE = 1 << 20 /* 1 MB */;
static constexpr int32_t CAMERA_RESULT_METADATA_QUEUE_SIZE = 1 << 20 /* 1 MB */;
@ -255,6 +256,18 @@ Return<HStatus> HidlCameraDeviceUser::updateOutputConfiguration(
return B2HStatus(ret);
}
Return<void> HidlCameraDeviceUser::isSessionConfigurationSupported(
const HSessionConfiguration& hSessionConfiguration,
isSessionConfigurationSupported_cb _hidl_cb) {
bool supported = false;
SessionConfiguration sessionConfiguration = convertFromHidl(hSessionConfiguration);
binder::Status ret = mDeviceRemote->isSessionConfigurationSupported(
sessionConfiguration, &supported);
HStatus status = B2HStatus(ret);
_hidl_cb(status, supported);
return Void();
}
} // implementation
} // V2_0
} // device
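
A client-side sketch of driving the new method through the HIDL callback convention; deviceUser and hConfig are assumed to be an acquired ICameraDeviceUser proxy and a populated SessionConfiguration, and the NO_ERROR enumerator is assumed from the cameraservice common Status type:

bool supported = false;
HStatus status = HStatus::NO_ERROR;
auto ret = deviceUser->isSessionConfigurationSupported(hConfig,
        [&](HStatus s, bool isSupported) {
            status = s;
            supported = isSupported;
        });
if (!ret.isOk() || status != HStatus::NO_ERROR) {
    // Transport or service error: treat the configuration as unsupported.
    supported = false;
}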

@ -53,6 +53,7 @@ using TemplateId = frameworks::cameraservice::device::V2_0::TemplateId;
using HCameraDeviceUser = device::V2_0::ICameraDeviceUser;
using HCameraMetadata = cameraservice::service::V2_0::CameraMetadata;
using HCaptureRequest = device::V2_0::CaptureRequest;
using HSessionConfiguration = frameworks::cameraservice::device::V2_0::SessionConfiguration;
using HOutputConfiguration = frameworks::cameraservice::device::V2_0::OutputConfiguration;
using HPhysicalCameraSettings = frameworks::cameraservice::device::V2_0::PhysicalCameraSettings;
using HStatus = frameworks::cameraservice::common::V2_0::Status;
@ -97,6 +98,10 @@ struct HidlCameraDeviceUser final : public HCameraDeviceUser {
virtual Return<HStatus> updateOutputConfiguration(
int32_t streamId, const HOutputConfiguration& outputConfiguration) override;
virtual Return<void> isSessionConfigurationSupported(
const HSessionConfiguration& sessionConfiguration,
isSessionConfigurationSupported_cb _hidl_cb) override;
bool initStatus() { return mInitSuccess; }
std::shared_ptr<CaptureResultMetadataQueue> getCaptureResultMetadataQueue() {
