diff --git a/Android.mk b/Android.mk index 8acc35a..f0dafd0 100644 --- a/Android.mk +++ b/Android.mk @@ -12,11 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -ifeq ($(TARGET_USE_CAMERA_VHAL), true) +#ifeq ($(TARGET_USE_CAMERA_VHAL), true) LOCAL_PATH := $(call my-dir) include $(CLEAR_VARS) +ifneq ($(TARGET_BOARD_PLATFORM), celadon) ####### Build FFmpeg modules from prebuilt libs ######### FFMPEG_PREBUILD := prebuilts/ffmpeg-4.2.2/android-x86_64 @@ -24,6 +25,7 @@ FFMPEG_LIB_PATH := ${FFMPEG_PREBUILD}/lib include $(CLEAR_VARS) LOCAL_MODULE := libavcodec +LOCAL_CHECK_ELF_FILES := false LOCAL_MULTILIB := 64 LOCAL_SRC_FILES := $(FFMPEG_LIB_PATH)/$(LOCAL_MODULE).so LOCAL_PROPRIETARY_MODULE := true @@ -33,6 +35,7 @@ include $(BUILD_PREBUILT) include $(CLEAR_VARS) LOCAL_MODULE := libswresample +LOCAL_CHECK_ELF_FILES := false LOCAL_MULTILIB := 64 LOCAL_SRC_FILES := $(FFMPEG_LIB_PATH)/$(LOCAL_MODULE).so LOCAL_PROPRIETARY_MODULE := true @@ -42,6 +45,7 @@ include $(BUILD_PREBUILT) include $(CLEAR_VARS) LOCAL_MODULE := libavutil +LOCAL_CHECK_ELF_FILES := false LOCAL_MULTILIB := 64 LOCAL_SRC_FILES := $(FFMPEG_LIB_PATH)/$(LOCAL_MODULE).so LOCAL_PROPRIETARY_MODULE := true @@ -51,6 +55,7 @@ include $(BUILD_PREBUILT) include $(CLEAR_VARS) LOCAL_MODULE := libavdevice +LOCAL_CHECK_ELF_FILES := false LOCAL_MULTILIB := 64 LOCAL_SRC_FILES := $(FFMPEG_LIB_PATH)/$(LOCAL_MODULE).so LOCAL_PROPRIETARY_MODULE := true @@ -60,6 +65,7 @@ include $(BUILD_PREBUILT) include $(CLEAR_VARS) LOCAL_MODULE := libavfilter +LOCAL_CHECK_ELF_FILES := false LOCAL_MULTILIB := 64 LOCAL_SRC_FILES := $(FFMPEG_LIB_PATH)/$(LOCAL_MODULE).so LOCAL_PROPRIETARY_MODULE := true @@ -69,6 +75,7 @@ include $(BUILD_PREBUILT) include $(CLEAR_VARS) LOCAL_MODULE := libavformat +LOCAL_CHECK_ELF_FILES := false LOCAL_MULTILIB := 64 LOCAL_SRC_FILES := $(FFMPEG_LIB_PATH)/$(LOCAL_MODULE).so LOCAL_PROPRIETARY_MODULE := true @@ -78,6 +85,7 @@ include $(BUILD_PREBUILT) include $(CLEAR_VARS) LOCAL_MODULE := libswscale +LOCAL_CHECK_ELF_FILES := false LOCAL_MULTILIB := 64 LOCAL_SRC_FILES := $(FFMPEG_LIB_PATH)/$(LOCAL_MODULE).so LOCAL_PROPRIETARY_MODULE := true @@ -85,12 +93,18 @@ LOCAL_MODULE_SUFFIX := .so LOCAL_MODULE_CLASS := SHARED_LIBRARIES include $(BUILD_PREBUILT) ########################################################## +endif include $(CLEAR_VARS) ##################### Build camera-vhal ####################### +ifeq ($(TARGET_BOARD_PLATFORM), celadon) +LOCAL_MODULE := camera.$(TARGET_BOARD_PLATFORM) +else LOCAL_MODULE := camera.$(TARGET_PRODUCT) +endif + LOCAL_MULTILIB := 64 LOCAL_VENDOR_MODULE := true @@ -108,42 +122,40 @@ camera_vhal_src := \ src/Exif.cpp \ src/Thumbnail.cpp \ src/CameraSocketServerThread.cpp \ - src/CameraSocketCommand.cpp \ - src/CGCodec.cpp - + src/CameraSocketCommand.cpp +ifneq ($(TARGET_BOARD_PLATFORM), celadon) +camera_vhal_src += src/CGCodec.cpp +endif camera_vhal_c_includes := external/libjpeg-turbo \ external/libexif \ external/libyuv/files/include \ frameworks/native/include/media/hardware \ - device/generic/goldfish/include \ - device/generic/goldfish-opengl/system/OpenglSystemCommon \ hardware/libhardware/modules/gralloc \ $(LOCAL_PATH)/include \ $(LOCAL_PATH)/$(FFMPEG_PREBUILD)/include \ $(call include-path-for, camera) camera_vhal_shared_libraries := \ - libbinder \ libexif \ liblog \ libutils \ libcutils \ - libEGL \ - libGLESv1_CM \ - libGLESv2 \ libui \ libdl \ libjpeg \ libcamera_metadata \ libhardware \ - libsync \ - libavcodec \ + libsync + +ifneq 
($(TARGET_BOARD_PLATFORM), celadon) +camera_vhal_shared_libraries += libavcodec \ libavdevice \ libavfilter \ libavformat \ libavutil \ libswresample \ libswscale +endif camera_vhal_static_libraries := \ android.hardware.camera.common@1.0-helper \ @@ -158,6 +170,12 @@ ifeq ($(BOARD_USES_GRALLOC1), true) camera_vhal_cflags += -DUSE_GRALLOC1 endif +ifeq ($(TARGET_BOARD_PLATFORM), celadon) +camera_vhal_cflags += -DGRALLOC_MAPPER4 +else +camera_vhal_cflags += -DENABLE_FFMPEG +endif + LOCAL_MODULE_RELATIVE_PATH := ${camera_vhal_module_relative_path} LOCAL_CFLAGS := ${camera_vhal_cflags} LOCAL_CPPFLAGS += -std=c++17 @@ -168,13 +186,6 @@ LOCAL_SRC_FILES := ${camera_vhal_src} LOCAL_SHARED_LIBRARIES := ${camera_vhal_shared_libraries} LOCAL_STATIC_LIBRARIES := ${camera_vhal_static_libraries} -LOCAL_EXPORT_C_INCLUDES := \ - $(LOCAL_PATH)/include \ - $(LOCAL_PATH)/$(FFMPEG_PREBUILD)/include - -# to support platfrom build system -LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_EXPORT_C_INCLUDES) - include $(BUILD_SHARED_LIBRARY) ##################################################### @@ -217,10 +228,14 @@ LOCAL_SHARED_LIBRARIES := ${jpeg_shared_libraries} LOCAL_C_INCLUDES += ${jpeg_c_includes} LOCAL_SRC_FILES := ${jpeg_src} +ifeq ($(TARGET_BOARD_PLATFORM), celadon) +LOCAL_MODULE := camera.$(TARGET_BOARD_PLATFORM).jpeg +else LOCAL_MODULE := camera.$(TARGET_PRODUCT).jpeg +endif include $(BUILD_SHARED_LIBRARY) ###################################################### -endif # TARGET_USE_CAMERA_VHAL +#endif # TARGET_USE_CAMERA_VHAL diff --git a/include/CameraSocketCommand.h b/include/CameraSocketCommand.h index 93d63ef..cd7e5b8 100644 --- a/include/CameraSocketCommand.h +++ b/include/CameraSocketCommand.h @@ -32,16 +32,17 @@ namespace android { namespace socket { -enum class VideoCodecType { kH264 = 0 }; -enum class FrameResolution { k480p = 0, k720p, k1080p }; +enum class VideoCodecType { kH264 = 1, kH265 = 2,kI420 = 4, kMJPEG = 8, kAll = 15 }; +enum class FrameResolution { k480p = 1, k720p = 2, k1080p = 4, kWXGA = 8, kAll = 15 }; -struct CameraFrameInfo { - VideoCodecType codec_type = VideoCodecType::kH264; - FrameResolution resolution = FrameResolution::k480p; - uint32_t reserved[4]; +enum class SensorOrientation { + ORIENTATION_0 = 0, + ORIENTATION_90 = 90, + ORIENTATION_180 = 180, + ORIENTATION_270 = 270 }; -enum class CameraOperation { kOpen = 11, kClose = 12, kNone = 13 }; +enum class CameraFacing { BACK_FACING = 0, FRONT_FACING = 1 }; enum class CameraSessionState { kNone, @@ -53,17 +54,73 @@ enum class CameraSessionState { extern const std::unordered_map kCameraSessionStateNames; -enum class CameraVHalVersion { - kV1 = 0, // decode out of camera vhal - kV2 = 1, // decode in camera vhal -}; +typedef enum _ack_value { + NACK_CONFIG = 0, + ACK_CONFIG = 1, +} camera_ack_t; -// has default values. 
-struct CameraConfig { - CameraVHalVersion version = CameraVHalVersion::kV2; - CameraOperation operation = CameraOperation::kNone; - CameraFrameInfo frame_info; -}; +typedef struct _camera_config { + uint32_t cameraId; + uint32_t codec_type; + uint32_t resolution; + char pkg_name[128]; + uint32_t reserved[5]; +} camera_config_t; + +typedef enum _camera_cmd { + CMD_OPEN = 11, + CMD_CLOSE = 12, +} camera_cmd_t; + +typedef enum _camera_version { + CAMERA_VHAL_VERSION_1 = 0, // decode out of camera vhal + CAMERA_VHAL_VERSION_2 = 1, // decode in camera vhal +} camera_version_t; + +typedef struct _camera_config_cmd { + camera_version_t version; + camera_cmd_t cmd; + camera_config_t config; +} camera_config_cmd_t; + +typedef struct _camera_info { + uint32_t cameraId; + uint32_t codec_type; + uint32_t resolution; + uint32_t sensorOrientation; + uint32_t facing; // '0' for back camera and '1' for front camera + uint32_t reserved[3]; +} camera_info_t; + +typedef struct _camera_capability { + uint32_t codec_type; // All supported codec_type + uint32_t resolution; // All supported resolution + uint32_t maxNumberOfCameras; // Max will be restricted to 2 + uint32_t reserved[5]; +} camera_capability_t; + +typedef enum _camera_packet_type { + REQUEST_CAPABILITY = 0, + CAPABILITY = 1, + CAMERA_CONFIG = 2, + CAMERA_DATA = 3, + ACK = 4, + CAMERA_INFO = 5, +} camera_packet_type_t; + +typedef struct _camera_header { + camera_packet_type_t type; + uint32_t size; // number of cameras * sizeof(camera_info_t) +} camera_header_t; + +typedef struct _camera_packet { + camera_header_t header; + uint8_t payload[0]; +} camera_packet_t; + +const char* camera_type_to_str(int type); +const char* codec_type_to_str(uint32_t type); +const char* resolution_to_str(uint32_t resolution); } // namespace socket } // namespace android diff --git a/include/CameraSocketServerThread.h b/include/CameraSocketServerThread.h index cf376d5..8c95de6 100644 --- a/include/CameraSocketServerThread.h +++ b/include/CameraSocketServerThread.h @@ -28,41 +28,78 @@ #include #include #include +#include +#include +#ifdef ENABLE_FFMPEG #include "CGCodec.h" +#endif #include "CameraSocketCommand.h" +#include +#include "VirtualBuffer.h" namespace android { +enum tranSock +{ + UNIX = 0, + TCP = 1, + VSOCK = 2, + PIPE = 3, +}; + class VirtualCameraFactory; class CameraSocketServerThread : public Thread { public: +#ifdef ENABLE_FFMPEG CameraSocketServerThread(std::string suffix, std::shared_ptr decoder, std::atomic &state); +#else + CameraSocketServerThread(std::string suffix, + std::atomic &state); +#endif ~CameraSocketServerThread(); virtual void requestExit(); virtual status_t requestExitAndWait(); int getClientFd(); - void clearBuffer(); - void clearBuffer(char *buffer, int width, int height); + ssize_t size_update = 0; + pthread_cond_t mSignalHotplug = PTHREAD_COND_INITIALIZER; + pthread_mutex_t mHotplugLock = PTHREAD_MUTEX_INITIALIZER; + + bool configureCapabilities(bool skipCapRead); + int UpdateCameraInfo(); private: virtual status_t readyToRun(); virtual bool threadLoop() override; + bool ProcessCameraDataFromPipe(ClientVideoBuffer *handle); + + void setCameraResolution(uint32_t resolution); + void setCameraMaxSupportedResolution(int32_t width, int32_t height); Mutex mMutex; bool mRunning; // guarding only when it's important int mSocketServerFd = -1; std::string mSocketPath; int mClientFd = -1; + int mNumOfCamerasRequested; // Number of cameras requested to support by client. 
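+    // Decoder for the incoming encoded stream; present only when in-HAL
+    // decode is compiled in via ENABLE_FFMPEG.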
+#ifdef ENABLE_FFMPEG std::shared_ptr mVideoDecoder; +#endif std::atomic &mCameraSessionState; // maximum size of a H264 packet in any aggregation packet is 65535 bytes. // Source: https://tools.ietf.org/html/rfc6184#page-13 std::array mSocketBuffer = {}; size_t mSocketBufferSize = 0; + + struct ValidateClientCapability { + bool validCodecType = false; + bool validResolution = false; + bool validOrientation = false; + bool validCameraFacing = false; + }; }; } // namespace android diff --git a/include/GrallocModule.h b/include/GrallocModule.h index ca9cab7..9a1ec1e 100644 --- a/include/GrallocModule.h +++ b/include/GrallocModule.h @@ -15,8 +15,10 @@ #ifdef USE_GRALLOC1 #include +#ifndef GRALLOC_MAPPER4 #include #endif +#endif class GrallocModule { public: @@ -110,8 +112,10 @@ class GrallocModule { int32_t fenceFd = -1; int error = m_gralloc1_unlock(m_gralloc1_device, handle, &fenceFd); if (!error) { +#ifndef GRALLOC_MAPPER4 sync_wait(fenceFd, -1); close(fenceFd); +#endif } return error; } @@ -125,7 +129,25 @@ class GrallocModule { } } } - +#ifdef GRALLOC_MAPPER4 + int importBuffer(buffer_handle_t handle, buffer_handle_t *outBuffer) { + switch (m_major_version) { + case 1: +#ifdef USE_GRALLOC1 + { + return m_gralloc1_importbuffer(m_gralloc1_device, handle, outBuffer); + } +#endif + default: { + ALOGE( + "[Gralloc] no gralloc module to import; unknown gralloc major " + "version (%d)", + m_major_version); + return -1; + } + } + } +#endif private: GrallocModule() { const hw_module_t *module = nullptr; @@ -152,6 +174,11 @@ class GrallocModule { m_gralloc1_getNumFlexPlanes = (GRALLOC1_PFN_GET_NUM_FLEX_PLANES)m_gralloc1_device->getFunction( m_gralloc1_device, GRALLOC1_FUNCTION_GET_NUM_FLEX_PLANES); +#ifdef GRALLOC_MAPPER4 + m_gralloc1_importbuffer = (GRALLOC1_PFN_IMPORT_BUFFER)m_gralloc1_device->getFunction( + m_gralloc1_device, GRALLOC1_FUNCTION_IMPORT_BUFFER); + +#endif break; #endif default: @@ -167,6 +194,9 @@ class GrallocModule { GRALLOC1_PFN_UNLOCK m_gralloc1_unlock = nullptr; GRALLOC1_PFN_LOCK_FLEX m_gralloc1_lockflex = nullptr; GRALLOC1_PFN_GET_NUM_FLEX_PLANES m_gralloc1_getNumFlexPlanes = nullptr; +#ifdef GRALLOC_MAPPER4 + GRALLOC1_PFN_IMPORT_BUFFER m_gralloc1_importbuffer=nullptr; +#endif #endif }; diff --git a/include/VirtualBaseCamera.h b/include/VirtualBaseCamera.h index 31d8f08..4314279 100644 --- a/include/VirtualBaseCamera.h +++ b/include/VirtualBaseCamera.h @@ -48,8 +48,7 @@ class VirtualBaseCamera { * Return: * NO_ERROR on success, or an appropriate error status on failure. */ - virtual status_t Initialize(const char *device_name, const char *frame_dims, - const char *facing_dir) = 0; + virtual status_t Initialize() = 0; /**************************************************************************** * Camera API implementation @@ -61,7 +60,7 @@ class VirtualBaseCamera { * NOTE: When this method is called the object is locked. * Note that failures in this method are reported as negative EXXX statuses. */ - virtual status_t connectCamera(hw_device_t **device) = 0; + virtual status_t openCamera(hw_device_t **device) = 0; /* Closes connection to the virtual camera. * This method is called in response to camera_device::close callback. 
@@ -78,6 +77,8 @@
      */
     virtual status_t getCameraInfo(struct camera_info *info) = 0;
 
+    virtual status_t setTorchMode(const char* camera_id, bool enable) = 0;
+
     /****************************************************************************
      * Data members
      ***************************************************************************/
@@ -85,13 +86,13 @@
     virtual status_t setCameraFD(int socketFd);
     virtual status_t cleanCameraFD(int socketFd);
 
+    int mCameraID;
 protected:
     /* Fixed camera information for camera2 devices. Must be valid to access if
      * mCameraDeviceVersion is >= HARDWARE_DEVICE_API_VERSION(2,0) */
     camera_metadata_t *mCameraInfo = nullptr;
 
     /* Zero-based ID assigned to this camera. */
-    int mCameraID;
     int mCameraSocketFD = -1;
 
 private:
diff --git a/include/VirtualBuffer.h b/include/VirtualBuffer.h
index b12148b..22ffb08 100644
--- a/include/VirtualBuffer.h
+++ b/include/VirtualBuffer.h
@@ -3,21 +3,61 @@
 
 #include 
 
-#define MAX_CLIENT_BUF 8
+#define BPP_NV12 1.5  // 12 bpp
+
 namespace android {
 
 extern bool gIsInFrameI420;
 extern bool gIsInFrameH264;
+extern bool gIsInFrameMJPG;
 extern bool gUseVaapi;
 
+// Max number of cameras supported, based on the client device's request.
+extern uint32_t gMaxNumOfCamerasSupported;
+
+// Max supported resolution (width and height) across all cameras.
+// Used for input buffer allocation.
+extern int32_t gMaxSupportedWidth;
+extern int32_t gMaxSupportedHeight;
+
+// Max supported resolution (width and height) of each camera.
+// This varies per camera based on the capability requested by the
+// client, and is used for metadata updates during boot.
+extern int32_t gCameraMaxWidth;
+extern int32_t gCameraMaxHeight;
+
+// Camera input resolution (width and height) at runtime.
+// It varies based on the app's request.
+extern int32_t gSrcWidth;
+extern int32_t gSrcHeight;
+
+// Input codec type, based on the client device's request.
+extern uint32_t gCodecType;
+
+// Orientation of the image sensor, based on the client device's request.
+extern uint32_t gCameraSensorOrientation;
+
+// Camera facing, based on the client device's request:
+// always true for back and false for front camera.
+extern bool gCameraFacingBack;
+
+// True once client capability info has been received successfully.
+extern bool gCapabilityInfoReceived;
+
+// Status of the metadata update, which helps to sync and update
+// the metadata for each camera separately.
+extern bool gStartMetadataUpdate;
+extern bool gDoneMetadataUpdate;
+
 enum class VideoBufferType {
     kI420,
     kARGB,
 };
 struct Resolution {
-    int width = 640;
-    int height = 480;
+    int width = gMaxSupportedWidth;
+    int height = gMaxSupportedHeight;
 };
 /// Video buffer and its information
 struct VideoBuffer {
@@ -29,12 +69,22 @@
     VideoBufferType type;
     ~VideoBuffer() {}
 
+    // Reset the whole allocated buffer.
     void reset() {
         std::fill(buffer, buffer + resolution.width * resolution.height, 0x10);
         uint8_t* uv_offset = buffer + resolution.width * resolution.height;
         std::fill(uv_offset, uv_offset + (resolution.width * resolution.height) / 2, 0x80);
         decoded = false;
     }
+
+    // Clear the used portion of the buffer based on the current resolution.
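+    // The buffer holds NV12: a gSrcWidth x gSrcHeight Y plane followed by an
+    // interleaved UV plane half that size, so Y bytes are set to 0x10 and UV
+    // bytes to 0x80 (black in YUV).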
+ void clearBuffer() { + std::fill(buffer, buffer + gSrcWidth * gSrcHeight, 0x10); + uint8_t* uv_offset = buffer + gSrcWidth * gSrcHeight; + std::fill(uv_offset, uv_offset + (gSrcWidth * gSrcHeight) / 2, 0x80); + decoded = false; + } + bool decoded = false; }; @@ -58,8 +108,8 @@ class ClientVideoBuffer { ClientVideoBuffer() { for (int i = 0; i < 1; i++) { - clientBuf[i].buffer = - new uint8_t[clientBuf[i].resolution.width * clientBuf[i].resolution.height * 3 / 2]; + clientBuf[i].buffer = new uint8_t[clientBuf[i].resolution.width * + clientBuf[i].resolution.height * BPP_NV12]; } clientRevCount = 0; clientUsedCount = 0; @@ -78,6 +128,14 @@ class ClientVideoBuffer { clientRevCount = clientUsedCount = 0; receivedFrameNo = decodedFrameNo = 0; } + + void clearBuffer() { + for (int i = 0; i < 1; i++) { + clientBuf[i].clearBuffer(); + } + clientRevCount = clientUsedCount = 0; + receivedFrameNo = decodedFrameNo = 0; + } }; extern std::mutex client_buf_mutex; }; // namespace android diff --git a/include/VirtualCamera3.h b/include/VirtualCamera3.h index 8cccf28..132abb4 100644 --- a/include/VirtualCamera3.h +++ b/include/VirtualCamera3.h @@ -85,19 +85,20 @@ class VirtualCamera3 : public camera3_device, public VirtualBaseCamera { ***************************************************************************/ public: - virtual status_t Initialize(const char *device_name, const char *frame_dims, - const char *facing_dir); + virtual status_t Initialize(); /**************************************************************************** * Camera module API and generic hardware device API implementation ***************************************************************************/ public: - virtual status_t connectCamera(hw_device_t **device); + virtual status_t openCamera(hw_device_t **device); virtual status_t closeCamera(); virtual status_t getCameraInfo(struct camera_info *info); + + virtual status_t setTorchMode(const char* camera_id, bool enable); /**************************************************************************** * Camera API implementation. diff --git a/include/VirtualCameraFactory.h b/include/VirtualCameraFactory.h index a5b388d..c896331 100644 --- a/include/VirtualCameraFactory.h +++ b/include/VirtualCameraFactory.h @@ -26,7 +26,11 @@ #include #include #include "CameraSocketServerThread.h" +#ifdef ENABLE_FFMPEG #include "CGCodec.h" +#endif + +#define MAX_NUMBER_OF_SUPPORTED_CAMERAS 2 // Max restricted to two, but can be extended. namespace android { @@ -90,6 +94,13 @@ class VirtualCameraFactory { * callback. */ int getCameraInfo(int camera_id, struct camera_info *info); +/* + * Gets virtual camera torch mode support. + * + * This method is called in response to camera_module_t::isSetTorchModeSupported + * callback. + */ + int setTorchMode(const char* camera_id, bool enable); /* * Sets virtual camera callbacks. @@ -122,6 +133,12 @@ class VirtualCameraFactory { */ static int get_camera_info(int camera_id, struct camera_info *info); + + /* + * camera_module_t::get_torch_support_info callback entry point. + */ + static int set_torch_mode(const char* camera_id, bool enable); + /* * camera_module_t::set_callbacks callback entry point. */ @@ -150,36 +167,27 @@ class VirtualCameraFactory { ***************************************************************************/ /* - * Gets fake camera orientation. - */ - int getFakeCameraOrientation() { - const char *key = "remote.camera.fake.orientation"; - int degree = property_get_int32(key, 90); - return degree; - } - - /* - * Gets number of virtual cameras. 
+ * Gets number of virtual remote cameras. */ - int getVirtualCameraNum() const { return mVirtualCameraNum; } + int getVirtualCameraNum() const { return mNumOfCamerasSupported; } /* * Checks whether or not the constructor has succeeded. */ bool isConstructedOK() const { return mConstructedOK; } -private: + bool constructVirtualCamera(); + /**************************************************************************** * Private API ***************************************************************************/ /* - * Creates a fake camera and adds it to mVirtualCameras. If backCamera is - * true, it will be created as if it were a camera on the back of the phone. - * Otherwise, it will be front-facing. + * Creates a virtual remote camera and adds it to mVirtualCameras. */ - void createFakeCamera(std::shared_ptr socket_server, - std::shared_ptr decoder, bool backCamera); +void createVirtualRemoteCamera(std::shared_ptr socket_server, + int cameraId); +private: /* * Waits till remote-props has done setup, timeout after 500ms. */ @@ -206,11 +214,8 @@ class VirtualCameraFactory { // Array of cameras available for the emulation. VirtualBaseCamera **mVirtualCameras; - // Number of virtual cameras (including the fake ones). - int mVirtualCameraNum; - - // Number of virtual fake cameras. - int mFakeCameraNum; + // Number of cameras supported in the HAL based on client request. + int mNumOfCamerasSupported; // Flags whether or not constructor has succeeded. bool mConstructedOK; @@ -221,15 +226,12 @@ class VirtualCameraFactory { public: // Contains device open entry point, as required by HAL API. static struct hw_module_methods_t mCameraModuleMethods; + pthread_cond_t mSignalCapRead = PTHREAD_COND_INITIALIZER; + pthread_mutex_t mCapReadLock = PTHREAD_MUTEX_INITIALIZER; -private: - // NV12 Decoder - std::shared_ptr mDecoder; - - // Socket server std::shared_ptr mSocketServer; - - bool createSocketServer(std::shared_ptr decoder); +private: + bool createSocketServer(); }; }; // end of namespace android diff --git a/include/VirtualFakeCamera3.h b/include/VirtualFakeCamera3.h index ec45e00..adf6da8 100644 --- a/include/VirtualFakeCamera3.h +++ b/include/VirtualFakeCamera3.h @@ -33,7 +33,9 @@ #include #include #include +#ifdef ENABLE_FFMPEG #include "CGCodec.h" +#endif #include "CameraSocketServerThread.h" #include "CameraSocketCommand.h" @@ -52,11 +54,16 @@ namespace android { */ class VirtualFakeCamera3 : public VirtualCamera3, private Sensor::SensorListener { public: - VirtualFakeCamera3(int cameraId, bool facingBack, struct hw_module_t *module, +#ifdef ENABLE_FFMPEG + VirtualFakeCamera3(int cameraId, struct hw_module_t *module, std::shared_ptr socket_server, std::shared_ptr decoder, std::atomic &state); - +#else + VirtualFakeCamera3(int cameraId, struct hw_module_t *module, + std::shared_ptr socket_server, + std::atomic &state); +#endif virtual ~VirtualFakeCamera3(); /**************************************************************************** @@ -64,20 +71,22 @@ class VirtualFakeCamera3 : public VirtualCamera3, private Sensor::SensorListener ***************************************************************************/ public: - virtual status_t Initialize(const char *device_name, const char *frame_dims, - const char *facing_dir); + virtual status_t Initialize(); /**************************************************************************** * Camera module API and generic hardware device API implementation ***************************************************************************/ public: - virtual status_t 
connectCamera(hw_device_t **device); + virtual status_t openCamera(hw_device_t **device); virtual status_t closeCamera(); virtual status_t getCameraInfo(struct camera_info *info); + virtual status_t setTorchMode(const char* camera_id, bool enable); + + /**************************************************************************** * VirtualCamera3 abstract API implementation ***************************************************************************/ @@ -98,6 +107,12 @@ class VirtualFakeCamera3 : public VirtualCamera3, private Sensor::SensorListener virtual void dump(int fd); private: + /** Initialize the Sensor and Decoder part of the Camera*/ + status_t connectCamera(); + + /** Set the Decoder resolution based on the app's res request*/ + uint32_t setDecoderResolution(uint32_t resolution); + /** * Get the requested capability set for this camera */ @@ -127,6 +142,15 @@ class VirtualFakeCamera3 : public VirtualCamera3, private Sensor::SensorListener /** Handle interrupt events from the sensor */ void onSensorEvent(uint32_t frameNumber, Event e, nsecs_t timestamp); + /** Update max supported res width and height based on capability data.*/ + void setMaxSupportedResolution(); + + /** Update input codec type for each camera based on capability data.*/ + void setInputCodecType(); + + /** Update camera facing info based on capability data from client.*/ + void setCameraFacingInfo(); + /**************************************************************************** * Static configuration information ***************************************************************************/ @@ -140,8 +164,7 @@ class VirtualFakeCamera3 : public VirtualCamera3, private Sensor::SensorListener // sensor-generated buffers which use a nonpositive ID. Otherwise, HAL3 has // no concept of a stream id. 
static const uint32_t kGenericStreamId = 1; - static const int32_t kAvailableFormats[]; - static const uint32_t kAvailableRawSizes[]; + static const int32_t kHalSupportedFormats[]; static const int64_t kSyncWaitTimeout = 10000000; // 10 ms static const int32_t kMaxSyncTimeoutCount = 1000; // 1000 kSyncWaitTimeouts static const uint32_t kFenceTimeoutMs = 2000; // 2 s @@ -159,6 +182,13 @@ class VirtualFakeCamera3 : public VirtualCamera3, private Sensor::SensorListener int32_t mSensorWidth; int32_t mSensorHeight; + uint32_t mSrcWidth; + uint32_t mSrcHeight; + + uint32_t mCodecType; + uint32_t mDecoderResolution; + bool mDecoderInitDone; + SortedVector mCapabilities; /** @@ -194,13 +224,20 @@ class VirtualFakeCamera3 : public VirtualCamera3, private Sensor::SensorListener // socket server std::shared_ptr mSocketServer; +#ifdef ENABLE_FFMPEG // NV12 Video decoder handle std::shared_ptr mDecoder = nullptr; - +#endif std::atomic &mCameraSessionState; bool createSocketServer(bool facing_back); - status_t sendCommandToClient(socket::CameraOperation operation); + status_t sendCommandToClient(socket::camera_cmd_t cmd); + + enum DecoderResolution { + DECODER_SUPPORTED_RESOLUTION_480P = 480, + DECODER_SUPPORTED_RESOLUTION_720P = 720, + DECODER_SUPPORTED_RESOLUTION_1080P = 1080, + }; /** Processing thread for sending out results */ diff --git a/include/fake-pipeline2/JpegCompressor.h b/include/fake-pipeline2/JpegCompressor.h index c050a38..b5aabcf 100644 --- a/include/fake-pipeline2/JpegCompressor.h +++ b/include/fake-pipeline2/JpegCompressor.h @@ -74,7 +74,7 @@ class JpegCompressor : private Thread, public virtual RefBase { status_t reserve(); // TODO: Measure this - static const size_t kMaxJpegSize = 300000; + static const size_t kMaxJpegSize = 1200000; private: Mutex mBusyMutex; diff --git a/include/fake-pipeline2/Sensor.h b/include/fake-pipeline2/Sensor.h index ca198c5..a0db84e 100644 --- a/include/fake-pipeline2/Sensor.h +++ b/include/fake-pipeline2/Sensor.h @@ -78,8 +78,10 @@ #include "utils/Thread.h" #include "utils/Mutex.h" #include "utils/Timers.h" +#ifdef ENABLE_FFMPEG #include "CGCodec.h" #include "CGLog.h" +#endif #include #include #include @@ -89,16 +91,17 @@ using namespace std::chrono_literals; -#define FRAME_SIZE_240P 320 * 240 * 1.5 -#define FRAME_SIZE_480P 640 * 480 * 1.5 - namespace android { class Sensor : private Thread, public virtual RefBase { public: - // width: Width of pixel array - // height: Height of pixel array + // width: Max width of client camera HW. + // height: Max height of client camera HW. 
+#ifdef ENABLE_FFMPEG Sensor(uint32_t width, uint32_t height, std::shared_ptr decoder = nullptr); +#else + Sensor(uint32_t width, uint32_t height); +#endif ~Sensor(); /* @@ -125,12 +128,6 @@ class Sensor : private Thread, public virtual RefBase { // To simplify tracking sensor's current frame void setFrameNumber(uint32_t frameNumber); - /* - * Controls that cause reconfiguration delay - */ - - void setBinning(int horizontalFactor, int verticalFactor); - /* * Synchronizing with sensor operation (vertical sync) */ @@ -242,7 +239,7 @@ class Sensor : private Thread, public virtual RefBase { void captureRGBA(uint8_t *img, uint32_t gain, uint32_t width, uint32_t height); void captureRGB(uint8_t *img, uint32_t gain, uint32_t width, uint32_t height); void captureNV12(uint8_t *img, uint32_t gain, uint32_t width, uint32_t height); - void captureJPEG(uint8_t *img, uint32_t gain, uint32_t width, uint32_t height); + void captureNV21(uint8_t *img, uint32_t gain, uint32_t width, uint32_t height); void captureDepth(uint8_t *img, uint32_t gain, uint32_t width, uint32_t height); void captureDepthCloud(uint8_t *img); void saveNV21(uint8_t *img, uint32_t size); @@ -252,27 +249,37 @@ class Sensor : private Thread, public virtual RefBase { // m_major_version 0: CPU 1: SG1 uint8_t m_major_version = 1; - // memories for preview usecases - uint32_t destPrevBufSize = FRAME_SIZE_480P; - std::array mDstTempPrevBuf = {}; - std::array mDstPrevBuf = {}; + // Max supported resolution and size of client/source camera HW. + // HAL supports max 1080p resolution. + int mSrcWidth = 0; + int mSrcHeight = 0; + uint32_t mSrcFrameSize = 0; - // memories for capture/record usecases - uint32_t mDstBufSize = FRAME_SIZE_480P; - std::array mDstTempBuf = {}; - std::array mDstBuf = {}; + /** + * Allocate static memories to avoid continuous allocation on every open camera. + * Hence allocating buffers for max supported resolution, that is 1080p. + */ - //memories for JPEG/BLOB capture usecases - uint32_t mDstJpegBufSize = FRAME_SIZE_480P; - std::array mDstJpegTempBuf = {}; - std::array mDstJpegBuf = {}; + static const size_t maxSupportedResWidth = 1920; + static const size_t maxSupportedResHeight = 1080; + static const size_t bpp = 2; // 12 bpp for NV12/NV21 and 4 bits extra for FHD operations. + static const size_t buffSize = maxSupportedResWidth * maxSupportedResHeight * bpp; - // vHAL buffer - int mSrcWidth = 640; - int mSrcHeight = 480; + // Allocate memories for resolution scaling operation in preview. + std::array mDstTempPrevBuf = {}; + std::array mDstPrevBuf = {}; - std::shared_ptr mDecoder = {}; + // Allocate memories for resolution scaling operation in capture/record. + std::array mDstTempBuf = {}; + std::array mDstBuf = {}; + // Allocate memories for resolution scaling operation in JPEG capture. 
+    std::array mDstJpegTempBuf = {};
+    std::array mDstJpegBuf = {};
+
+#ifdef ENABLE_FFMPEG
+    std::shared_ptr mDecoder = {};
+#endif
     bool getNV12Frames(uint8_t *out_buf, int *out_size,
                        std::chrono::milliseconds timeout_ms = 5ms);
     void dump_yuv(uint8_t *img1, size_t img1_size, uint8_t *img2, size_t img2_size,
                   const std::string &filename);
diff --git a/src/CameraSocketCommand.cpp b/src/CameraSocketCommand.cpp
index 5eb3454..c25b8db 100644
--- a/src/CameraSocketCommand.cpp
+++ b/src/CameraSocketCommand.cpp
@@ -30,5 +30,48 @@ const std::unordered_map kCameraSessionStateNam
     {CameraSessionState::kCameraClosed, "Camera closed"},
     {CameraSessionState::kDecodingStopped, "Decoding stopped"},
 };
+
+const char* camera_type_to_str(int type) {
+    switch (type) {
+        case REQUEST_CAPABILITY:
+            return "REQUEST_CAPABILITY";
+        case CAPABILITY:
+            return "CAPABILITY";
+        case CAMERA_CONFIG:
+            return "CAMERA_CONFIG";
+        case CAMERA_DATA:
+            return "CAMERA_DATA";
+        case ACK:
+            return "ACK";
+        case CAMERA_INFO:
+            return "CAMERA_INFO";
+        default:
+            return "invalid";
+    }
+}
+
+const char* codec_type_to_str(uint32_t type) {
+    switch (type) {
+        case int(android::socket::VideoCodecType::kH264):
+            return "H264";
+        case int(android::socket::VideoCodecType::kH265):
+            return "H265";
+        case int(android::socket::VideoCodecType::kI420):
+            return "I420";
+        case int(android::socket::VideoCodecType::kMJPEG):
+            return "MJPEG";
+        default:
+            return "invalid";
+    }
+}
+
+const char* resolution_to_str(uint32_t resolution) {
+    switch (resolution) {
+        case int(android::socket::FrameResolution::k480p):
+            return "480p";
+        case int(android::socket::FrameResolution::k720p):
+            return "720p";
+        case int(android::socket::FrameResolution::k1080p):
+            return "1080p";
+        case int(android::socket::FrameResolution::kWXGA):
+            return "WXGA";
+        default:
+            return "invalid";
+    }
+}
-} // namespace android
\ No newline at end of file
+} // namespace socket
+} // namespace android
diff --git a/src/CameraSocketServerThread.cpp b/src/CameraSocketServerThread.cpp
index 116580c..2e18bc6 100644
--- a/src/CameraSocketServerThread.cpp
+++ b/src/CameraSocketServerThread.cpp
@@ -17,6 +17,8 @@
 //#define LOG_NNDEBUG 0
 #define LOG_TAG "CameraSocketServerThread: "
 #include 
+#define HAVE_JPEG  // required for libyuv.h to export MJPEG decode APIs
+#include 
 
 #ifdef LOG_NNDEBUG
 #define ALOGVV(...) ALOGV(__VA_ARGS__)
@@ -42,23 +44,42 @@
 #include "CameraSocketServerThread.h"
 #include "VirtualBuffer.h"
 #include "VirtualCameraFactory.h"
+#include 
 #include 
 
 android::ClientVideoBuffer *android::ClientVideoBuffer::ic_instance = 0;
+extern android::VirtualCameraFactory gVirtualCameraFactory;
 
 namespace android {
 
+uint32_t gMaxNumOfCamerasSupported;
+
+int32_t gMaxSupportedWidth;
+int32_t gMaxSupportedHeight;
+
+int32_t gCameraMaxWidth;
+int32_t gCameraMaxHeight;
+
+uint32_t gCodecType;
+uint32_t gCameraSensorOrientation;
+bool gCameraFacingBack;
+
+bool gStartMetadataUpdate;
+bool gDoneMetadataUpdate;
+int gDataPipeHandle = -1;
+
 using namespace socket;
+
 CameraSocketServerThread::CameraSocketServerThread(std::string suffix,
-                                                   std::shared_ptr decoder,
                                                    std::atomic &state)
     : Thread(/*canCallJava*/ false),
       mRunning{true},
       mSocketServerFd{-1},
-      mVideoDecoder{decoder},
       mCameraSessionState{state} {
     std::string sock_path = "/ipc/camera-socket" + suffix;
     char *k8s_env_value = getenv("K8S_ENV");
-    mSocketPath = (k8s_env_value != NULL && !strcmp(k8s_env_value, "true"))
-                      ? "/conn/camera-socket" : sock_path.c_str();
+    mSocketPath = (k8s_env_value != NULL && !strcmp(k8s_env_value, "true")) ? 
"/conn/camera-socket" + : sock_path.c_str(); ALOGI("%s camera socket server path is %s", __FUNCTION__, mSocketPath.c_str()); + mNumOfCamerasRequested = 0; } CameraSocketServerThread::~CameraSocketServerThread() { @@ -97,82 +118,952 @@ status_t CameraSocketServerThread::readyToRun() { return OK; } -void CameraSocketServerThread::clearBuffer() { - ALOGVV(LOG_TAG " %s Enter", __FUNCTION__); - mSocketBuffer.fill(0); - ClientVideoBuffer *handle = ClientVideoBuffer::getClientInstance(); - char *fbuffer = (char *)handle->clientBuf[handle->clientRevCount % 1].buffer; +void CameraSocketServerThread::setCameraMaxSupportedResolution(int32_t width, int32_t height) { + ALOGVV(LOG_TAG "%s: E", __FUNCTION__); - if (gIsInFrameI420) { - // TODO: Use width and height for current resolution - clearBuffer(fbuffer, 640, 480); + if (gMaxSupportedWidth < width && gMaxSupportedHeight < height) { + gMaxSupportedWidth = width; + gMaxSupportedHeight = height; + ALOGI(LOG_TAG "%s: Set Camera Max supported resolution: %dx%d", __FUNCTION__, + gMaxSupportedWidth, gMaxSupportedHeight); } - ALOGVV(LOG_TAG " %s: Exit", __FUNCTION__); } -void CameraSocketServerThread::clearBuffer(char *buffer, int width, int height) { +void CameraSocketServerThread::setCameraResolution(uint32_t resolution) { + ALOGVV(LOG_TAG "%s: E", __FUNCTION__); + + switch (resolution) { + case uint32_t(FrameResolution::k480p): + gCameraMaxWidth = 640; + gCameraMaxHeight = 480; + break; + case uint32_t(FrameResolution::k720p): + gCameraMaxWidth = 1280; + gCameraMaxHeight = 720; + break; + case uint32_t(FrameResolution::k1080p): + gCameraMaxWidth = 1920; + gCameraMaxHeight = 1080; + break; + case uint32_t(FrameResolution::kWXGA): + gCameraMaxWidth = 640; + gCameraMaxHeight = 360; + break; + + default: + break; + } + ALOGI(LOG_TAG "%s: Set Camera resolution: %dx%d", __FUNCTION__, gCameraMaxWidth, + gCameraMaxHeight); + + setCameraMaxSupportedResolution(gCameraMaxWidth, gCameraMaxHeight); +} +#ifndef PIPE +bool CameraSocketServerThread::configureCapabilities(bool skip) { ALOGVV(LOG_TAG " %s Enter", __FUNCTION__); - char *uv_offset = buffer + width * height; - memset(buffer, 0x10, (width * height)); - memset(uv_offset, 0x80, (width * height) / 2); + + bool status = false; + bool valid_client_cap_info = false; + int camera_id, expctd_cam_id; + struct ValidateClientCapability val_client_cap[MAX_NUMBER_OF_SUPPORTED_CAMERAS]; + size_t ack_packet_size = sizeof(camera_header_t) + sizeof(camera_ack_t); + size_t cap_packet_size = sizeof(camera_header_t) + sizeof(camera_capability_t); + ssize_t recv_size = 0; + camera_ack_t ack_payload = ACK_CONFIG; + + camera_info_t camera_info[MAX_NUMBER_OF_SUPPORTED_CAMERAS] = {}; + camera_capability_t capability = {}; + + camera_packet_t *cap_packet = NULL; + camera_packet_t *ack_packet = NULL; + camera_header_t header = {}; + + if ((recv_size = recv(mClientFd, (char *)&header, sizeof(camera_header_t), MSG_WAITALL)) < 0) { + ALOGE(LOG_TAG "%s: Failed to receive header, err: %s ", __FUNCTION__, strerror(errno)); + goto out; + } + + if (header.type != REQUEST_CAPABILITY) { + ALOGE(LOG_TAG "%s: Invalid packet type\n", __FUNCTION__); + goto out; + } + ALOGI(LOG_TAG "%s: Received REQUEST_CAPABILITY header from client", __FUNCTION__); + + cap_packet = (camera_packet_t *)malloc(cap_packet_size); + if (cap_packet == NULL) { + ALOGE(LOG_TAG "%s: cap camera_packet_t allocation failed: %d ", __FUNCTION__, __LINE__); + return false; + } + + cap_packet->header.type = CAPABILITY; + cap_packet->header.size = sizeof(camera_capability_t); + 
+    capability.codec_type = (uint32_t)VideoCodecType::kAll;
+    capability.resolution = (uint32_t)FrameResolution::kAll;
+    capability.maxNumberOfCameras = MAX_NUMBER_OF_SUPPORTED_CAMERAS;
+
+    memcpy(cap_packet->payload, &capability, sizeof(camera_capability_t));
+    if (send(mClientFd, cap_packet, cap_packet_size, 0) < 0) {
+        ALOGE(LOG_TAG "%s: Failed to send camera capabilities, err: %s ", __FUNCTION__,
+              strerror(errno));
+        goto out;
+    }
+    ALOGI(LOG_TAG "%s: Sent CAPABILITY packet to client", __FUNCTION__);
+
+    if ((recv_size = recv(mClientFd, (char *)&header, sizeof(camera_header_t), MSG_WAITALL)) < 0) {
+        ALOGE(LOG_TAG "%s: Failed to receive header, err: %s ", __FUNCTION__, strerror(errno));
+        goto out;
+    }
+
+    if (header.type != CAMERA_INFO) {
+        ALOGE(LOG_TAG "%s: invalid camera_packet_type: %s", __FUNCTION__,
+              camera_type_to_str(header.type));
+        goto out;
+    }
+
+    // Get the number of cameras the client requests us to support.
+    for (int i = 1; i <= MAX_NUMBER_OF_SUPPORTED_CAMERAS; i++) {
+        if (header.size == i * sizeof(camera_info_t)) {
+            mNumOfCamerasRequested = i;
+            break;
+        } else if (mNumOfCamerasRequested == 0 && i == MAX_NUMBER_OF_SUPPORTED_CAMERAS) {
+            ALOGE(LOG_TAG
+                  "%s: Failed to support the number of cameras requested by the client, "
+                  "which is higher than the max number of cameras supported in the HAL",
+                  __FUNCTION__);
+            goto out;
+        }
+    }
+
+    if (mNumOfCamerasRequested == 0) {
+        ALOGE(LOG_TAG "%s: invalid header size received, size = %u", __FUNCTION__, header.size);
+        goto out;
+    } else {
+        // Update the number of cameras globally to create the camera pipeline.
+        gMaxNumOfCamerasSupported = mNumOfCamerasRequested;
+    }
+    if ((recv_size = recv(mClientFd, (char *)&camera_info,
+                          mNumOfCamerasRequested * sizeof(camera_info_t), MSG_WAITALL)) < 0) {
+        ALOGE(LOG_TAG "%s: Failed to receive camera info, err: %s ", __FUNCTION__, strerror(errno));
+        goto out;
+    }
+
+    ALOGI(LOG_TAG "%s: Received CAMERA_INFO packet from client with recv_size: %zd ", __FUNCTION__,
+          recv_size);
+    ALOGI(LOG_TAG "%s: Number of cameras requested = %d", __FUNCTION__, mNumOfCamerasRequested);
+
+    gVirtualCameraFactory.constructVirtualCamera();
+    // Validate the capability info received from the client.
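+    // Handshake so far: the client sent REQUEST_CAPABILITY, we answered with a
+    // CAPABILITY packet (supported codec types, resolutions, max cameras), and
+    // the client replied with one camera_info_t per camera it wants. The loop
+    // below checks every field; the outcome decides whether the final ACK
+    // carries ACK_CONFIG or NACK_CONFIG.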
+    for (int i = 0; i < mNumOfCamerasRequested; i++) {
+        expctd_cam_id = i;
+        if (expctd_cam_id == (int)camera_info[i].cameraId)
+            ALOGVV(LOG_TAG
+                   "%s: Camera Id %u received from the client matches the expected Id",
+                   __FUNCTION__, camera_info[i].cameraId);
+        else
+            ALOGI(LOG_TAG
+                  "%s: [Warning] Camera Id %u received from the client does not match the "
+                  "expected Id %d",
+                  __FUNCTION__, camera_info[i].cameraId, expctd_cam_id);
+
+        ALOGI("received codec type %d", camera_info[i].codec_type);
+        switch (camera_info[i].codec_type) {
+            case uint32_t(VideoCodecType::kH264):
+                gIsInFrameH264 = true;
+                val_client_cap[i].validCodecType = true;
+                break;
+            case uint32_t(VideoCodecType::kI420):
+                gIsInFrameI420 = true;
+                val_client_cap[i].validCodecType = true;
+                break;
+            case uint32_t(VideoCodecType::kMJPEG):
+                gIsInFrameMJPG = true;
+                val_client_cap[i].validCodecType = true;
+                break;
+            default:
+                val_client_cap[i].validCodecType = false;
+                break;
+        }
+
+        switch (camera_info[i].resolution) {
+            case uint32_t(FrameResolution::kWXGA):
+            case uint32_t(FrameResolution::k480p):
+            case uint32_t(FrameResolution::k720p):
+            case uint32_t(FrameResolution::k1080p):
+                val_client_cap[i].validResolution = true;
+                break;
+            default:
+                val_client_cap[i].validResolution = false;
+                break;
+        }
+
+        switch (camera_info[i].sensorOrientation) {
+            case uint32_t(SensorOrientation::ORIENTATION_0):
+            case uint32_t(SensorOrientation::ORIENTATION_90):
+            case uint32_t(SensorOrientation::ORIENTATION_180):
+            case uint32_t(SensorOrientation::ORIENTATION_270):
+                val_client_cap[i].validOrientation = true;
+                break;
+            default:
+                val_client_cap[i].validOrientation = false;
+                break;
+        }
+
+        switch (camera_info[i].facing) {
+            case uint32_t(CameraFacing::BACK_FACING):
+            case uint32_t(CameraFacing::FRONT_FACING):
+                val_client_cap[i].validCameraFacing = true;
+                break;
+            default:
+                val_client_cap[i].validCameraFacing = false;
+                break;
+        }
+    }
+
+    // Check whether we received any invalid capability info.
+    // The ACK packet to the client is updated based on this verification.
+    for (int i = 0; i < mNumOfCamerasRequested; i++) {
+        if (!val_client_cap[i].validCodecType || !val_client_cap[i].validResolution ||
+            !val_client_cap[i].validOrientation || !val_client_cap[i].validCameraFacing) {
+            valid_client_cap_info = false;
+            ALOGE("%s: capability info received from client is not completely correct and expected",
+                  __FUNCTION__);
+            break;
+        } else {
+            ALOGVV("%s: capability info received from client is correct and expected",
+                   __FUNCTION__);
+            valid_client_cap_info = true;
+        }
+    }
+
+    // Update the metadata for each camera separately with the capability info received.
+    for (int i = 0; i < mNumOfCamerasRequested; i++) {
+        // Going to update metadata for each camera, so update the status.
+        gStartMetadataUpdate = false;
+        gDoneMetadataUpdate = false;
+        camera_id = i;
+        ALOGI(LOG_TAG
+              "%s - Client requested codec_type: %s, resolution: %s, orientation: %u, and "
+              "facing: %u for camera Id %d",
+              __FUNCTION__, codec_type_to_str(camera_info[i].codec_type),
+              resolution_to_str(camera_info[i].resolution), camera_info[i].sensorOrientation,
+              camera_info[i].facing, camera_id);
+
+        if (val_client_cap[i].validResolution) {
+            // Set the camera's capable resolution based on the remote client capability info.
+            setCameraResolution(camera_info[i].resolution);
+        } else {
+            // Set the default resolution if we receive invalid capability info from the client.
+            // The default resolution is 480p.
+ setCameraResolution((uint32_t)FrameResolution::k480p); + ALOGE(LOG_TAG + "%s: Not received valid resolution, " + "hence selected 480p as default", + __FUNCTION__); + } + + if (val_client_cap[i].validCodecType) { + // Set codec type based on remote client capability info. + gCodecType = camera_info[i].codec_type; + } else { + // Set default codec type if receive invalid capability info from client. + // Default codec type would be H264. + gCodecType = (uint32_t)VideoCodecType::kH264; + ALOGE(LOG_TAG "%s: Not received valid codec type, hence selected H264 as default", + __FUNCTION__); + } + + if (val_client_cap[i].validOrientation) { + // Set Camera sensor orientation based on remote client camera orientation. + gCameraSensorOrientation = camera_info[i].sensorOrientation; + } else { + // Set default camera sensor orientation if received invalid orientation data from + // client. Default sensor orientation would be zero deg and consider as landscape + // display. + gCameraSensorOrientation = (uint32_t)SensorOrientation::ORIENTATION_0; + ALOGE(LOG_TAG + "%s: Not received valid sensor orientation, " + "hence selected ORIENTATION_0 as default", + __FUNCTION__); + } + + if (val_client_cap[i].validCameraFacing) { + // Set camera facing based on client request. + if (camera_info[i].facing == (uint32_t)CameraFacing::BACK_FACING) + gCameraFacingBack = true; + else + gCameraFacingBack = false; + } else { + // Set default camera facing info if received invalid facing info from client. + // Default would be back for camera Id '0' and front for camera Id '1'. + if (camera_id == 1) + gCameraFacingBack = false; + else + gCameraFacingBack = true; + ALOGE(LOG_TAG + "%s: Not received valid camera facing info, " + "hence selected default", + __FUNCTION__); + } +#if 0 + // Start updating metadata for one camera, so update the status. + gStartMetadataUpdate = true; + + // Wait till complete the metadata update for a camera. + while (!gDoneMetadataUpdate) { + ALOGVV("%s: wait till complete the metadata update for a camera", __FUNCTION__); + // 200us sleep for this thread. + std::this_thread::sleep_for(std::chrono::microseconds(200)); + } +#endif + gVirtualCameraFactory.createVirtualRemoteCamera(gVirtualCameraFactory.mSocketServer, camera_id); + } + + pthread_cond_signal(&gVirtualCameraFactory.mSignalCapRead); + pthread_mutex_unlock(&gVirtualCameraFactory.mCapReadLock); + + ack_packet = (camera_packet_t *)malloc(ack_packet_size); + if (ack_packet == NULL) { + ALOGE(LOG_TAG "%s: ack camera_packet_t allocation failed: %d ", __FUNCTION__, __LINE__); + goto out; + } + ack_payload = (valid_client_cap_info) ? 
ACK_CONFIG : NACK_CONFIG; + + ack_packet->header.type = ACK; + ack_packet->header.size = sizeof(camera_ack_t); + + memcpy(ack_packet->payload, &ack_payload, sizeof(camera_ack_t)); + if (send(mClientFd, ack_packet, ack_packet_size, 0) < 0) { + ALOGE(LOG_TAG "%s: Failed to send camera capabilities, err: %s ", __FUNCTION__, + strerror(errno)); + goto out; + } + ALOGI(LOG_TAG "%s: Sent ACK packet to client with ack_size: %zu ", __FUNCTION__, + ack_packet_size); + + status = true; +out: + free(ack_packet); + free(cap_packet); ALOGVV(LOG_TAG " %s: Exit", __FUNCTION__); + return status; } +#else +bool CameraSocketServerThread::configureCapabilities(bool skipCapRead) { + ALOGE(LOG_TAG " %s Enter", __FUNCTION__); -bool CameraSocketServerThread::threadLoop() { - mSocketServerFd = ::socket(AF_UNIX, SOCK_STREAM, 0); - if (mSocketServerFd < 0) { - ALOGE("%s:%d Fail to construct camera socket with error: %s", __FUNCTION__, __LINE__, + bool status = false; + bool valid_client_cap_info = false; + int camera_id, expctd_cam_id; + struct ValidateClientCapability val_client_cap[MAX_NUMBER_OF_SUPPORTED_CAMERAS]; + size_t ack_packet_size = sizeof(camera_header_t) + sizeof(camera_ack_t); + size_t cap_packet_size = sizeof(camera_header_t) + sizeof(camera_capability_t); + ssize_t recv_size = 0; + camera_ack_t ack_payload = ACK_CONFIG; + + camera_info_t camera_info[MAX_NUMBER_OF_SUPPORTED_CAMERAS] = {}; + camera_capability_t capability = {}; + + camera_packet_t *cap_packet = NULL; + camera_packet_t *ack_packet = NULL; + camera_header_t header = {}; + int trans_mode = PIPE; +if(!skipCapRead) { + + int headerRead = read(mClientFd, &header, sizeof(camera_header_t)); + ALOGE("header read size %d\n",headerRead); + if(headerRead < 0) + return false; + if (header.type != REQUEST_CAPABILITY) { + ALOGE(LOG_TAG "%s: Invalid packet type\n", __FUNCTION__); + return false; + } +} + ALOGI(LOG_TAG "%s: Received REQUEST_CAPABILITY header from client", __FUNCTION__); + + cap_packet = (camera_packet_t *)malloc(cap_packet_size); + if (cap_packet == NULL) { + ALOGE(LOG_TAG "%s: cap camera_packet_t allocation failed: %d ", __FUNCTION__, __LINE__); + return false; + } + + cap_packet->header.type = CAPABILITY; + cap_packet->header.size = sizeof(camera_capability_t); + capability.codec_type = (uint32_t)VideoCodecType::kAll; + capability.resolution = (uint32_t)FrameResolution::kAll; + capability.maxNumberOfCameras = MAX_NUMBER_OF_SUPPORTED_CAMERAS; + + memcpy(cap_packet->payload, &capability, sizeof(camera_capability_t)); + int bytesRead = 0; + bytesRead = write(mClientFd, cap_packet, cap_packet_size); + ALOGE("cap packet size write %d\n",bytesRead); + if(bytesRead < 0) + return false; + + ALOGI(LOG_TAG "%s: Sent CAPABILITY packet to client", __FUNCTION__); + + if(trans_mode == PIPE) { + recv_size = read(mClientFd, &header, sizeof(camera_header_t)); + ALOGE("cap packet size %d\n",(int)recv_size); + + } + if (header.type != CAMERA_INFO) { + ALOGE(LOG_TAG "%s: invalid camera_packet_type: %s", __FUNCTION__, + camera_type_to_str(header.type)); + goto out; + } + + // Get the number fo cameras requested to support from client. 
+ for (int i = 1; i <= MAX_NUMBER_OF_SUPPORTED_CAMERAS; i++) { + if (header.size == i * sizeof(camera_info_t)) { + mNumOfCamerasRequested = i; + break; + } else if (header.size == 0) { + mNumOfCamerasRequested = 0; + break; + } else if (i == MAX_NUMBER_OF_SUPPORTED_CAMERAS) { + ALOGE(LOG_TAG + "%s: Failed to support number of cameras requested by client " + "which is higher than the max number of cameras supported in the HAL", + __FUNCTION__); + goto out; + } + } + + if (mNumOfCamerasRequested == 0) { + ALOGE(LOG_TAG "%s: No Camera Found", __FUNCTION__); + gMaxNumOfCamerasSupported = 0; +// goto out; + } else { + // Update the number of cameras globally to create camera pipeline. + gMaxNumOfCamerasSupported = mNumOfCamerasRequested; + } + + if(trans_mode == PIPE) { + recv_size = read(mClientFd, &camera_info, mNumOfCamerasRequested * sizeof(camera_info_t)); + ALOGE("cam info size %d\n",(int)recv_size); + + } else { + + if ((recv_size = recv(mClientFd, (char *)&camera_info, + mNumOfCamerasRequested * sizeof(camera_info_t), MSG_WAITALL)) < 0) { + ALOGE(LOG_TAG "%s: Failed to receive camera info, err: %s ", __FUNCTION__, strerror(errno)); + goto out; + } +} + ALOGI(LOG_TAG "%s: Received CAMERA_INFO packet from client with recv_size: %zd ", __FUNCTION__, + recv_size); + ALOGI(LOG_TAG "%s: Number of cameras requested = %d", __FUNCTION__, mNumOfCamerasRequested); + + // pthread_cond_signal(&gVirtualCameraFactory.mSignalCapRead); + // pthread_mutex_unlock(&gVirtualCameraFactory.mCapReadLock); + + gVirtualCameraFactory.constructVirtualCamera(); + // validate capability info received from the client. + for (int i = 0; i < mNumOfCamerasRequested; i++) { + expctd_cam_id = i; + if (expctd_cam_id == (int)camera_info[i].cameraId) + ALOGVV(LOG_TAG + "%s: Camera Id number %u received from client is matching with expected Id", + __FUNCTION__, camera_info[i].cameraId); + else + ALOGI(LOG_TAG + "%s: [Warning] Camera Id number %u received from client is not matching with " + "expected Id %d", + __FUNCTION__, camera_info[i].cameraId, expctd_cam_id); + + ALOGI("received codec type %d", camera_info[i].codec_type); + switch (camera_info[i].codec_type) { + case uint32_t(VideoCodecType::kH264): + gIsInFrameH264 = true; + val_client_cap[i].validCodecType = true; + break; + case uint32_t(VideoCodecType::kI420): + gIsInFrameI420 = true; + val_client_cap[i].validCodecType = true; + break; + case uint32_t(VideoCodecType::kMJPEG): + gIsInFrameMJPG = true; + val_client_cap[i].validCodecType = true; + break; + default: + val_client_cap[i].validCodecType = false; + break; + } + + switch (camera_info[i].resolution) { + case uint32_t(FrameResolution::kWXGA): + case uint32_t(FrameResolution::k480p): + case uint32_t(FrameResolution::k720p): + case uint32_t(FrameResolution::k1080p): + val_client_cap[i].validResolution = true; + break; + default: + val_client_cap[i].validResolution = false; + break; + } + + switch (camera_info[i].sensorOrientation) { + case uint32_t(SensorOrientation::ORIENTATION_0): + case uint32_t(SensorOrientation::ORIENTATION_90): + case uint32_t(SensorOrientation::ORIENTATION_180): + case uint32_t(SensorOrientation::ORIENTATION_270): + val_client_cap[i].validOrientation = true; + break; + default: + val_client_cap[i].validOrientation = false; + break; + } + + switch (camera_info[i].facing) { + case uint32_t(CameraFacing::BACK_FACING): + case uint32_t(CameraFacing::FRONT_FACING): + val_client_cap[i].validCameraFacing = true; + break; + default: + val_client_cap[i].validCameraFacing = false; + break; + } + } + 
+    // Check whether we received any invalid capability info.
+    // The ACK packet to the client is updated based on this verification.
+    for (int i = 0; i < mNumOfCamerasRequested; i++) {
+        if (!val_client_cap[i].validCodecType || !val_client_cap[i].validResolution ||
+            !val_client_cap[i].validOrientation || !val_client_cap[i].validCameraFacing) {
+            valid_client_cap_info = false;
+            ALOGE("%s: capability info received from client is not completely correct and expected",
+                  __FUNCTION__);
+            break;
+        } else {
+            ALOGVV("%s: capability info received from client is correct and expected",
+                   __FUNCTION__);
+            valid_client_cap_info = true;
+        }
+    }
+
+    // Update the metadata for each camera separately with the capability info received.
+    for (int i = 0; i < mNumOfCamerasRequested; i++) {
+        // Going to update metadata for each camera, so update the status.
+        gStartMetadataUpdate = false;
+        gDoneMetadataUpdate = false;
+        camera_id = i;
+        ALOGI(LOG_TAG
+              "%s - Client requested codec_type: %s, resolution: %s, orientation: %u, and "
+              "facing: %u for camera Id %d",
+              __FUNCTION__, codec_type_to_str(camera_info[i].codec_type),
+              resolution_to_str(camera_info[i].resolution), camera_info[i].sensorOrientation,
+              camera_info[i].facing, camera_id);
+
+        if (val_client_cap[i].validResolution) {
+            // Set the camera's capable resolution based on the remote client capability info.
+            setCameraResolution(camera_info[i].resolution);
+        } else {
+            // Set the default resolution if we receive invalid capability info from the client.
+            // The default resolution is 480p.
+            setCameraResolution((uint32_t)FrameResolution::k480p);
+            ALOGE(LOG_TAG
+                  "%s: Not received valid resolution, "
+                  "hence selected 480p as default",
+                  __FUNCTION__);
+        }
+
+        if (val_client_cap[i].validCodecType) {
+            // Set the codec type based on the remote client capability info.
+            gCodecType = camera_info[i].codec_type;
+        } else {
+            // Set the default codec type if we receive invalid capability info from the client.
+            // The default codec type is H264.
+            gCodecType = (uint32_t)VideoCodecType::kH264;
+            ALOGE(LOG_TAG "%s: Not received valid codec type, hence selected H264 as default",
+                  __FUNCTION__);
+        }
+
+        if (val_client_cap[i].validOrientation) {
+            // Set the camera sensor orientation based on the remote client camera orientation.
+            gCameraSensorOrientation = camera_info[i].sensorOrientation;
+        } else {
+            // Set the default camera sensor orientation if we received invalid orientation data
+            // from the client. The default is zero degrees, treated as a landscape display.
+            gCameraSensorOrientation = (uint32_t)SensorOrientation::ORIENTATION_0;
+            ALOGE(LOG_TAG
+                  "%s: Not received valid sensor orientation, "
+                  "hence selected ORIENTATION_0 as default",
+                  __FUNCTION__);
+        }
+
+        if (val_client_cap[i].validCameraFacing) {
+            // Set the camera facing based on the client request.
+            if (camera_info[i].facing == (uint32_t)CameraFacing::BACK_FACING)
+                gCameraFacingBack = true;
+            else
+                gCameraFacingBack = false;
+        } else {
+            // Set the default camera facing if we received invalid facing info from the client.
+            // The default is back for camera Id '0' and front for camera Id '1'.
+            if (camera_id == 1)
+                gCameraFacingBack = false;
+            else
+                gCameraFacingBack = true;
+            ALOGE(LOG_TAG
+                  "%s: Not received valid camera facing info, "
+                  "hence selected default",
+                  __FUNCTION__);
+        }
+#if 0
+        // Start updating metadata for one camera, so update the status.
+        gStartMetadataUpdate = true;
+
+        // Wait till the metadata update for the camera completes.
+ while (!gDoneMetadataUpdate) { + ALOGVV("%s: wait till complete the metadata update for a camera", __FUNCTION__); + // 200us sleep for this thread. + std::this_thread::sleep_for(std::chrono::microseconds(200)); + } +#endif + gVirtualCameraFactory.createVirtualRemoteCamera(gVirtualCameraFactory.mSocketServer, camera_id); + + } + + //gVirtualCameraFactory.constructVirtualCamera(); + pthread_cond_signal(&gVirtualCameraFactory.mSignalCapRead); + pthread_mutex_unlock(&gVirtualCameraFactory.mCapReadLock); + + ack_packet = (camera_packet_t *)malloc(ack_packet_size); + if (ack_packet == NULL) { + ALOGE(LOG_TAG "%s: ack camera_packet_t allocation failed: %d ", __FUNCTION__, __LINE__); + goto out; + } + ack_payload = (valid_client_cap_info) ? ACK_CONFIG : NACK_CONFIG; + + ack_packet->header.type = ACK; + ack_packet->header.size = sizeof(camera_ack_t); + + memcpy(ack_packet->payload, &ack_payload, sizeof(camera_ack_t)); + if(trans_mode == PIPE) { + recv_size = write(mClientFd, ack_packet, ack_packet_size); + ALOGE("cap ack size %d\n",(int)recv_size); + + } else { + + if (send(mClientFd, ack_packet, ack_packet_size, 0) < 0) { + ALOGE(LOG_TAG "%s: Failed to send camera capabilities, err: %s ", __FUNCTION__, strerror(errno)); + goto out; + } +} + ALOGI(LOG_TAG "%s: Sent ACK packet to client with ack_size: %zu ", __FUNCTION__, + ack_packet_size); + + status = true; +out: + free(ack_packet); + free(cap_packet); + ALOGVV(LOG_TAG " %s: Exit", __FUNCTION__); + return status; +} +#endif +bool CameraSocketServerThread::ProcessCameraDataFromPipe(ClientVideoBuffer *handle) { + int size_header =0; + ssize_t size_pending =0; + camera_header_t buffer_header = {}; +ALOGE("ProcessCameraDataFromPipe start\n"); + int retryCount = 0; + uint8_t *fbuffer = (uint8_t *)handle->clientBuf[handle->clientRevCount % 1].buffer; + size_header = read(gDataPipeHandle, (char *)&buffer_header, sizeof(camera_header_t)); + if(buffer_header.type == CAMERA_DATA){ + + size_pending = buffer_header.size; + while(size_pending != 0){ + ssize_t size_data = 0; + size_data = read(gDataPipeHandle, (char *)fbuffer+size_update, size_pending); + + if(size_data < 0){ + if(retryCount > 3) { + ALOGE("Dropping frame \n"); + break; + } + retryCount++; + ALOGE(LOG_TAG "entered into recv error, break to recover"); + continue; + } + size_update += size_data; + size_pending -= size_data; + if (size_pending == 0){ + handle->clientRevCount++; +#if 0 + FILE *fp_dump = fopen("/data/dump.yuv","w"); + if(fp_dump != NULL){ + fwrite(fbuffer,size_update,1,fp_dump); + ALOGE(LOG_TAG "dump camera frame"); + fclose(fp_dump); + } +#endif + size_update = 0; + ALOGE(LOG_TAG "[I420] %s: Packet rev %d and " + "size %zd", + __FUNCTION__, handle->clientRevCount, size_data); + break; + } + } + } else if(buffer_header.type == REQUEST_CAPABILITY){ + ALOGE("Calling request Capability \n"); + if(!configureCapabilities(true)) { + return false; + } + } else { + ALOGE("invalid packet received"); return false; } + return true; +} - struct sockaddr_un addr_un; +bool CameraSocketServerThread::threadLoop() { + struct sockaddr_un addr_un; memset(&addr_un, 0, sizeof(addr_un)); addr_un.sun_family = AF_UNIX; - strncpy(&addr_un.sun_path[0], mSocketPath.c_str(), strlen(mSocketPath.c_str())); - int ret = 0; - if ((access(mSocketPath.c_str(), F_OK)) != -1) { - ALOGI(" %s camera socket server file is %s", __FUNCTION__, mSocketPath.c_str()); - ret = unlink(mSocketPath.c_str()); + int new_client_fd =-1; + int so_reuseaddr = 1; + struct sockaddr_vm addr_vm ; + struct sockaddr_in addr_ip; + int 
-    struct sockaddr_un addr_un;
+bool CameraSocketServerThread::threadLoop() {
+    struct sockaddr_un addr_un;
     memset(&addr_un, 0, sizeof(addr_un));
     addr_un.sun_family = AF_UNIX;
-    strncpy(&addr_un.sun_path[0], mSocketPath.c_str(), strlen(mSocketPath.c_str()));
-    int ret = 0;
-    if ((access(mSocketPath.c_str(), F_OK)) != -1) {
-        ALOGI(" %s camera socket server file is %s", __FUNCTION__, mSocketPath.c_str());
-        ret = unlink(mSocketPath.c_str());
+    int new_client_fd = -1;
+    int so_reuseaddr = 1;
+    struct sockaddr_vm addr_vm;
+    struct sockaddr_in addr_ip;
+    int trans_mode = 0;
+    int pipe_handle = -1;
+    char mode[PROPERTY_VALUE_MAX];
+
+    if (property_get("ro.vendor.camera.transference", mode, nullptr) > 0) {
+        if (!strcmp(mode, "TCP")) {
+            trans_mode = TCP;
+        } else if (!strcmp(mode, "UNIX")) {
+            trans_mode = UNIX;
+        } else if (!strcmp(mode, "VSOCK")) {
+            trans_mode = VSOCK;
+        }
+    } else {
+        // Fall back to VSOCK by default when the property is not set.
+        // trans_mode = UNIX;  // TODO: decide whether UNIX should be the default.
+        trans_mode = VSOCK;
+        ALOGV("%s: falling back to VSOCK as the trans mode is not set", __FUNCTION__);
+    }
+    if (trans_mode == UNIX) {
+        mSocketServerFd = ::socket(AF_UNIX, SOCK_STREAM, 0);
+        if (mSocketServerFd < 0) {
+            ALOGV("%s:%d Failed to create camera socket with error: %s", __FUNCTION__, __LINE__,
+                  strerror(errno));
+            return false;
+        }
+
+        struct sockaddr_un addr_un;
+        memset(&addr_un, 0, sizeof(addr_un));
+        addr_un.sun_family = AF_UNIX;
+        strncpy(&addr_un.sun_path[0], mSocketPath.c_str(), strlen(mSocketPath.c_str()));
+
+        int ret = 0;
+        if ((access(mSocketPath.c_str(), F_OK)) != -1) {
+            ALOGI(" %s camera socket server file is %s", __FUNCTION__, mSocketPath.c_str());
+            ret = unlink(mSocketPath.c_str());
+            if (ret < 0) {
+                ALOGE(LOG_TAG " %s Failed to unlink %s address %d, %s", __FUNCTION__,
+                      mSocketPath.c_str(), ret, strerror(errno));
+                return false;
+            }
+        } else {
+            ALOGV(LOG_TAG " %s camera socket server file %s will be created. ", __FUNCTION__,
+                  mSocketPath.c_str());
+        }
+
+        ret = ::bind(mSocketServerFd, (struct sockaddr *)&addr_un,
+                     sizeof(sa_family_t) + strlen(mSocketPath.c_str()) + 1);
         if (ret < 0) {
-            ALOGE(LOG_TAG " %s Failed to unlink %s address %d, %s", __FUNCTION__,
-                  mSocketPath.c_str(), ret, strerror(errno));
+            ALOGE(LOG_TAG " %s Failed to bind %s address %d, %s", __FUNCTION__, mSocketPath.c_str(),
+                  ret, strerror(errno));
             return false;
         }
-    } else {
-        ALOGV(LOG_TAG " %s camera socket server file %s will created. ", __FUNCTION__,
-              mSocketPath.c_str());
-    }
 
-    ret = ::bind(mSocketServerFd, (struct sockaddr *)&addr_un,
-                 sizeof(sa_family_t) + strlen(mSocketPath.c_str()) + 1);
-    if (ret < 0) {
-        ALOGE(LOG_TAG " %s Failed to bind %s address %d, %s", __FUNCTION__, mSocketPath.c_str(),
-              ret, strerror(errno));
-        return false;
-    }
+        struct stat st;
+        __mode_t mod = S_IRWXU | S_IRWXG | S_IRWXO;
+        if (fstat(mSocketServerFd, &st) == 0) {
+            mod |= st.st_mode;
+        }
+        chmod(mSocketPath.c_str(), mod);
+        stat(mSocketPath.c_str(), &st);
 
-    struct stat st;
-    __mode_t mod = S_IRWXU | S_IRWXG | S_IRWXO;
-    if (fstat(mSocketServerFd, &st) == 0) {
-        mod |= st.st_mode;
+        ret = listen(mSocketServerFd, 5);
+        if (ret < 0) {
+            ALOGE("%s Failed to listen on %s", __FUNCTION__, mSocketPath.c_str());
+            return false;
+        }
     }
-    chmod(mSocketPath.c_str(), mod);
-    stat(mSocketPath.c_str(), &st);
+    else if (trans_mode == TCP) {
+        int ret = 0;
+        int new_client_fd = -1;
+        int port = 8085;
+        int so_reuseaddr = 1;
+        mSocketServerFd = ::socket(AF_INET, SOCK_STREAM, 0);
+        if (mSocketServerFd < 0) {
+            ALOGV(LOG_TAG " %s:Line:[%d] Failed to create camera socket with error: [%s]",
+                  __FUNCTION__, __LINE__, strerror(errno));
+            return false;
+        }
+        if (setsockopt(mSocketServerFd, SOL_SOCKET, SO_REUSEADDR, &so_reuseaddr,
+                       sizeof(int)) < 0) {
+            ALOGV(LOG_TAG " %s setsockopt(SO_REUSEADDR) failed. 
: %d\n", __func__, + mSocketServerFd); + return false; + } + addr_ip.sin_family = AF_INET; + addr_ip.sin_addr.s_addr = htonl(INADDR_ANY); + addr_ip.sin_port = htons(port); - ret = listen(mSocketServerFd, 5); - if (ret < 0) { - ALOGE("%s Failed to listen on %s", __FUNCTION__, mSocketPath.c_str()); + ret = ::bind(mSocketServerFd, (struct sockaddr *)&addr_ip, + sizeof(struct sockaddr_in)); + if (ret < 0) { + ALOGV(LOG_TAG " %s Failed to bind port(%d). ret: %d, %s", __func__, port, ret, + strerror(errno)); + return false; + } + ret = listen(mSocketServerFd, 5); + if (ret < 0) { + ALOGV("%s Failed to listen on ", __FUNCTION__); + return false; + } + }else if(trans_mode == VSOCK) { + memset(&addr_ip, 0, sizeof(addr_ip)); + addr_vm.svm_family = AF_VSOCK; + addr_vm.svm_port = 1982; + addr_vm.svm_cid = 3; + int ret = 0; + int so_reuseaddr = 1; + size_update = 0; + mSocketServerFd = ::socket(AF_VSOCK, SOCK_STREAM, 0); + if (mSocketServerFd < 0) { + ALOGV(LOG_TAG " %s:Line:[%d] Fail to construct camera socket with error: [%s]", + __FUNCTION__, __LINE__, strerror(errno)); return false; - } + } + ret = ::bind(mSocketServerFd, (struct sockaddr *)&addr_vm, + sizeof(struct sockaddr_vm)); + if (ret < 0) { + ALOGV(LOG_TAG " %s Failed to bind port(%d). ret: %d, %s", __func__, addr_vm.svm_port, ret, + strerror(errno)); + return false; + } + ret = listen(mSocketServerFd, 32); + if (ret < 0) { + ALOGV("%s Failed to listen on ", __FUNCTION__); + return false; + } + + } else if(trans_mode == PIPE) { + while (mRunning) { + if(trans_mode == PIPE) { + while (1) { + pipe_handle = open("/dev/virtpipe-common", O_RDWR); + if (pipe_handle < 0) { + ALOGD("%s open /dev/virtpipe-common fail errno=%d, error=%s\n", __FUNCTION__, errno, strerror(errno)); + sleep(1); + continue; + } else { + break; + } + } + while(1) { + ALOGD("%s: opening pipe...\n", __FUNCTION__); + if (write(pipe_handle, "camera_ctrl", strlen("camera_ctrl")) < 0) { + ALOGE("%s: open pipe fail...\n", __FUNCTION__); + sleep(1); + continue; + } else { + break; + } + } + + while (1) { + gDataPipeHandle = open("/dev/virtpipe-common", O_RDWR); + if (gDataPipeHandle < 0) { + ALOGD("%s open /dev/virtpipe-common fail errno=%d, error=%s\n", __FUNCTION__, errno, strerror(errno)); + sleep(1); + continue; + } else { + break; + } + } + while(1) { + ALOGD("%s: opening camera data pipe...\n", __FUNCTION__); + if (write(gDataPipeHandle, "camera_data", strlen("camera_data")) < 0) { + ALOGE("%s: open camera data pipe fail...\n", __FUNCTION__); + sleep(1); + continue; + } else { + break; + } + } + ALOGE("pipe connected success \n"); + } + + ALOGI(LOG_TAG " %s: Wait for camera client to connect. . .", __FUNCTION__); + + new_client_fd = pipe_handle; + ALOGI(LOG_TAG " %s: Accepted client: [%d]", __FUNCTION__, new_client_fd); + if (new_client_fd < 0) { + ALOGE(LOG_TAG " %s: Fail to accept client. Error: [%s]", __FUNCTION__, strerror(errno)); + continue; + } + mClientFd = new_client_fd; + + bool status = false; + status = configureCapabilities(false); + if (status) { + ALOGI(LOG_TAG + "%s: Capability negotiation and metadata update" + "for %d camera(s) completed successfully..", + __FUNCTION__, mNumOfCamerasRequested); + } + + ClientVideoBuffer *handle = ClientVideoBuffer::getClientInstance(); + uint8_t *fbuffer = (uint8_t *)handle->clientBuf[handle->clientRevCount % 1].buffer; + // Reset and clear the input buffer before receiving the frames. 
+            handle->reset();
+
+            struct pollfd fd;
+            int event;
+
+            fd.fd = mClientFd;  // socket fd to poll for incoming frames
+            fd.events = POLLIN | POLLHUP;
+            pthread_cond_signal(&mSignalHotplug);
+            int retryLoop = 0;
+            while (true) {
+                if (!ProcessCameraDataFromPipe(handle)) {
+                    retryLoop++;
+                    if (retryLoop > 5) {
+                        break;
+                    }
+                    sleep(1);
+                    continue;
+                }
+                retryLoop = 0;
+            }
+        }
+    }
 
     while (mRunning) {
         ALOGI(LOG_TAG " %s: Wait for camera client to connect. . .", __FUNCTION__);
 
-        socklen_t alen = sizeof(struct sockaddr_un);
-
-        int new_client_fd = ::accept(mSocketServerFd, (struct sockaddr *)&addr_un, &alen);
+        if (trans_mode == TCP) {
+            socklen_t alen = sizeof(struct sockaddr_in);
+            new_client_fd = ::accept(mSocketServerFd, (struct sockaddr *)&addr_ip, &alen);
+        } else if (trans_mode == VSOCK) {
+            socklen_t alen = sizeof(struct sockaddr_vm);
+            new_client_fd = ::accept(mSocketServerFd, (struct sockaddr *)&addr_vm, &alen);
+        } else if (trans_mode == UNIX) {
+            socklen_t alen = sizeof(struct sockaddr_un);
+            new_client_fd = ::accept(mSocketServerFd, (struct sockaddr *)&addr_un, &alen);
+        }
         ALOGI(LOG_TAG " %s: Accepted client: [%d]", __FUNCTION__, new_client_fd);
         if (new_client_fd < 0) {
             ALOGE(LOG_TAG " %s: Fail to accept client. Error: [%s]", __FUNCTION__, strerror(errno));
@@ -180,10 +1071,19 @@
         }
         mClientFd = new_client_fd;
 
-        ClientVideoBuffer *handle = ClientVideoBuffer::getClientInstance();
-        char *fbuffer = (char *)handle->clientBuf[handle->clientRevCount % 1].buffer;
+        bool status = false;
+        status = configureCapabilities(false);
+        if (status) {
+            ALOGI(LOG_TAG
+                  "%s: Capability negotiation and metadata update "
+                  "for %d camera(s) completed successfully..",
+                  __FUNCTION__, mNumOfCamerasRequested);
+        }
 
-        clearBuffer(fbuffer, 640, 480);
+        ClientVideoBuffer *handle = ClientVideoBuffer::getClientInstance();
+        uint8_t *fbuffer = (uint8_t *)handle->clientBuf[handle->clientRevCount % 1].buffer;
+        // Reset and clear the input buffer before receiving the frames.
+        handle->reset();
 
         struct pollfd fd;
         int event;
 
@@ -193,7 +1093,7 @@ bool CameraSocketServerThread::threadLoop() {
 
         while (true) {
            // check if there are any events on fd.
-            int ret = poll(&fd, 1, 3000);  // 1 second for timeout
+            int ret = poll(&fd, 1, 3000);  // 3 seconds for timeout
 
             event = fd.revents;  // returned events
 
@@ -204,11 +1104,51 @@
                 shutdown(mClientFd, SHUT_RDWR);
                 close(mClientFd);
                 mClientFd = -1;
-                clearBuffer(fbuffer, 640, 480);
+                handle->reset();
                 break;
             } else if (event & POLLIN) {  // preview / record
                 // data is available in socket => read data
                 if (gIsInFrameI420) {
+                    if (trans_mode == VSOCK) {
+                        int size_header = 0;
+                        ssize_t size_pending = 0;
+                        // Check if the header type is data.
+                        camera_header_t buffer_header = {};
+                        size_header = recv(mClientFd, (char *)&buffer_header,
+                                           sizeof(camera_header_t), 0);
+                        if (buffer_header.type == CAMERA_DATA) {
+                            size_pending = buffer_header.size;
+                            while (size_pending != 0) {
+                                ssize_t size_data = 0;
+                                size_data = recv(mClientFd, (char *)fbuffer + size_update,
+                                                 size_pending, 0);
+                                if (size_data < 0) {
+                                    // error handling while in preview
+                                    ALOGE(LOG_TAG "recv error while in preview, retrying");
+                                    continue;
+                                }
+                                size_update += size_data;
+                                size_pending -= size_data;
+                                if (size_pending == 0) {
+                                    handle->clientRevCount++;
+#if 0
+                                    FILE *fp_dump = fopen("/data/dump.yuv", "w");
+                                    if (fp_dump != NULL) {
+                                        fwrite(fbuffer, 460800, 1, fp_dump);
+                                        fclose(fp_dump);
+                                    }
+#endif
+                                    size_update = 0;
+
+                                    ALOGV(LOG_TAG
+                                          "[I420] %s: Packet rev %d and "
+                                          "size %zd",
+                                          __FUNCTION__, handle->clientRevCount, size_data);
+                                    break;
+                                }
+                            }
+                        } else {
+                            ALOGE("received packet is not camera data");
+                        }
+                    } else {
                         ssize_t size = 0;
 
                         if ((size = recv(mClientFd, (char *)fbuffer, 460800, MSG_WAITALL)) > 0) {
@@ -217,55 +1157,140 @@
                                   "[I420] %s: Pocket rev %d and "
                                   "size %zd",
                                   __FUNCTION__, handle->clientRevCount, size);
+                        }
                     }
+                } else if (gIsInFrameMJPG) {
+                    int size_header = 0;
+                    ssize_t size_pending = 0;
+                    ALOGVV("%s: MJPEG input, receiving the header", __FUNCTION__);
+                    camera_header_t buffer_header = {};
+                    size_header = recv(mClientFd, (char *)&buffer_header,
+                                       sizeof(camera_header_t), 0);
+                    if (buffer_header.type == CAMERA_DATA) {
+                        uint8_t *mjpeg_buffer = (uint8_t *)malloc(buffer_header.size);
+                        if (mjpeg_buffer == NULL) {
+                            ALOGE(LOG_TAG "%s: buffer allocation failed: %d ", __FUNCTION__,
+                                  __LINE__);
+                            continue;
+                        }
+                        size_pending = buffer_header.size;
+                        ALOGVV("%s: MJPEG payload size %zd", __FUNCTION__, size_pending);
+                        while (size_pending != 0) {
+                            ssize_t size_data = 0;
+                            ALOGVV("%s: MJPEG bytes pending %zd", __FUNCTION__, size_pending);
+                            size_data = recv(mClientFd, (char *)mjpeg_buffer + size_update,
+                                             size_pending, 0);
+                            if (size_data < 0) {
+                                // error handling while in preview
+                                ALOGE(LOG_TAG "recv error while in preview, retrying");
+                                continue;
+                            }
+                            size_update += size_data;
+                            size_pending -= size_data;
+                            if (size_pending == 0) {
+                                handle->clientRevCount++;
+                                size_update = 0;
+
+                                ALOGV(LOG_TAG
+                                      "[MJPEG] %s: Packet rev %d and "
+                                      "size %zd",
+                                      __FUNCTION__, handle->clientRevCount, size_data);
+                                break;
+                            }
+                        }
+                        int res = libyuv::MJPGToI420(
+                            mjpeg_buffer, buffer_header.size,
+                            static_cast<uint8_t *>(fbuffer), gCameraMaxWidth,
+                            static_cast<uint8_t *>(fbuffer + (gCameraMaxWidth * gCameraMaxHeight)),
+                            (gCameraMaxWidth / 2),
+                            static_cast<uint8_t *>(fbuffer + (gCameraMaxWidth * gCameraMaxHeight) +
+                                                   ((gCameraMaxWidth * gCameraMaxHeight) / 4)),
+                            (gCameraMaxWidth / 2),
+                            gCameraMaxWidth, gCameraMaxHeight, gCameraMaxWidth, gCameraMaxHeight);
+                        if (res != 0) {
+                            ALOGE("%s: failed to convert MJPEG to I420, ret %d, size %u",
+                                  __FUNCTION__, res, buffer_header.size);
+                        }
+                        free(mjpeg_buffer);
+                    } else {
+                        ALOGE("MJPEG packet header is not camera data");
+                    }
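+                    // Destination plane layout behind the MJPGToI420() call above
+                    // (sketch, with W = gCameraMaxWidth and H = gCameraMaxHeight):
+                    //   Y : fbuffer                   W*H bytes,        stride W
+                    //   U : fbuffer + W*H             (W/2)*(H/2) bytes, stride W/2
+                    //   V : fbuffer + W*H + W*H/4     (W/2)*(H/2) bytes, stride W/2
+                    // libyuv's signature, for reference:
+                    //   int MJPGToI420(const uint8_t *sample, size_t sample_size,
+                    //                  uint8_t *dst_y, int dst_stride_y,
+                    //                  uint8_t *dst_u, int dst_stride_u,
+                    //                  uint8_t *dst_v, int dst_stride_v,
+                    //                  int src_width, int src_height,
+                    //                  int dst_width, int dst_height);  // 0 on success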
                } else if (gIsInFrameH264) {  // default H264
-                    size_t recv_frame_size = 0;
+#ifdef ENABLE_FFMPEG
                     ssize_t size = 0;
-                    if ((size = recv(mClientFd, (char *)&recv_frame_size, sizeof(size_t),
+                    camera_header_t header = {};
+                    if ((size = recv(mClientFd, (char *)&header, sizeof(camera_header_t),
                          MSG_WAITALL)) > 0) {
-                        ALOGVV("[H264] Received Header %zd bytes. Payload size: %zu", size,
-                               recv_frame_size);
-                        if (recv_frame_size > mSocketBuffer.size()) {
+                        ALOGVV("%s: Received Header %zd bytes. Payload size: %u", __FUNCTION__,
+                               size, header.size);
+                        if (header.type == REQUEST_CAPABILITY) {
+                            ALOGI(LOG_TAG
+                                  "%s: [Warning] Capability negotiation was already "
+                                  "done for %d camera(s); re-negotiation is not supported!!!",
+                                  __FUNCTION__, mNumOfCamerasRequested);
+                            continue;
+                        } else if (header.type != CAMERA_DATA) {
+                            ALOGE(LOG_TAG "%s: invalid camera_packet_type: %s", __FUNCTION__,
+                                  camera_type_to_str(header.type));
+                            continue;
+                        }
+
+                        if (header.size > mSocketBuffer.size()) {
                             // maximum size of a H264 packet in any aggregation packet is 65535
                             // bytes. Source: https://tools.ietf.org/html/rfc6184#page-13
                             ALOGE(
-                                "%s Fatal: Unusual H264 packet size detected: %zu! Max is %zu, ...",
-                                __func__, recv_frame_size, mSocketBuffer.size());
+                                "%s Fatal: Unusual encoded packet size detected: %u! Max is %zu, "
+                                "...",
+                                __func__, header.size, mSocketBuffer.size());
                             continue;
                         }
+
                         // recv frame
-                        if ((size = recv(mClientFd, (char *)mSocketBuffer.data(), recv_frame_size,
+                        if ((size = recv(mClientFd, (char *)mSocketBuffer.data(), header.size,
                              MSG_WAITALL)) > 0) {
-                            mSocketBufferSize = recv_frame_size;
-                            ALOGVV("%s [H264] Camera session state: %s", __func__,
-                                   kCameraSessionStateNames.at(mCameraSessionState).c_str());
+                            if (size < header.size) {
+                                ALOGW("%s : Incomplete data read %zd/%u bytes", __func__, size,
+                                      header.size);
+                                size_t remaining_size = header.size;
+                                remaining_size -= size;
+                                size_t offset = size;  // resume where the short read stopped
+                                while (remaining_size > 0) {
+                                    if ((size = recv(mClientFd,
+                                                     (char *)mSocketBuffer.data() + offset,
+                                                     remaining_size, MSG_WAITALL)) > 0) {
+                                        offset += size;
+                                        remaining_size -= size;
+                                        ALOGI("%s : Read %zd after incomplete data, remaining %zu",
+                                              __func__, size, remaining_size);
+                                    }
+                                }
+                                size = header.size;
+                            }
+
+                            mSocketBufferSize = header.size;
+                            ALOGVV("%s: Camera session state: %s", __func__,
+                                   kCameraSessionStateNames.at(mCameraSessionState).c_str());
                             switch (mCameraSessionState) {
                                 case CameraSessionState::kCameraOpened:
                                     mCameraSessionState = CameraSessionState::kDecodingStarted;
-                                    ALOGVV("%s [H264] Decoding started now.", __func__);
+                                    ALOGVV("%s: Decoding started now.", __func__);
+                                    // intentional fall-through: decode this first payload too
                                 case CameraSessionState::kDecodingStarted:
                                     mVideoDecoder->decode(mSocketBuffer.data(), mSocketBufferSize);
                                     handle->clientRevCount++;
-                                    ALOGVV("%s [H264] Received Payload #%d %zd/%zu bytes", __func__,
-                                           handle->clientRevCount, size, recv_frame_size);
+                                    ALOGVV("%s: Received Payload #%d %zd/%u bytes", __func__,
+                                           handle->clientRevCount, size, header.size);
+                                    mSocketBuffer.fill(0);
                                     break;
                                 case CameraSessionState::kCameraClosed:
-                                    mVideoDecoder->flush_decoder();
-                                    mVideoDecoder->destroy();
+                                    ALOGI("%s: Decoding stopping and flushing decoder.", __func__);
                                     mCameraSessionState = CameraSessionState::kDecodingStopped;
-                                    ALOGI("%s [H264] Decoding stopped now.", __func__);
+                                    ALOGI("%s: Decoding stopped now.", __func__);
                                     break;
                                 case CameraSessionState::kDecodingStopped:
-                                    ALOGVV("%s [H264] Decoding is already stopped, skip the packets",
-                                           __func__);
+                                    ALOGVV("%s: Decoding is already stopped, skip the packets",
+                                           __func__);
+                                    mSocketBuffer.fill(0);
+                                    break;
                                 default:
-                                    ALOGE("%s [H264] Invalid Camera session state!", __func__);
+                                    
ALOGE("%s: Invalid Camera session state!", __func__); break; } } } +#endif } else { - ALOGE("%s: only H264, I420 input frames supported", __FUNCTION__); + ALOGE( + "%s: Only H264, I420 Input frames are supported. Check Input format", + __FUNCTION__); } } else { // ALOGE("%s: continue polling..", __FUNCTION__); diff --git a/src/NV21JpegCompressor.cpp b/src/NV21JpegCompressor.cpp index 3348959..771d43f 100644 --- a/src/NV21JpegCompressor.cpp +++ b/src/NV21JpegCompressor.cpp @@ -45,18 +45,18 @@ typedef void (*GetCompressedImageFunc)(JpegStub *stub, void *buff); typedef size_t (*GetCompressedSizeFunc)(JpegStub *stub); NV21JpegCompressor::NV21JpegCompressor() { - const char dlName[] = "/system/vendor/lib64/hw/camera.cic_cloud.jpeg.so"; + const char dlName[] = "/system/vendor/lib64/hw/camera.celadon.jpeg.so"; if (!mDl) { mDl = dlopen(dlName, RTLD_NOW); } if (mDl) { - InitFunc f = (InitFunc)getSymbol(mDl, "JpegStub_init"); - if (f) - (*f)(&mStub); - else - ALOGE("%s: Fatal error: getSymbol(JpegStub_init) failed", __func__); + InitFunc f = (InitFunc)getSymbol(mDl, "JpegStub_init"); + if (f) + (*f)(&mStub); + else + ALOGE("%s: Fatal error: getSymbol(JpegStub_init) failed", __func__); } else { - ALOGE("%s: Fatal error: dlopen(%s) failed", __func__, dlName); + ALOGE("%s: Fatal error: dlopen(%s) failed", __func__, dlName); } } diff --git a/src/VirtualBaseCamera.cpp b/src/VirtualBaseCamera.cpp index 747a429..9d65978 100644 --- a/src/VirtualBaseCamera.cpp +++ b/src/VirtualBaseCamera.cpp @@ -61,6 +61,12 @@ status_t VirtualBaseCamera::getCameraInfo(struct camera_info *info) { return NO_ERROR; } +status_t VirtualBaseCamera::setTorchMode(const char* camera_id, bool enable){ + ALOGV("%s", __FUNCTION__); + + return OK; +} + status_t VirtualBaseCamera::setCameraFD(int socketFd) { mCameraSocketFD = socketFd; ALOGV("%s mCameraSocketFD = %d", __FUNCTION__, mCameraSocketFD); diff --git a/src/VirtualCamera3.cpp b/src/VirtualCamera3.cpp index 587a881..1285afe 100644 --- a/src/VirtualCamera3.cpp +++ b/src/VirtualCamera3.cpp @@ -58,8 +58,7 @@ VirtualCamera3::~VirtualCamera3() {} * Public API ***************************************************************************/ -status_t VirtualCamera3::Initialize(const char *device_name, const char *frame_dims, - const char *facing_dir) { +status_t VirtualCamera3::Initialize() { ALOGV("%s", __FUNCTION__); mStatus = STATUS_CLOSED; @@ -70,8 +69,8 @@ status_t VirtualCamera3::Initialize(const char *device_name, const char *frame_d * Camera API implementation ***************************************************************************/ -status_t VirtualCamera3::connectCamera(hw_device_t **device) { - ALOGV("%s", __FUNCTION__); +status_t VirtualCamera3::openCamera(hw_device_t **device) { + ALOGV("%s: E", __FUNCTION__); if (device == NULL) return BAD_VALUE; if (mStatus != STATUS_CLOSED) { @@ -81,12 +80,13 @@ status_t VirtualCamera3::connectCamera(hw_device_t **device) { *device = &common; mStatus = STATUS_OPEN; + ALOGI("%s : Camera %d opened successfully..", __FUNCTION__, mCameraID); return NO_ERROR; } status_t VirtualCamera3::closeCamera() { mStatus = STATUS_CLOSED; - ALOGI("%s : Camera session closed successfully!!!", __FUNCTION__); + ALOGI("%s : Camera %d closed successfully..", __FUNCTION__, mCameraID); return NO_ERROR; } @@ -94,6 +94,10 @@ status_t VirtualCamera3::getCameraInfo(struct camera_info *info) { return VirtualBaseCamera::getCameraInfo(info); } +status_t VirtualCamera3::setTorchMode(const char* camera_id, bool enable) { + return 
VirtualBaseCamera::setTorchMode(camera_id,enable); +} + /**************************************************************************** * Camera Device API implementation. * These methods are called from the camera API callback routines. diff --git a/src/VirtualCameraFactory.cpp b/src/VirtualCameraFactory.cpp index ce2fecd..1ea9fb3 100644 --- a/src/VirtualCameraFactory.cpp +++ b/src/VirtualCameraFactory.cpp @@ -25,11 +25,12 @@ #include "VirtualCameraFactory.h" #include "VirtualFakeCamera3.h" #include "CameraSocketServerThread.h" +#ifdef ENABLE_FFMPEG #include "CGCodec.h" - +#endif #include #include - +#include "VirtualBuffer.h" extern camera_module_t HAL_MODULE_INFO_SYM; /* @@ -42,6 +43,7 @@ namespace android { bool gIsInFrameI420; bool gIsInFrameH264; +bool gIsInFrameMJPG; bool gUseVaapi; void VirtualCameraFactory::readSystemProperties() { @@ -52,100 +54,85 @@ void VirtualCameraFactory::readSystemProperties() { property_get("ro.vendor.camera.in_frame_format.i420", prop_val, "false"); gIsInFrameI420 = !strcmp(prop_val, "true"); - + //D TODO property_get("ro.vendor.camera.decode.vaapi", prop_val, "false"); gUseVaapi = !strcmp(prop_val, "true"); + gIsInFrameH264 = false; + gIsInFrameI420 = false; + gIsInFrameMJPG = false; ALOGI("%s - gIsInFrameH264: %d, gIsInFrameI420: %d, gUseVaapi: %d", __func__, gIsInFrameH264, gIsInFrameI420, gUseVaapi); } VirtualCameraFactory::VirtualCameraFactory() : mVirtualCameras(nullptr), - mVirtualCameraNum(0), - mFakeCameraNum(0), + mNumOfCamerasSupported(0), mConstructedOK(false), mCallbacks(nullptr) { - /* - * Figure out how many cameras need to be created, so we can allocate the - * array of virtual cameras before populating it. - */ - int virtualCamerasSize = 0; - - mCameraSessionState = socket::CameraSessionState::kNone; - - waitForRemoteSfFakeCameraPropertyAvailable(); - // Fake Cameras - if (isFakeCameraEmulationOn(/* backCamera */ true)) { - mFakeCameraNum++; - } - if (isFakeCameraEmulationOn(/* backCamera */ false)) { - mFakeCameraNum++; - } - virtualCamerasSize += mFakeCameraNum; - - /* - * We have the number of cameras we need to create, now allocate space for - * them. - */ - mVirtualCameras = new VirtualBaseCamera *[virtualCamerasSize]; - if (mVirtualCameras == nullptr) { - ALOGE("%s: Unable to allocate virtual camera array for %d entries", __FUNCTION__, - mVirtualCameraNum); - return; - } - if (mVirtualCameras != nullptr) { - for (int n = 0; n < virtualCamerasSize; n++) { - mVirtualCameras[n] = nullptr; - } - } - readSystemProperties(); if (gIsInFrameH264) { - // create decoder + // Create decoder to decode H264/H265 input frames. ALOGV("%s Creating decoder.", __func__); - mDecoder = std::make_shared(); } - // create socket server who push packets to decoder - createSocketServer(mDecoder); + // Create socket server which is used to communicate with client device. + createSocketServer(); ALOGV("%s socket server created: ", __func__); + pthread_mutex_lock(&mCapReadLock); + pthread_cond_wait(&mSignalCapRead, &mCapReadLock); - // Create fake cameras, if enabled. - if (isFakeCameraEmulationOn(/* backCamera */ true)) { - createFakeCamera(mSocketServer, mDecoder, /* backCamera */ true); - } - if (isFakeCameraEmulationOn(/* backCamera */ false)) { - createFakeCamera(mSocketServer, mDecoder, /* backCamera */ false); - } - - ALOGI("%d cameras are being virtual. 
%d of them are fake cameras.", mVirtualCameraNum, - mFakeCameraNum); - + pthread_mutex_unlock(&mCapReadLock); +//constructVirtualCamera(); mConstructedOK = true; } -bool VirtualCameraFactory::createSocketServer(std::shared_ptr decoder) { - ALOGV("%s: E", __FUNCTION__); +bool VirtualCameraFactory::constructVirtualCamera() { + ALOGV("%s: Enter old %d new %d", __FUNCTION__, mNumOfCamerasSupported, gMaxNumOfCamerasSupported); - char id[PROPERTY_VALUE_MAX] = {0}; - if (property_get("ro.boot.container.id", id, "") > 0) { - mSocketServer = - std::make_shared(id, decoder, std::ref(mCameraSessionState)); + // Allocate space for each cameras requested. + if(mVirtualCameras != NULL) { + for(int i = 0; i < mNumOfCamerasSupported; i++) { + if(mCallbacks != nullptr) { + mCallbacks->camera_device_status_change(mCallbacks, mVirtualCameras[i]->mCameraID, CAMERA_DEVICE_STATUS_NOT_PRESENT); + } else { + ALOGE("%s : Fail to update camera status to camera server\n", __FUNCTION__); + } + } + delete mVirtualCameras; + mVirtualCameras = NULL; + } + mNumOfCamerasSupported = gMaxNumOfCamerasSupported; - mSocketServer->run("FrontBackCameraSocketServerThread"); - } else - ALOGE("%s: FATAL: container id is not set!!", __func__); + // Allocate space for each cameras requested. + mVirtualCameras = new VirtualBaseCamera *[mNumOfCamerasSupported]; + if (mVirtualCameras == nullptr) { + ALOGE("%s: Unable to allocate virtual camera array", __FUNCTION__); + return false; + } else { + for (int n = 0; n < mNumOfCamerasSupported; n++) { + mVirtualCameras[n] = nullptr; + } + } + ALOGI("%s: Total number of cameras supported: %d", __FUNCTION__, mNumOfCamerasSupported); + return true; +} +bool VirtualCameraFactory::createSocketServer() { + ALOGV("%s: E", __FUNCTION__); - ALOGV("%s: X", __FUNCTION__); + mCameraSessionState = socket::CameraSessionState::kNone; + char id[PROPERTY_VALUE_MAX] = {0}; + mSocketServer = + std::make_shared(id, std::ref(mCameraSessionState)); + mSocketServer->run("FrontBackCameraSocketServerThread"); // TODO need to return false if error. 
 
 VirtualCameraFactory::~VirtualCameraFactory() {
     if (mVirtualCameras != nullptr) {
-        for (int n = 0; n < mVirtualCameraNum; n++) {
+        for (int n = 0; n < mNumOfCamerasSupported; n++) {
             if (mVirtualCameras[n] != nullptr) {
                 delete mVirtualCameras[n];
             }
@@ -177,13 +164,13 @@ int VirtualCameraFactory::cameraDeviceOpen(int cameraId, hw_device_t **device) {
         return -EINVAL;
     }
 
-    if (cameraId < 0 || cameraId >=getVirtualCameraNum()) {
+    if (cameraId < 0 || cameraId >= getVirtualCameraNum()) {
         ALOGE("%s: Camera id %d is out of bounds (%d)", __FUNCTION__, cameraId,
               getVirtualCameraNum());
         return -ENODEV;
     }
 
-    return mVirtualCameras[cameraId]->connectCamera(device);
+    return mVirtualCameras[cameraId]->openCamera(device);
 }
 
 int VirtualCameraFactory::getCameraInfo(int cameraId, struct camera_info *info) {
@@ -202,6 +189,11 @@
     return mVirtualCameras[cameraId]->getCameraInfo(info);
 }
 
+int VirtualCameraFactory::setTorchMode(const char *camera_id, bool enable) {
+    ALOGI("%s: ", __FUNCTION__);
+    (void)camera_id;
+    (void)enable;  // parameters unused: torch mode is not supported
+    return -ENOSYS;
+}
 
 int VirtualCameraFactory::setCallbacks(const camera_module_callbacks_t *callbacks) {
     ALOGV("%s: callbacks = %p", __FUNCTION__, callbacks);
@@ -247,6 +239,9 @@ int VirtualCameraFactory::get_number_of_cameras() {
 int VirtualCameraFactory::get_camera_info(int camera_id, struct camera_info *info) {
     return gVirtualCameraFactory.getCameraInfo(camera_id, info);
 }
+int VirtualCameraFactory::set_torch_mode(const char *camera_id, bool enable) {
+    return gVirtualCameraFactory.setTorchMode(camera_id, enable);
+}
 
 int VirtualCameraFactory::set_callbacks(const camera_module_callbacks_t *callbacks) {
     return gVirtualCameraFactory.setCallbacks(callbacks);
@@ -265,107 +260,32 @@ int VirtualCameraFactory::open_legacy(const struct hw_module_t *module, const ch
 /********************************************************************************
  * Internal API
  *******************************************************************************/
-
-void VirtualCameraFactory::createFakeCamera(std::shared_ptr<CameraSocketServerThread> socket_server,
-                                            std::shared_ptr<CGVideoDecoder> decoder,
-                                            bool backCamera) {
-    int halVersion = getCameraHalVersion(backCamera);
-
-    /*
-     * Create and initialize the fake camera, using the index into
-     * mVirtualCameras as the camera ID.
-     */
-    switch (halVersion) {
-        case 1:
-        case 2:
-            ALOGE("%s: Unuspported Camera HAL version. Only HAL version 3 is supported.", __func__);
-            break;
-        case 3: {
-            mVirtualCameras[mVirtualCameraNum] =
-                new VirtualFakeCamera3(mVirtualCameraNum, backCamera, &HAL_MODULE_INFO_SYM.common,
-                                       socket_server, decoder, std::ref(mCameraSessionState));
-        } break;
-        default:
-            ALOGE("%s: Unknown %s camera hal version requested: %d", __FUNCTION__,
-                  backCamera ? "back" : "front", halVersion);
-    }
-
-    if (mVirtualCameras[mVirtualCameraNum] == nullptr) {
+void VirtualCameraFactory::createVirtualRemoteCamera(
+    std::shared_ptr<CameraSocketServerThread> socket_server,
+    int cameraId) {
+    ALOGV("%s: E", __FUNCTION__);
+    mVirtualCameras[cameraId] =
+        new VirtualFakeCamera3(cameraId, &HAL_MODULE_INFO_SYM.common, socket_server,
+                               std::ref(mCameraSessionState));
+    if (mVirtualCameras[cameraId] == nullptr) {
         ALOGE("%s: Unable to instantiate fake camera class", __FUNCTION__);
     } else {
-        ALOGV("%s: %s camera device version is %d", __FUNCTION__, backCamera ? 
"Back" : "Front", - halVersion); - status_t res = mVirtualCameras[mVirtualCameraNum]->Initialize(nullptr, nullptr, nullptr); + status_t res = mVirtualCameras[cameraId]->Initialize(); if (res == NO_ERROR) { + ALOGI("%s: Initialization for %s Camera ID: %d completed successfully..", __FUNCTION__, + gCameraFacingBack ? "Back" : "Front", cameraId); // Camera creation and initialization was successful. - mVirtualCameraNum++; } else { ALOGE("%s: Unable to initialize %s camera %d: %s (%d)", __FUNCTION__, - backCamera ? "back" : "front", mVirtualCameraNum, strerror(-res), res); - delete mVirtualCameras[mVirtualCameraNum]; - } - } -} - -void VirtualCameraFactory::waitForRemoteSfFakeCameraPropertyAvailable() { - /* - * Camera service may start running before remote-props sets - * remote.sf.fake_camera to any of the follwing four values: - * "none,front,back,both"; so we need to wait. - * - * android/camera/camera-service.c - * bug: 30768229 - */ - int numAttempts = 100; - char prop[PROPERTY_VALUE_MAX]; - bool timeout = true; - for (int i = 0; i < numAttempts; ++i) { - if (property_get("remote.sf.fake_camera", prop, nullptr) != 0) { - timeout = false; - break; + gCameraFacingBack ? "back" : "front", cameraId, strerror(-res), res); + delete mVirtualCameras[cameraId]; } - usleep(5000); } - if (timeout) { - ALOGE("timeout (%dms) waiting for property remote.sf.fake_camera to be set\n", - 5 * numAttempts); - } -} - -bool VirtualCameraFactory::isFakeCameraEmulationOn(bool backCamera) { - /* - * Defined by 'remote.sf.fake_camera' boot property. If the property exists, - * and if it's set to 'both', then fake cameras are used to emulate both - * sides. If it's set to 'back' or 'front', then a fake camera is used only - * to emulate the back or front camera, respectively. - */ - char prop[PROPERTY_VALUE_MAX]; - if ((property_get("remote.sf.fake_camera", prop, nullptr) > 0) && - (!strcmp(prop, "both") || !strcmp(prop, backCamera ? "back" : "front"))) { - return true; + if(mCallbacks != nullptr) { + mCallbacks->camera_device_status_change(mCallbacks, cameraId, CAMERA_DEVICE_STATUS_PRESENT); } else { - return false; - } -} - -int VirtualCameraFactory::getCameraHalVersion(bool backCamera) { - /* - * Defined by 'remote.sf.front_camera_hal_version' and - * 'remote.sf.back_camera_hal_version' boot properties. If the property - * doesn't exist, it is assumed we are working with HAL v1. - */ - char prop[PROPERTY_VALUE_MAX]; - const char *propQuery = backCamera ? "remote.sf.back_camera_hal" : "remote.sf.front_camera_hal"; - if (property_get(propQuery, prop, nullptr) > 0) { - char *propEnd = prop; - int val = strtol(prop, &propEnd, 10); - if (*propEnd == '\0') { - return val; - } - // Badly formatted property. It should just be a number. 
- ALOGE("remote.sf.back_camera_hal is not a number: %s", prop); + ALOGE("%s : Fail to update camera status to camera server\n", __FUNCTION__); } - return 3; } /******************************************************************************** diff --git a/src/VirtualCameraHal.cpp b/src/VirtualCameraHal.cpp index 5d83909..29b361e 100644 --- a/src/VirtualCameraHal.cpp +++ b/src/VirtualCameraHal.cpp @@ -32,7 +32,11 @@ camera_module_t HAL_MODULE_INFO_SYM = { .common = { .tag = HARDWARE_MODULE_TAG, - .module_api_version = CAMERA_MODULE_API_VERSION_2_3, + //the camera module api version is changed to 2.4 as the android expects the + //api version to be 2.4 and higher for android version greater than Q + // this fix was added as part of VTS cases execution + //.module_api_version = CAMERA_MODULE_API_VERSION_2_3, + .module_api_version = CAMERA_MODULE_API_VERSION_2_4, .hal_api_version = HARDWARE_HAL_API_VERSION, .id = CAMERA_HARDWARE_MODULE_ID, .name = "Virtual Camera Module", @@ -43,6 +47,7 @@ camera_module_t HAL_MODULE_INFO_SYM = { }, .get_number_of_cameras = android::VirtualCameraFactory::get_number_of_cameras, .get_camera_info = android::VirtualCameraFactory::get_camera_info, + .set_torch_mode = android::VirtualCameraFactory::set_torch_mode, .set_callbacks = android::VirtualCameraFactory::set_callbacks, .get_vendor_tag_ops = android::VirtualCameraFactory::get_vendor_tag_ops, .open_legacy = android::VirtualCameraFactory::open_legacy}; diff --git a/src/VirtualFakeCamera3.cpp b/src/VirtualFakeCamera3.cpp index 55cd0ac..c309e8a 100644 --- a/src/VirtualFakeCamera3.cpp +++ b/src/VirtualFakeCamera3.cpp @@ -22,6 +22,7 @@ #include //#define LOG_NNDEBUG 0 +#define LOG_NDEBUG 0 #define LOG_TAG "VirtualFakeCamera3: " #include #include @@ -40,21 +41,24 @@ #include #include #include "VirtualBuffer.h" - #if defined(LOG_NNDEBUG) && LOG_NNDEBUG == 0 #define ALOGVV ALOGV #else #define ALOGVV(...) 
((void)0) #endif -#define MAX_TIMEOUT_FOR_CAMERA_CLOSE_SESSION 12 //12ms - using namespace std; using namespace chrono; using namespace chrono_literals; - +buffer_handle_t bufferHandle; +buffer_handle_t bufferHandle1; +buffer_handle_t bufferHandle2; +buffer_handle_t bufferHandle_3; namespace android { +int32_t gSrcWidth; +int32_t gSrcHeight; + using namespace socket; /** * Constants for camera capabilities @@ -63,18 +67,13 @@ using namespace socket; const int64_t USEC = 1000LL; const int64_t MSEC = USEC * 1000LL; -const int32_t VirtualFakeCamera3::kAvailableFormats[] = { - HAL_PIXEL_FORMAT_RAW16, HAL_PIXEL_FORMAT_BLOB, HAL_PIXEL_FORMAT_RGBA_8888, - HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, - // These are handled by YCbCr_420_888 - // HAL_PIXEL_FORMAT_YV12, - // HAL_PIXEL_FORMAT_YCrCb_420_SP, - HAL_PIXEL_FORMAT_YCbCr_420_888, HAL_PIXEL_FORMAT_Y16}; - -const uint32_t VirtualFakeCamera3::kAvailableRawSizes[4] = { - 640, 480, - // 1280, 720 - // mSensorWidth, mSensorHeight +const int32_t VirtualFakeCamera3::kHalSupportedFormats[] = { + HAL_PIXEL_FORMAT_BLOB, + HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, // defined as RGB32 + HAL_PIXEL_FORMAT_RGBA_8888, // RGB32 + HAL_PIXEL_FORMAT_YCbCr_420_888, // NV12 + HAL_PIXEL_FORMAT_YCrCb_420_SP, // NV21 + // HAL_PIXEL_FORMAT_YV12 /* Not supporting now*/ }; /** @@ -96,18 +95,13 @@ const float VirtualFakeCamera3::kExposureWanderMax = 1; /** * Camera device lifecycle methods */ - -VirtualFakeCamera3::VirtualFakeCamera3(int cameraId, bool facingBack, struct hw_module_t *module, +VirtualFakeCamera3::VirtualFakeCamera3(int cameraId, struct hw_module_t *module, std::shared_ptr socket_server, - std::shared_ptr decoder, std::atomic &state) : VirtualCamera3(cameraId, module), - mFacingBack(facingBack), mSocketServer(socket_server), - mDecoder(decoder), - mCameraSessionState{state} { - ALOGI("Constructing virtual fake camera 3: ID %d, facing %s", mCameraID, - facingBack ? 
"back" : "front"); + mCameraSessionState{state} { + ALOGI("Constructing virtual fake camera 3: for ID %d", mCameraID); mControlMode = ANDROID_CONTROL_MODE_AUTO; mFacePriority = false; @@ -121,9 +115,18 @@ VirtualFakeCamera3::VirtualFakeCamera3(int cameraId, bool facingBack, struct hw_ mAeTargetExposureTime = kNormalExposureTime; mAeCurrentExposureTime = kNormalExposureTime; mAeCurrentSensitivity = kNormalSensitivity; - mSensorWidth = 640; - mSensorHeight = 480; + mSensorWidth = 0; + mSensorHeight = 0; + mSrcWidth = gCameraMaxWidth; + mSrcHeight = gCameraMaxHeight; + mCodecType = 0; + mDecoderResolution = 0; + mFacingBack = false; + mDecoderInitDone = false; mInputStream = NULL; + mSensor = NULL; + mReadoutThread = NULL; + mJpegCompressor = NULL; } VirtualFakeCamera3::~VirtualFakeCamera3() { @@ -134,8 +137,7 @@ VirtualFakeCamera3::~VirtualFakeCamera3() { } } -status_t VirtualFakeCamera3::Initialize(const char *device_name, const char *frame_dims, - const char *facing_dir) { +status_t VirtualFakeCamera3::Initialize() { ALOGVV("%s: E", __FUNCTION__); status_t res; @@ -156,60 +158,132 @@ status_t VirtualFakeCamera3::Initialize(const char *device_name, const char *fra return res; } - return VirtualCamera3::Initialize(nullptr, nullptr, nullptr); + return VirtualCamera3::Initialize(); } -status_t VirtualFakeCamera3::sendCommandToClient(socket::CameraOperation operation) { +status_t VirtualFakeCamera3::openCamera(hw_device_t **device) { + ALOGI(LOG_TAG "%s: E", __FUNCTION__); + Mutex::Autolock l(mLock); + + return VirtualCamera3::openCamera(device); +} + +uint32_t VirtualFakeCamera3::setDecoderResolution(uint32_t resolution) { + ALOGVV(LOG_TAG "%s: E", __FUNCTION__); + uint32_t res = 0; + switch (resolution) { + case DECODER_SUPPORTED_RESOLUTION_480P: + res = (uint32_t)FrameResolution::k480p; + break; + case DECODER_SUPPORTED_RESOLUTION_720P: + res = (uint32_t)FrameResolution::k720p; + break; + case DECODER_SUPPORTED_RESOLUTION_1080P: + res = (uint32_t)FrameResolution::k1080p; + break; + default: + ALOGI("%s: Selected default 480p resolution!!!", __func__); + res = (uint32_t)FrameResolution::k480p; + break; + } + + ALOGI("%s: Resolution selected for decoder init is %s", __func__, resolution_to_str(res)); + return res; +} +status_t VirtualFakeCamera3::sendCommandToClient(camera_cmd_t cmd) { ALOGI("%s E", __func__); - socket::CameraConfig camera_config = {}; - camera_config.operation = operation; + status_t status = INVALID_OPERATION; + size_t config_cmd_packet_size = sizeof(camera_header_t) + sizeof(camera_config_cmd_t); + camera_config_cmd_t config_cmd = {}; + config_cmd.version = CAMERA_VHAL_VERSION_2; + config_cmd.cmd = cmd; + char prop_val[PROPERTY_VALUE_MAX] = {'\0'}; + property_get("vendor.camera.app.name", prop_val, "false"); + + config_cmd.config.cameraId = mCameraID; + strncpy(config_cmd.config.pkg_name, prop_val, PROPERTY_VALUE_MAX); + config_cmd.config.codec_type = mCodecType; + config_cmd.config.resolution = mDecoderResolution; + + camera_packet_t *config_cmd_packet = NULL; int client_fd = mSocketServer->getClientFd(); if (client_fd < 0) { ALOGE("%s: We're not connected to client yet!", __FUNCTION__); - return INVALID_OPERATION; + return status; } - ALOGI("%s: Camera client fd %d!", __FUNCTION__, client_fd); - if (send(client_fd, &camera_config, sizeof(camera_config), 0) < 0) { - ALOGE(LOG_TAG "%s: Failed to send Camera Open command to client, err %s ", __FUNCTION__, - strerror(errno)); - return INVALID_OPERATION; + + config_cmd_packet = (camera_packet_t 
*)malloc(config_cmd_packet_size); + if (config_cmd_packet == NULL) { + ALOGE(LOG_TAG "%s: config camera_packet_t allocation failed: %d ", __FUNCTION__, __LINE__); + goto out; } - std::string cmd_str = - (operation == socket::CameraOperation::kClose) ? "CloseCamera" : "OpenCamera"; - ALOGI("%s: Sent cmd %s to client %d!", __FUNCTION__, cmd_str.c_str(), client_fd); - return OK; + config_cmd_packet->header.type = CAMERA_CONFIG; + config_cmd_packet->header.size = sizeof(camera_config_cmd_t); + memcpy(config_cmd_packet->payload, &config_cmd, sizeof(camera_config_cmd_t)); + + ALOGI("%s: Camera client fd %d! camera id %d", __FUNCTION__, client_fd, config_cmd.config.cameraId); +#if 0 + if(write(client_fd, config_cmd_packet, config_cmd_packet_size) < 0) { + ALOGE(LOG_TAG "%s: Failed to send Camera %s command to client, err %s ", __FUNCTION__, + (cmd == camera_cmd_t::CMD_CLOSE) ? "CloseCamera" : "OpenCamera", strerror(errno)); + goto out; + } + +#else + if (send(client_fd, config_cmd_packet, config_cmd_packet_size, 0) < 0) { + ALOGE(LOG_TAG "%s: Failed to send Camera %s command to client, err %s ", __FUNCTION__, + (cmd == camera_cmd_t::CMD_CLOSE) ? "CloseCamera" : "OpenCamera", strerror(errno)); + goto out; + } +#endif + ALOGI("%s: Sent cmd %s to client %d!", __FUNCTION__, + (cmd == camera_cmd_t::CMD_CLOSE) ? "CloseCamera" : "OpenCamera", client_fd); + status = OK; +out: + free(config_cmd_packet); + return status; } -status_t VirtualFakeCamera3::connectCamera(hw_device_t **device) { +status_t VirtualFakeCamera3::connectCamera() { ALOGI(LOG_TAG "%s: E", __FUNCTION__); - Mutex::Autolock l(mLock); if (gIsInFrameH264) { const char *device_name = gUseVaapi ? "vaapi" : nullptr; +#ifdef ENABLE_FFMPEG // initialize decoder - if (mDecoder->init(VideoCodecType::kH264, FrameResolution::k480p, device_name, 0) < 0) { + if (mDecoder->init((android::socket::FrameResolution)mDecoderResolution, mCodecType, + device_name, 0) < 0) { ALOGE("%s VideoDecoder init failed. %s decoding", __func__, !device_name ? "SW" : device_name); } else { + mDecoderInitDone = true; ALOGI("%s VideoDecoder init done. Device: %s", __func__, !device_name ? 
"SW" : device_name); } +#endif } + else + mDecoderInitDone = true; ALOGI("%s Calling sendCommandToClient", __func__); status_t ret; - if ((ret = sendCommandToClient(socket::CameraOperation::kOpen)) != OK) { + if ((ret = sendCommandToClient(camera_cmd_t::CMD_OPEN)) != OK) { ALOGE("%s sendCommandToClient failed", __func__); return ret; } ALOGI("%s Called sendCommandToClient", __func__); - mCameraSessionState = socket::CameraSessionState::kCameraOpened; - + mCameraSessionState = CameraSessionState::kCameraOpened; + mSrcWidth = gCameraMaxWidth; + mSrcHeight = gCameraMaxHeight; // create sensor who gets decoded frames and forwards them to framework - mSensor = new Sensor(mSensorWidth, mSensorHeight, mDecoder); +#ifdef ENABLE_FFMPEG + mSensor = new Sensor(mSrcWidth, mSrcHeight, mDecoder); +#else + mSensor = new Sensor(mSrcWidth, mSrcHeight); +#endif mSensor->setSensorListener(this); status_t res = mSensor->startUp(); @@ -236,7 +310,7 @@ status_t VirtualFakeCamera3::connectCamera(hw_device_t **device) { mAeCurrentExposureTime = kNormalExposureTime; mAeCurrentSensitivity = kNormalSensitivity; - return VirtualCamera3::connectCamera(device); + return OK; } /** @@ -260,9 +334,12 @@ status_t VirtualFakeCamera3::closeCamera() { ALOGE(LOG_TAG " %s: wait:..", __FUNCTION__); std::this_thread::sleep_for(2500ms); } - mprocessCaptureRequestFlag = false; + if (mSensor == NULL) { + return VirtualCamera3::closeCamera(); + } + { Mutex::Autolock l(mLock); if (mStatus == STATUS_CLOSED) return OK; @@ -270,7 +347,6 @@ status_t VirtualFakeCamera3::closeCamera() { auto ret = mSensor->shutDown(); if (ret != NO_ERROR) { ALOGE("%s: Unable to shut down sensor: %d", __FUNCTION__, ret); - return ret; } mSensor.clear(); @@ -293,44 +369,53 @@ status_t VirtualFakeCamera3::closeCamera() { ClientVideoBuffer *handle = ClientVideoBuffer::getClientInstance(); handle->reset(); - ALOGI("%s VideoBuffers are reset", __func__); - - // Set state to CameraClosed, so that SocketServerThread stops decoding. - mCameraSessionState = socket::CameraSessionState::kCameraClosed; + ALOGI("%s: Camera input buffers are reset", __func__); if (gIsInFrameH264) { - int waitForCameraClose = 0; - while (mCameraSessionState != socket::CameraSessionState::kDecodingStopped) { - std::this_thread::sleep_for(2ms); - waitForCameraClose += 2; // 2 corresponds to 2ms - if (waitForCameraClose == MAX_TIMEOUT_FOR_CAMERA_CLOSE_SESSION) - break; - } + // Set state to CameraClosed, so that SocketServerThread stops decoding. + mCameraSessionState = socket::CameraSessionState::kCameraClosed; +#ifdef ENABLE_FFMPEG + mDecoder->flush_decoder(); + mDecoder->destroy(); +#endif ALOGI("%s Decoding is stopped, now send CLOSE command to client", __func__); } // Send close command to client - status_t ret = sendCommandToClient(socket::CameraOperation::kClose); + status_t ret = sendCommandToClient(camera_cmd_t::CMD_CLOSE); if (ret != OK) { ALOGE("%s sendCommandToClient failed", __func__); - return ret; } + // Set NULL or Zero to some local members which would be updated in the + // next configure_streams call to support Dynamic multi-resolution. + mSrcWidth = 0; + mSrcHeight = 0; + mDecoderResolution = 0; + mDecoderInitDone = false; + mSensor = NULL; + mReadoutThread = NULL; + mJpegCompressor = NULL; + mSocketServer->size_update = 0; return VirtualCamera3::closeCamera(); } status_t VirtualFakeCamera3::getCameraInfo(struct camera_info *info) { - info->facing = mFacingBack ? 
CAMERA_FACING_BACK : CAMERA_FACING_FRONT; - info->orientation = gVirtualCameraFactory.getFakeCameraOrientation(); return VirtualCamera3::getCameraInfo(info); } +status_t VirtualFakeCamera3::setTorchMode(const char* camera_id, bool enable){ + return VirtualCamera3::setTorchMode(camera_id,enable); +} + + /** * Camera3 interface methods */ status_t VirtualFakeCamera3::configureStreams(camera3_stream_configuration *streamList) { Mutex::Autolock l(mLock); + status_t res; if (mStatus != STATUS_OPEN && mStatus != STATUS_READY) { ALOGE("%s: Cannot configure streams in state %d", __FUNCTION__, mStatus); @@ -368,9 +453,9 @@ status_t VirtualFakeCamera3::configureStreams(camera3_stream_configuration *stre ALOGI( " %s: Stream %p (id %zu), type %d, usage 0x%x, format 0x%x " - "width %d, height %d", + "width %d, height %d, rotation %d", __FUNCTION__, newStream, i, newStream->stream_type, newStream->usage, newStream->format, - newStream->width, newStream->height); + newStream->width, newStream->height, newStream->rotation); if (newStream->stream_type == CAMERA3_STREAM_INPUT || newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL) { @@ -399,8 +484,9 @@ status_t VirtualFakeCamera3::configureStreams(camera3_stream_configuration *stre } bool validFormat = false; - for (size_t f = 0; f < sizeof(kAvailableFormats) / sizeof(kAvailableFormats[0]); f++) { - if (newStream->format == kAvailableFormats[f]) { + for (size_t f = 0; f < sizeof(kHalSupportedFormats) / sizeof(kHalSupportedFormats[0]); + f++) { + if (newStream->format == kHalSupportedFormats[f]) { validFormat = true; break; } @@ -409,14 +495,26 @@ status_t VirtualFakeCamera3::configureStreams(camera3_stream_configuration *stre ALOGE("%s: Unsupported stream format 0x%x requested", __FUNCTION__, newStream->format); return BAD_VALUE; } + + if (mSrcWidth < newStream->width && mSrcHeight < newStream->height) { + // Update app's res request to local variable. + mSrcWidth = newStream->width; + mSrcHeight = newStream->height; + // Update globally for clearing used buffers properly. + gSrcWidth = mSrcWidth; + gSrcHeight = mSrcHeight; + } } mInputStream = inputStream; + ALOGI("%s: Camera current input resolution is %dx%d", __FUNCTION__, mSrcWidth, mSrcHeight); + /** * Initially mark all existing streams as not alive */ for (StreamIterator s = mStreams.begin(); s != mStreams.end(); ++s) { PrivateStreamInfo *privStream = static_cast((*s)->priv); + if(privStream != NULL) privStream->alive = false; } @@ -448,7 +546,11 @@ status_t VirtualFakeCamera3::configureStreams(camera3_stream_configuration *stre newStream->usage |= GRALLOC_USAGE_HW_CAMERA_WRITE; ALOGE("%s: GRALLOC0", __FUNCTION__); #else - ALOGE("%s: GRALLOC1", __FUNCTION__); + newStream->usage |= GRALLOC_USAGE_SW_WRITE_OFTEN; + ALOGE("%s: GRALLOC1 GRALLOC_USAGE_SW_WRITE_OFTEN", __FUNCTION__); + //WA: configure usage when requrested for buffer overlay, WA provided during vts run + // cases of configure single stream and flush + // newStream->usage = 0x100; #endif break; case CAMERA3_STREAM_INPUT: @@ -463,11 +565,17 @@ status_t VirtualFakeCamera3::configureStreams(camera3_stream_configuration *stre #ifndef USE_GRALLOC1 if (newStream->usage & GRALLOC_USAGE_HW_CAMERA_WRITE) { #endif - if (newStream->usage & GRALLOC_USAGE_HW_TEXTURE) { + if ((newStream->usage & GRALLOC_USAGE_HW_TEXTURE) || + (newStream->usage & GRALLOC_USAGE_HW_VIDEO_ENCODER)) { + // Both preview and video capture output format would + // be RGB32 always if it is HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED. 
newStream->format = HAL_PIXEL_FORMAT_RGBA_8888; - } else if (newStream->usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) { - newStream->format = HAL_PIXEL_FORMAT_YCbCr_420_888; - } else { + } + //TODO: present in old VHAL, need to check video usecase + //else if (newStream->usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) { + // newStream->format = HAL_PIXEL_FORMAT_YCbCr_420_888; + //} + else { newStream->format = HAL_PIXEL_FORMAT_RGB_888; } #ifndef USE_GRALLOC1 @@ -494,6 +602,23 @@ status_t VirtualFakeCamera3::configureStreams(camera3_stream_configuration *stre * Can't reuse settings across configure call */ mPrevSettings.clear(); + + /** + * Initialize Camera sensor and Input decoder based on app's res request. + */ + if (!mDecoderInitDone) { + ALOGI("%s: Initializing decoder and sensor for new resolution request!!!", __func__); + res = connectCamera(); + if (res != OK) { + return res; + } + + // Fill the input buffer with black frame to avoid green frame + // while changing the resolution in each request. + ClientVideoBuffer *handle = ClientVideoBuffer::getClientInstance(); + handle->clearBuffer(); + } + return OK; } @@ -833,10 +958,9 @@ const camera_metadata_t *VirtualFakeCamera3::constructDefaultRequestSettings(int } status_t VirtualFakeCamera3::processCaptureRequest(camera3_capture_request *request) { + ALOGVV("%s: E", __FUNCTION__); Mutex::Autolock l(mLock); status_t res; - status_t ret; - uint64_t useflag = 0; mprocessCaptureRequestFlag = true; /** Validation */ @@ -850,6 +974,9 @@ status_t VirtualFakeCamera3::processCaptureRequest(camera3_capture_request *requ return BAD_VALUE; } + ALOGVV("%s: Number of requested buffers = %u, Frame no: %u", __FUNCTION__, + request->num_output_buffers, request->frame_number); + uint32_t frameNumber = request->frame_number; if (request->settings == NULL && mPrevSettings.isEmpty()) { @@ -968,35 +1095,41 @@ status_t VirtualFakeCamera3::processCaptureRequest(camera3_capture_request *requ for (size_t i = 0; i < request->num_output_buffers; i++) { const camera3_stream_buffer &srcBuf = request->output_buffers[i]; StreamBuffer destBuf; + destBuf.streamId = kGenericStreamId; destBuf.width = srcBuf.stream->width; destBuf.height = srcBuf.stream->height; + destBuf.stride = srcBuf.stream->width; + destBuf.dataSpace = srcBuf.stream->data_space; + destBuf.buffer = srcBuf.buffer; + // Set this first to get rid of klocwork warnings. + // It would be overwritten again if it is HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED destBuf.format = (srcBuf.stream->format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) ? HAL_PIXEL_FORMAT_RGBA_8888 : srcBuf.stream->format; - // Fix ME (dest buffer fixed for 640x480) - // destBuf.width = 640; - // destBuf.height = 480; - // inline with goldfish gralloc + if (srcBuf.stream->format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) { #ifndef USE_GRALLOC1 if (srcBuf.stream->usage & GRALLOC_USAGE_HW_CAMERA_WRITE) { #endif - if (srcBuf.stream->usage & GRALLOC_USAGE_HW_TEXTURE) { + if ((srcBuf.stream->usage & GRALLOC_USAGE_HW_TEXTURE) || + (srcBuf.stream->usage & GRALLOC_USAGE_HW_VIDEO_ENCODER)) { + // Both preview and video capture output format would + // be RGB32 always if it is HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED. 
destBuf.format = HAL_PIXEL_FORMAT_RGBA_8888; - } else if (srcBuf.stream->usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) { - destBuf.format = HAL_PIXEL_FORMAT_YCbCr_420_888; - } else if ((srcBuf.stream->usage & GRALLOC_USAGE_HW_CAMERA_MASK) == + //TODO: present in old VHAL + // } else if (srcBuf.stream->usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) { + // destBuf.format = HAL_PIXEL_FORMAT_YCbCr_420_888; + //} + }else if ((srcBuf.stream->usage & GRALLOC_USAGE_HW_CAMERA_MASK) == GRALLOC_USAGE_HW_CAMERA_ZSL) { + // Note: Currently no support for ZSL mode destBuf.format = HAL_PIXEL_FORMAT_RGB_888; } #ifndef USE_GRALLOC1 } #endif } - destBuf.stride = srcBuf.stream->width; - destBuf.dataSpace = srcBuf.stream->data_space; - destBuf.buffer = srcBuf.buffer; if (destBuf.format == HAL_PIXEL_FORMAT_BLOB) { needJpeg = true; @@ -1011,10 +1144,24 @@ status_t VirtualFakeCamera3::processCaptureRequest(camera3_capture_request *requ } if (res == OK) { // Lock buffer for writing - if (srcBuf.stream->format == HAL_PIXEL_FORMAT_YCbCr_420_888) { - if (destBuf.format == HAL_PIXEL_FORMAT_YCbCr_420_888) { + if (srcBuf.stream->format == HAL_PIXEL_FORMAT_YCbCr_420_888 || + srcBuf.stream->format == HAL_PIXEL_FORMAT_YCrCb_420_SP) { + if (destBuf.format == HAL_PIXEL_FORMAT_YCbCr_420_888 || + destBuf.format == HAL_PIXEL_FORMAT_YCrCb_420_SP) { android_ycbcr ycbcr = android_ycbcr(); + bufferHandle2 = native_handle_clone(*(destBuf.buffer)); +#ifdef GRALLOC_MAPPER4 + res = GrallocModule::getInstance().importBuffer(bufferHandle2, &bufferHandle1); + //res = GrallocModule::getInstance().importBuffer(*(destBuf.buffer), &bufferHandle1); + if (res!= OK) { + ALOGV("%s: Gralloc importBuffer failed",__FUNCTION__); + } + res = GrallocModule::getInstance().lock_ycbcr(bufferHandle2, + //res = GrallocModule::getInstance().lock_ycbcr(bufferHandle1, +#else res = GrallocModule::getInstance().lock_ycbcr(*(destBuf.buffer), +#endif + #ifdef USE_GRALLOC1 GRALLOC1_PRODUCER_USAGE_CPU_WRITE, #else @@ -1022,15 +1169,26 @@ status_t VirtualFakeCamera3::processCaptureRequest(camera3_capture_request *requ #endif 0, 0, destBuf.width, destBuf.height, &ycbcr); - // This is only valid because we know that emulator's - // YCbCr_420_888 is really contiguous NV21 under the hood destBuf.img = static_cast(ycbcr.y); } else { ALOGE("Unexpected private format for flexible YUV: 0x%x", destBuf.format); res = INVALID_OPERATION; } } else { +#ifdef GRALLOC_MAPPER4 + bufferHandle_3 = native_handle_clone(*(destBuf.buffer)); + res = GrallocModule::getInstance().importBuffer(bufferHandle_3, &bufferHandle); + //res = GrallocModule::getInstance().importBuffer(*(destBuf.buffer), &bufferHandle); + if (res!= OK) { + ALOGV("%s: Gralloc importBuffer failed",__FUNCTION__); + } + + res = GrallocModule::getInstance().lock(bufferHandle_3, + //res = GrallocModule::getInstance().lock(bufferHandle, +#else res = GrallocModule::getInstance().lock(*(destBuf.buffer), +#endif + #ifdef USE_GRALLOC1 GRALLOC1_PRODUCER_USAGE_CPU_WRITE, #else @@ -1062,8 +1220,23 @@ status_t VirtualFakeCamera3::processCaptureRequest(camera3_capture_request *requ sensorBuffers->push_back(destBuf); buffers->push_back(srcBuf); +#ifdef GRALLOC_MAPPER4 + if (srcBuf.stream->format == HAL_PIXEL_FORMAT_YCbCr_420_888) + { + GrallocModule::getInstance().unlock(bufferHandle2); + native_handle_close(bufferHandle2); + //GrallocModule::getInstance().release_handle(bufferHandle1); + //GrallocModule::getInstance().unlock(bufferHandle1); + } + else + { + GrallocModule::getInstance().unlock(bufferHandle_3); + native_handle_close(bufferHandle_3); 
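+                // Note on the Mapper4 buffer lifecycle used in this loop (sketch only;
+                // the GrallocModule signatures are taken as used in this patch):
+                //
+                //   native_handle_t *clone = native_handle_clone(*(destBuf.buffer));
+                //   buffer_handle_t imported = nullptr;
+                //   GrallocModule::getInstance().importBuffer(clone, &imported);
+                //   GrallocModule::getInstance().lock(clone, GRALLOC_USAGE_SW_WRITE_OFTEN,
+                //                                     0, 0, destBuf.width, destBuf.height,
+                //                                     (void **)&destBuf.img);
+                //   ... write the frame ...
+                //   GrallocModule::getInstance().unlock(clone);
+                //   native_handle_close(clone);
+                //
+                // Each native_handle_clone() duplicates the buffer's fds, so every clone
+                // must be paired with native_handle_close() (and native_handle_delete())
+                // once unlocked, or the HAL leaks fds on every captured frame.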
+ //GrallocModule::getInstance().release_handle(bufferHandle); + // GrallocModule::getInstance().unlock(bufferHandle); + } +#endif } - /** * Wait for JPEG compressor to not be busy, if needed */ @@ -1212,29 +1385,46 @@ bool VirtualFakeCamera3::hasCapability(AvailableCapabilities cap) { return idx >= 0; } +void VirtualFakeCamera3::setCameraFacingInfo() { + // Updating facing info based on client request. + mFacingBack = gCameraFacingBack; + ALOGI("%s: Camera ID %d is set as %s facing", __func__, mCameraID, + mFacingBack ? "Back" : "Front"); +} + +void VirtualFakeCamera3::setInputCodecType() { + mCodecType = gCodecType; + ALOGI("%s: Selected %s Codec_type for Camera %d", __func__, codec_type_to_str(mCodecType), + mCameraID); +} + +void VirtualFakeCamera3::setMaxSupportedResolution() { + // Updating max sensor supported resolution based on client camera. + // This would be used in sensor related operations and metadata info. + mSensorWidth = gCameraMaxWidth; + mSensorHeight = gCameraMaxHeight; + ALOGI("%s: Maximum supported Resolution of Camera %d: %dx%d", __func__, mCameraID, mSensorWidth, + mSensorHeight); +} + status_t VirtualFakeCamera3::constructStaticInfo() { CameraMetadata info; Vector availableCharacteristicsKeys; status_t res; - - // Find max width/height int32_t width = 0, height = 0; - size_t rawSizeCount = sizeof(kAvailableRawSizes) / sizeof(kAvailableRawSizes[0]); - for (size_t index = 0; index + 1 < rawSizeCount; index += 2) { - if (width <= (int32_t)kAvailableRawSizes[index] && - height <= (int32_t)kAvailableRawSizes[index + 1]) { - width = kAvailableRawSizes[index]; - height = kAvailableRawSizes[index + 1]; - } - } - if (width < 1280 || height < 720) { - width = 640; - height = 480; - } - mSensorWidth = width; - mSensorHeight = height; - ALOGE("%s: [width:height] [%d:%d]", __func__, mSensorWidth, mSensorHeight); + ALOGVV("%s: Updating metadata for Camera %d", __func__, mCameraID); + + // Setting the max supported Camera resolution. + setMaxSupportedResolution(); + // set codec type of the input frame. + setInputCodecType(); + // Set camera facing info. + setCameraFacingInfo(); + + // Updating width and height based on capability info. 
+ width = mSensorWidth; + height = mSensorHeight; #define ADD_STATIC_ENTRY(name, varptr, count) \ availableCharacteristicsKeys.add(name); \ @@ -1263,21 +1453,12 @@ status_t VirtualFakeCamera3::constructStaticInfo() { static const float sensorPhysicalSize[2] = {3.20f, 2.40f}; // mm ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_PHYSICAL_SIZE, sensorPhysicalSize, 2); - const int32_t pixelArray[] = {mSensorWidth, mSensorHeight}; + int32_t pixelArray[] = {mSensorWidth, mSensorHeight}; ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE, pixelArray, 2); - const int32_t activeArray[] = {0, 0, mSensorWidth, mSensorHeight}; + int32_t activeArray[] = {0, 0, mSensorWidth, mSensorHeight}; ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE, activeArray, 4); - char mode[PROPERTY_VALUE_MAX]; - static int32_t orientation = 0; - if ((property_get("persist.remote.camera.orientation", mode, nullptr) > 0) && - (!strcmp(mode, "portrait"))) { - ALOGV("persist.remote.camera.orientation: portrait"); - orientation = 270; - } else { - ALOGV("persist.remote.camera.orientation: landscape"); - orientation = 0; - } + int32_t orientation = gCameraSensorOrientation; ADD_STATIC_ENTRY(ANDROID_SENSOR_ORIENTATION, &orientation, 1); static const uint8_t timestampSource = ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE_REALTIME; @@ -1310,11 +1491,11 @@ status_t VirtualFakeCamera3::constructStaticInfo() { if (hasCapability(BACKWARD_COMPATIBLE)) { // 5 cm min focus distance for back camera, infinity (fixed focus) for front - const float minFocusDistance = mFacingBack ? 1.0 / 0.05 : 0.0; + float minFocusDistance = mFacingBack ? 1.0 / 0.05 : 0.0; ADD_STATIC_ENTRY(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, &minFocusDistance, 1); // 5 m hyperfocal distance for back camera, infinity (fixed focus) for front - const float hyperFocalDistance = mFacingBack ? 1.0 / 5.0 : 0.0; + float hyperFocalDistance = mFacingBack ? 1.0 / 5.0 : 0.0; ADD_STATIC_ENTRY(ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE, &hyperFocalDistance, 1); static const float apertures = 2.8f; @@ -1384,7 +1565,7 @@ status_t VirtualFakeCamera3::constructStaticInfo() { sizeof(lensRadialDistortion) / sizeof(float)); } - const uint8_t lensFacing = mFacingBack ? ANDROID_LENS_FACING_BACK : ANDROID_LENS_FACING_FRONT; + uint8_t lensFacing = mFacingBack ? 
ANDROID_LENS_FACING_BACK : ANDROID_LENS_FACING_FRONT; ADD_STATIC_ENTRY(ANDROID_LENS_FACING, &lensFacing, 1); // android.flash @@ -1416,43 +1597,41 @@ status_t VirtualFakeCamera3::constructStaticInfo() { // android.scaler - const std::vector availableStreamConfigurationsBasic = { + const std::vector availableStreamConfigurationsDefault = { HAL_PIXEL_FORMAT_BLOB, width, height, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT, + }; + + const std::vector availableStreamConfigurations1080p = { HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, - 320, - 240, - ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT, - HAL_PIXEL_FORMAT_YCbCr_420_888, - 320, - 240, - ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT, - HAL_PIXEL_FORMAT_BLOB, - 320, - 240, + 1280, + 720, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT, - HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, - 176, - 144, + HAL_PIXEL_FORMAT_YCrCb_420_SP, + 1280, + 720, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT, HAL_PIXEL_FORMAT_YCbCr_420_888, - 176, - 144, + 1280, + 720, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT, HAL_PIXEL_FORMAT_BLOB, - 176, - 144, + 1280, + 720, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT, }; - // Always need to include 640x480 in basic formats - const std::vector availableStreamConfigurationsBasic640 = { + const std::vector availableStreamConfigurations720p = { HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 640, 480, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT, + HAL_PIXEL_FORMAT_YCrCb_420_SP, + 640, + 480, + ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT, HAL_PIXEL_FORMAT_YCbCr_420_888, 640, 480, @@ -1460,7 +1639,27 @@ status_t VirtualFakeCamera3::constructStaticInfo() { HAL_PIXEL_FORMAT_BLOB, 640, 480, - ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT}; + ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT, + }; + + const std::vector availableStreamConfigurations480p = { + HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, + 320, + 240, + ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT, + HAL_PIXEL_FORMAT_YCrCb_420_SP, + 320, + 240, + ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT, + HAL_PIXEL_FORMAT_YCbCr_420_888, + 320, + 240, + ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT, + HAL_PIXEL_FORMAT_BLOB, + 320, + 240, + ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT, + }; const std::vector availableStreamConfigurationsRaw = { HAL_PIXEL_FORMAT_RAW16, @@ -1487,13 +1686,43 @@ status_t VirtualFakeCamera3::constructStaticInfo() { std::vector availableStreamConfigurations; if (hasCapability(BACKWARD_COMPATIBLE)) { - availableStreamConfigurations.insert(availableStreamConfigurations.end(), - availableStreamConfigurationsBasic.begin(), - availableStreamConfigurationsBasic.end()); - if (width > 640) { + if (width == 1920 && height == 1080) { availableStreamConfigurations.insert(availableStreamConfigurations.end(), - availableStreamConfigurationsBasic640.begin(), - availableStreamConfigurationsBasic640.end()); + availableStreamConfigurationsDefault.begin(), + availableStreamConfigurationsDefault.end()); + + availableStreamConfigurations.insert(availableStreamConfigurations.end(), + availableStreamConfigurations1080p.begin(), + availableStreamConfigurations1080p.end()); + + availableStreamConfigurations.insert(availableStreamConfigurations.end(), + availableStreamConfigurations720p.begin(), + availableStreamConfigurations720p.end()); + + availableStreamConfigurations.insert(availableStreamConfigurations.end(), + availableStreamConfigurations480p.begin(), + 
availableStreamConfigurations480p.end()); + + } else if (width == 1280 && height == 720) { + availableStreamConfigurations.insert(availableStreamConfigurations.end(), + availableStreamConfigurationsDefault.begin(), + availableStreamConfigurationsDefault.end()); + + availableStreamConfigurations.insert(availableStreamConfigurations.end(), + availableStreamConfigurations720p.begin(), + availableStreamConfigurations720p.end()); + + availableStreamConfigurations.insert(availableStreamConfigurations.end(), + availableStreamConfigurations480p.begin(), + availableStreamConfigurations480p.end()); + } else { // For 480p + availableStreamConfigurations.insert(availableStreamConfigurations.end(), + availableStreamConfigurationsDefault.begin(), + availableStreamConfigurationsDefault.end()); + + availableStreamConfigurations.insert(availableStreamConfigurations.end(), + availableStreamConfigurations480p.begin(), + availableStreamConfigurations480p.end()); } } if (hasCapability(RAW)) { @@ -1512,43 +1741,41 @@ status_t VirtualFakeCamera3::constructStaticInfo() { &availableStreamConfigurations[0], availableStreamConfigurations.size()); } - const std::vector availableMinFrameDurationsBasic = { + const std::vector availableMinFrameDurationsDefault = { HAL_PIXEL_FORMAT_BLOB, width, height, Sensor::kFrameDurationRange[0], + }; + + const std::vector availableMinFrameDurations1080p = { HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, - 320, - 240, - Sensor::kFrameDurationRange[0], - HAL_PIXEL_FORMAT_YCbCr_420_888, - 320, - 240, - Sensor::kFrameDurationRange[0], - HAL_PIXEL_FORMAT_BLOB, - 320, - 240, + 1280, + 720, Sensor::kFrameDurationRange[0], - HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, - 176, - 144, + HAL_PIXEL_FORMAT_YCrCb_420_SP, + 1280, + 720, Sensor::kFrameDurationRange[0], HAL_PIXEL_FORMAT_YCbCr_420_888, - 176, - 144, + 1280, + 720, Sensor::kFrameDurationRange[0], HAL_PIXEL_FORMAT_BLOB, - 176, - 144, + 1280, + 720, Sensor::kFrameDurationRange[0], }; - // Always need to include 640x480 in basic formats - const std::vector availableMinFrameDurationsBasic640 = { + const std::vector availableMinFrameDurations720p = { HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 640, 480, Sensor::kFrameDurationRange[0], + HAL_PIXEL_FORMAT_YCrCb_420_SP, + 640, + 480, + Sensor::kFrameDurationRange[0], HAL_PIXEL_FORMAT_YCbCr_420_888, 640, 480, @@ -1556,7 +1783,27 @@ status_t VirtualFakeCamera3::constructStaticInfo() { HAL_PIXEL_FORMAT_BLOB, 640, 480, - Sensor::kFrameDurationRange[0]}; + Sensor::kFrameDurationRange[0], + }; + + const std::vector availableMinFrameDurations480p = { + HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, + 320, + 240, + Sensor::kFrameDurationRange[0], + HAL_PIXEL_FORMAT_YCrCb_420_SP, + 320, + 240, + Sensor::kFrameDurationRange[0], + HAL_PIXEL_FORMAT_YCbCr_420_888, + 320, + 240, + Sensor::kFrameDurationRange[0], + HAL_PIXEL_FORMAT_BLOB, + 320, + 240, + Sensor::kFrameDurationRange[0], + }; const std::vector availableMinFrameDurationsRaw = { HAL_PIXEL_FORMAT_RAW16, @@ -1583,13 +1830,42 @@ status_t VirtualFakeCamera3::constructStaticInfo() { std::vector availableMinFrameDurations; if (hasCapability(BACKWARD_COMPATIBLE)) { - availableMinFrameDurations.insert(availableMinFrameDurations.end(), - availableMinFrameDurationsBasic.begin(), - availableMinFrameDurationsBasic.end()); - if (width > 640) { + if (width == 1920 && height == 1080) { + availableMinFrameDurations.insert(availableMinFrameDurations.end(), + availableMinFrameDurationsDefault.begin(), + availableMinFrameDurationsDefault.end()); + + 
availableMinFrameDurations.insert(availableMinFrameDurations.end(), + availableMinFrameDurations1080p.begin(), + availableMinFrameDurations1080p.end()); + + availableMinFrameDurations.insert(availableMinFrameDurations.end(), + availableMinFrameDurations720p.begin(), + availableMinFrameDurations720p.end()); + + availableMinFrameDurations.insert(availableMinFrameDurations.end(), + availableMinFrameDurations480p.begin(), + availableMinFrameDurations480p.end()); + } else if (width == 1280 && height == 720) { availableMinFrameDurations.insert(availableMinFrameDurations.end(), - availableMinFrameDurationsBasic640.begin(), - availableMinFrameDurationsBasic640.end()); + availableMinFrameDurationsDefault.begin(), + availableMinFrameDurationsDefault.end()); + + availableMinFrameDurations.insert(availableMinFrameDurations.end(), + availableMinFrameDurations720p.begin(), + availableMinFrameDurations720p.end()); + + availableMinFrameDurations.insert(availableMinFrameDurations.end(), + availableMinFrameDurations480p.begin(), + availableMinFrameDurations480p.end()); + } else { // For 480p + availableMinFrameDurations.insert(availableMinFrameDurations.end(), + availableMinFrameDurationsDefault.begin(), + availableMinFrameDurationsDefault.end()); + + availableMinFrameDurations.insert(availableMinFrameDurations.end(), + availableMinFrameDurations480p.begin(), + availableMinFrameDurations480p.end()); } } if (hasCapability(RAW)) { @@ -1608,51 +1884,32 @@ status_t VirtualFakeCamera3::constructStaticInfo() { &availableMinFrameDurations[0], availableMinFrameDurations.size()); } - const std::vector availableStallDurationsBasic = { + const std::vector availableStallDurationsDefault = { HAL_PIXEL_FORMAT_BLOB, width, height, Sensor::kFrameDurationRange[0], - HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, - 320, - 240, - 0, - HAL_PIXEL_FORMAT_YCbCr_420_888, - 320, - 240, - 0, - HAL_PIXEL_FORMAT_RGBA_8888, - 320, - 240, - 0, - HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, - 176, - 144, - 0, - HAL_PIXEL_FORMAT_YCbCr_420_888, - 176, - 144, - 0, - HAL_PIXEL_FORMAT_RGBA_8888, - 176, - 144, - 0, }; - // Always need to include 640x480 in basic formats - const std::vector availableStallDurationsBasic640 = { - HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, - 640, - 480, - 0, - HAL_PIXEL_FORMAT_YCbCr_420_888, - 640, - 480, - 0, + const std::vector availableStallDurations1080p = { + HAL_PIXEL_FORMAT_BLOB, + 1280, + 720, + Sensor::kFrameDurationRange[0], + }; + const std::vector availableStallDurations720p = { HAL_PIXEL_FORMAT_BLOB, 640, 480, - Sensor::kFrameDurationRange[0]}; + Sensor::kFrameDurationRange[0], + }; + + const std::vector availableStallDurations480p = { + HAL_PIXEL_FORMAT_BLOB, + 320, + 240, + Sensor::kFrameDurationRange[0], + }; const std::vector availableStallDurationsRaw = {HAL_PIXEL_FORMAT_RAW16, 640, 480, Sensor::kFrameDurationRange[0]}; @@ -1673,13 +1930,42 @@ status_t VirtualFakeCamera3::constructStaticInfo() { std::vector availableStallDurations; if (hasCapability(BACKWARD_COMPATIBLE)) { - availableStallDurations.insert(availableStallDurations.end(), - availableStallDurationsBasic.begin(), - availableStallDurationsBasic.end()); - if (width > 640) { + if (width == 1920 && height == 1080) { + availableStallDurations.insert(availableStallDurations.end(), + availableStallDurationsDefault.begin(), + availableStallDurationsDefault.end()); + + availableStallDurations.insert(availableStallDurations.end(), + availableStallDurations1080p.begin(), + availableStallDurations1080p.end()); + 
availableStallDurations.insert(availableStallDurations.end(), - availableStallDurationsBasic640.begin(), - availableStallDurationsBasic640.end()); + availableStallDurations720p.begin(), + availableStallDurations720p.end()); + + availableStallDurations.insert(availableStallDurations.end(), + availableStallDurations480p.begin(), + availableStallDurations480p.end()); + } else if (width == 1280 && height == 720) { + availableStallDurations.insert(availableStallDurations.end(), + availableStallDurationsDefault.begin(), + availableStallDurationsDefault.end()); + + availableStallDurations.insert(availableStallDurations.end(), + availableStallDurations720p.begin(), + availableStallDurations720p.end()); + + availableStallDurations.insert(availableStallDurations.end(), + availableStallDurations480p.begin(), + availableStallDurations480p.end()); + } else { // For 480p + availableStallDurations.insert(availableStallDurations.end(), + availableStallDurationsDefault.begin(), + availableStallDurationsDefault.end()); + + availableStallDurations.insert(availableStallDurations.end(), + availableStallDurations480p.begin(), + availableStallDurations480p.end()); } } if (hasCapability(RAW)) { @@ -1865,10 +2151,11 @@ status_t VirtualFakeCamera3::constructStaticInfo() { } // android.info - - const uint8_t supportedHardwareLevel = hasCapability(FULL_LEVEL) - ? ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_FULL - : ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED; + //WA during vts case execution for burst mode, setting limited hardware level + const uint8_t supportedHardwareLevel = ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED; + //const uint8_t supportedHardwareLevel = hasCapability(FULL_LEVEL) + // ? ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_FULL + // : ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED; ADD_STATIC_ENTRY(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL, &supportedHardwareLevel, /*count*/ 1); @@ -2697,8 +2984,9 @@ bool VirtualFakeCamera3::ReadoutThread::threadLoop() { res); // fallthrough for cleanup } +#ifndef GRALLOC_MAPPER4 GrallocModule::getInstance().unlock(*(buf->buffer)); - +#endif buf->status = goodBuffer ? CAMERA3_BUFFER_STATUS_OK : CAMERA3_BUFFER_STATUS_ERROR; buf->acquire_fence = -1; buf->release_fence = -1; @@ -2788,9 +3076,9 @@ bool VirtualFakeCamera3::ReadoutThread::threadLoop() { void VirtualFakeCamera3::ReadoutThread::onJpegDone(const StreamBuffer &jpegBuffer, bool success) { Mutex::Autolock jl(mJpegLock); - +#ifndef GRALLOC_MAPPER4 GrallocModule::getInstance().unlock(*(jpegBuffer.buffer)); - +#endif mJpegHalBuffer.status = success ? 
CAMERA3_BUFFER_STATUS_OK : CAMERA3_BUFFER_STATUS_ERROR; mJpegHalBuffer.acquire_fence = -1; mJpegHalBuffer.release_fence = -1; diff --git a/src/fake-pipeline2/Sensor.cpp b/src/fake-pipeline2/Sensor.cpp index 068af5c..a364cbe 100644 --- a/src/fake-pipeline2/Sensor.cpp +++ b/src/fake-pipeline2/Sensor.cpp @@ -25,7 +25,9 @@ #endif #include "fake-pipeline2/Sensor.h" +#ifdef ENABLE_FFMPEG #include "CGCodec.h" +#endif #include #include #include @@ -47,7 +49,7 @@ using namespace std::string_literals; ({ \ size_t rc = 0; \ char filename[64] = {'\0'}; \ - snprintf(filename, sizeof(filename), "/ipc/vHAL_RGBA_%d", dump_index); \ + snprintf(filename, sizeof(filename), "/data/vHAL_RGBA_%d", dump_index); \ FILE *fp = fopen(filename, "w+"); \ if (fp) { \ rc = fwrite(p_addr1, 1, len1, fp); \ @@ -111,16 +113,28 @@ float sqrtf_approx(float r) { return *(float *)(&r_i); } - +#ifdef ENABLE_FFMPEG Sensor::Sensor(uint32_t width, uint32_t height, std::shared_ptr decoder) +#else +Sensor::Sensor(uint32_t width, uint32_t height) +#endif : Thread(false), mResolution{width, height}, mActiveArray{0, 0, width, height}, mRowReadoutTime(kFrameDurationRange[0] / height), mExposureTime(kFrameDurationRange[0] - kMinVerticalBlank), mFrameDuration(kFrameDurationRange[0]), - mScene(width, height, kElectronsPerLuxSecond), - mDecoder{decoder} {} + mScene(width, height, kElectronsPerLuxSecond) +#ifdef ENABLE_FFMPEG + ,mDecoder{decoder} +#endif +{ + // Max supported resolution of the camera sensor. + // It is based on client camera capability info. + mSrcWidth = width; + mSrcHeight = height; + mSrcFrameSize = mSrcWidth * mSrcHeight * BPP_NV12; +} Sensor::~Sensor() { shutDown(); } @@ -234,6 +248,73 @@ status_t Sensor::readyToRun() { return OK; } +//#define CROP_ROTATE +#ifdef CROP_ROTATE +void bufferCropAndRotate(unsigned char * buff, unsigned char * buff_out){ +// +// Original frame Cropped frame Rotated frame Upscale frame +// -------------------- -------- -------------------- +// | | | | | | --------------- | | | | +// | | | | | | | | | | | | +// | | | | =======>> | | =======>> | | =======>> | | | | +// | | | | | | --------------- | | | | +// | | | | | | | | | | +// -------------------- -------- -------------------- +// 640x480 360x480 480x360 640x480 + ALOGI("bufferCropAndRotate"); + std::unique_ptr cropped_buffer; + + int cropped_width = 360; + int cropped_height = 480; + int margin = (640-360)/2; //140 + + int rotated_height = cropped_width; + int rotated_width = cropped_height; + + int rotated_y_stride = rotated_width; + int rotated_uv_stride = rotated_width / 2; + + size_t rotated_size = + rotated_y_stride * rotated_height + rotated_uv_stride * rotated_height; + cropped_buffer.reset(new uint8_t[rotated_size]); + uint8_t* rotated_y_plane = cropped_buffer.get(); + uint8_t* rotated_u_plane = + rotated_y_plane + rotated_y_stride * rotated_height; + uint8_t* rotated_v_plane = + rotated_u_plane + rotated_uv_stride * rotated_height / 2; + //libyuv::RotationMode rotation_mode = libyuv::RotationMode::kRotate90; + libyuv::RotationMode rotation_mode = libyuv::RotationMode::kRotate270; + + int res = libyuv::ConvertToI420( + buff, 640*480*3/2, rotated_y_plane, + rotated_y_stride, rotated_u_plane, rotated_uv_stride, rotated_v_plane, + rotated_uv_stride, margin, 0, 640, + 480, cropped_width, cropped_height, rotation_mode, + libyuv::FourCC::FOURCC_I420); + + if(res){ + ALOGE("critical ConvertToI420 res:%d ", res); + return; + } + + res = libyuv::I420Scale( + rotated_y_plane, rotated_y_stride, rotated_u_plane, rotated_uv_stride, + 
rotated_v_plane, rotated_uv_stride, rotated_width, rotated_height, + buff_out, 640, + buff_out + 640*480, + 640 / 2, + buff_out + 640*480*5/4, + 640/2, 640, + 480, libyuv::FilterMode::kFilterNone); + + if(res){ + ALOGE("critical I420Scale res:%d ", res); + } + +} +char buffer_recv[640*480*3/2]; +#endif + bool Sensor::threadLoop() { /** * Sensor capture operation main loop. @@ -250,7 +331,6 @@ bool Sensor::threadLoop() { uint32_t gain; Buffers *nextBuffers; uint32_t frameNumber; - bool needJpeg = false; ALOGVV("Sensor Thread stage E :1"); SensorListener *listener = nullptr; { @@ -329,6 +409,10 @@ bool Sensor::threadLoop() { ClientVideoBuffer *handle = ClientVideoBuffer::getClientInstance(); handle->clientBuf[handle->clientRevCount % 1].decoded = false; + #ifdef CROP_ROTATE + char *fbuffer = (char *)handle->clientBuf[handle->clientRevCount % 1].buffer; + bufferCropAndRotate((uint8_t*)fbuffer, (uint8_t*)buffer_recv); + #endif // Might be adding more buffers, so size isn't constant for (size_t i = 0; i < mNextCapturedBuffers->size(); i++) { @@ -356,24 +440,20 @@ bool Sensor::threadLoop() { bAux.streamId = 0; bAux.width = b.width; bAux.height = b.height; - bAux.format = - HAL_PIXEL_FORMAT_YCbCr_420_888; + bAux.format = HAL_PIXEL_FORMAT_YCrCb_420_SP; bAux.stride = b.width; bAux.buffer = nullptr; bAux.img = new uint8_t[b.width * b.height * 3]; - needJpeg = true; mNextCapturedBuffers->push_back(bAux); } else { captureDepthCloud(b.img); } break; + case HAL_PIXEL_FORMAT_YCrCb_420_SP: + captureNV21(b.img, gain, b.width, b.height); + break; case HAL_PIXEL_FORMAT_YCbCr_420_888: - if (!needJpeg) { - captureNV12(b.img, gain, b.width, b.height); - } else { - needJpeg = false; - captureJPEG(b.img, gain, b.width, b.height); - } + captureNV12(b.img, gain, b.width, b.height); break; case HAL_PIXEL_FORMAT_YV12: // TODO: @@ -408,9 +488,8 @@ bool Sensor::threadLoop() { } ALOGVV("Sensor Thread stage X :4"); - ClientVideoBuffer *handle = ClientVideoBuffer::getClientInstance(); - ALOGVV("Frame #%zu cycle took %d ms, target %d ms", handle->decodedFrameNo, - (int)(workDoneRealTime - startRealTime) / 1000000, (int)(frameDuration / 1000000)); + ALOGVV("Frame No: %d took %d ms, target %d ms", frameNumber, + (int)(workDoneRealTime - startRealTime) / 1000000, (int)(frameDuration / 1000000)); return true; }; @@ -458,7 +537,11 @@ void Sensor::dump_yuv(uint8_t *img1, size_t img1_size, uint8_t *img2, size_t img const std::string &filename) { static size_t count = 0; ClientVideoBuffer *handle = ClientVideoBuffer::getClientInstance(); + #ifdef CROP_ROTATE + uint8_t *bufData = (uint8_t *)buffer_recv; + #else uint8_t *bufData = handle->clientBuf[handle->clientRevCount % 1].buffer; + #endif if (++count == 120) return; if (filename.empty()) { @@ -474,8 +557,8 @@ void Sensor::dump_yuv(uint8_t *img1, size_t img1_size, uint8_t *img2, size_t img fwrite(img2, img2_size, 1, f); fclose(f); } - -bool Sensor::getNV12Frames(uint8_t *out_buf, int *out_size, +#ifdef ENABLE_FFMPEG +bool Sensor::getNV12Frames(uint8_t *input_buf, int *camera_input_size, std::chrono::milliseconds timeout_ms /* default 5ms */) { auto cg_video_frame = std::make_shared(); size_t retry_count = 0; @@ -495,7 +578,7 @@ bool Sensor::getNV12Frames(uint8_t *out_buf, int *out_size, break; } else if (retry_count++ <= maxRetryCount) { // decoded frames are not ready ALOGVV("%s retry #%zu get_decoded_frame() not ready, lets wait for %zums", __func__, - retry_count, size_t(timeout_ms.count())); + retry_count, size_t(timeout_ms.count())); std::this_thread::sleep_for(timeout_ms); 
            continue;
         } else if (retry_count > maxRetryCount) {
@@ -507,62 +590,56 @@ bool Sensor::getNV12Frames(uint8_t *out_buf, int *out_size,
         }
     } while (true);
 
-    cg_video_frame->copy_to_buffer(out_buf, out_size);
+    cg_video_frame->copy_to_buffer(input_buf, camera_input_size);
     ALOGVV("%s converted to format: %s size: %d \n", __FUNCTION__,
-           cg_video_frame->format() == NV12 ? "NV12" : "I420", *out_size);
+           cg_video_frame->format() == NV12 ? "NV12" : "I420", *camera_input_size);
 
     ALOGVV("%s decoded buffers are copied", __func__);
     return true;
 }
-
+#endif
 void Sensor::captureRGBA(uint8_t *img, uint32_t gain, uint32_t width, uint32_t height) {
     ALOGVV("%s: E", __FUNCTION__);
 
     auto *handle = ClientVideoBuffer::getClientInstance();
+    #ifdef CROP_ROTATE
+    uint8_t *bufData = (uint8_t *)buffer_recv;
+    #else
     uint8_t *bufData = handle->clientBuf[handle->clientRevCount % 1].buffer;
-    int out_size;
+    #endif
+    int cameraInputDataSize;
 
-    if (!gIsInFrameI420 && !gIsInFrameH264) {
-        ALOGE("%s Exit - only H264, I420 input frames supported", __FUNCTION__);
+    if (!gIsInFrameI420 && !gIsInFrameH264 && !gIsInFrameMJPG) {
+        ALOGE("%s Exit - only H264, I420 and MJPEG input frames supported", __FUNCTION__);
         return;
     }
 
-    // TODO:: handle other resolutions as required
-    if (width == 320 && height == 240) {
-        destPrevBufSize = FRAME_SIZE_240P;
-    } else if (width == 640 && height == 480) {
-        destPrevBufSize = FRAME_SIZE_480P;
-    } else {
-        // TODO: adjust default
-        destPrevBufSize = FRAME_SIZE_480P;
-    }
-
-    // Initialize to the size based on resolution.
-    out_size = destPrevBufSize;
+    // Initialize the input data size based on the client camera resolution.
+    cameraInputDataSize = mSrcFrameSize;
 
+#ifdef ENABLE_FFMPEG
     if (gIsInFrameH264) {
         if (handle->clientBuf[handle->clientRevCount % 1].decoded) {
             // Note: bufData already assigned in the function start
-            ALOGVV("%s - Already Decoded", __FUNCTION__);
-            out_size = destPrevBufSize;
-        } else {
-            getNV12Frames(bufData, &out_size);
+            ALOGVV("%s - Already Decoded Camera Input Frame..", __FUNCTION__);
+        } else {  // This is the default condition in most apps.
+            // To get the decoded frame.
+            getNV12Frames(bufData, &cameraInputDataSize);
             handle->clientBuf[handle->clientRevCount % 1].decoded = true;
-
-            ALOGVV("%s - getNV12Framesout_size: %d\n", __func__, out_size);
             std::unique_lock<std::mutex> ulock(client_buf_mutex);
             handle->decodedFrameNo++;
-            ALOGVV("%s Decoded frame #[%zd]", __FUNCTION__, handle->decodedFrameNo);
+            ALOGVV("%s Decoded Camera Input Frame No: %zd with size of %d", __FUNCTION__,
+                   handle->decodedFrameNo, cameraInputDataSize);
             ulock.unlock();
         }
     }
-
+#endif
     int src_size = mSrcWidth * mSrcHeight;
     int dstFrameSize = width * height;
 
-    // For default 640x480 resolution
+    // For the max supported resolution.
     if (width == (uint32_t)mSrcWidth && height == (uint32_t)mSrcHeight) {
-        if (gIsInFrameI420) {
+        if (gIsInFrameI420 || gIsInFrameMJPG) {
             ALOGVV(LOG_TAG " %s: I420, scaling not required: Size = %dx%d", __FUNCTION__, width,
                    height);
             const uint8_t *src_y = bufData;
@@ -592,9 +669,9 @@ void Sensor::captureRGBA(uint8_t *img, uint32_t gain, uint32_t width, uint32_t h
                                            dst_stride_abgr, width, height)) {
             }
         }
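For reference, the no-scale branch above hands a tightly packed I420 frame to libyuv for RGB32 conversion (the RGBA preview path). A self-contained sketch of that conversion under the same packed-plane assumption (all strides derived from width; the helper name is illustrative):

    #include <cstdint>
    #include "libyuv.h"

    // I420 -> 4-byte ABGR for a tightly packed frame: a w*h Y plane
    // followed by (w/2)*(h/2) U and V planes.
    static int i420ToAbgr(const uint8_t *i420, uint8_t *abgr, int w, int h) {
        const uint8_t *y = i420;
        const uint8_t *u = y + w * h;
        const uint8_t *v = u + (w / 2) * (h / 2);
        return libyuv::I420ToABGR(y, w,         // luma plane, stride = width
                                  u, w / 2,     // U plane, half stride
                                  v, w / 2,     // V plane, half stride
                                  abgr, w * 4,  // 4 bytes per output pixel
                                  w, h);        // returns 0 on success
    }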
+    // For upscaling and downscaling all other resolutions below the max supported resolution.
     } else {
-        // For lower resolutions like 320x240
-        if (gIsInFrameI420) {
+        if (gIsInFrameI420 || gIsInFrameMJPG) {
             ALOGVV(LOG_TAG " %s: I420, need to scale: Size = %dx%d", __FUNCTION__, width, height);
 
             int destFrameSize = width * height;
@@ -697,7 +774,7 @@ void Sensor::captureRGBA(uint8_t *img, uint32_t gain, uint32_t width, uint32_t h
         }
     }
 
-    ALOGVV(" %s: Done with converion into img[%p]", __FUNCTION__, img);
+    ALOGVV(" %s: Captured RGB32 image successfully.", __FUNCTION__);
 
     // Debug point
 #if 0
@@ -707,7 +784,6 @@ void Sensor::captureRGBA(uint8_t *img, uint32_t gain, uint32_t width, uint32_t h
         DUMP_RGBA(j, img, 1228800);
     }
 #endif
-    ALOGVV(" %s: X", __FUNCTION__);
 }
 
 void Sensor::captureRGB(uint8_t *img, uint32_t gain, uint32_t width, uint32_t height) {
@@ -766,56 +842,55 @@ void Sensor::captureNV12(uint8_t *img, uint32_t gain, uint32_t width, uint32_t h
     ALOGVV(LOG_TAG "%s: E", __FUNCTION__);
 
     ClientVideoBuffer *handle = ClientVideoBuffer::getClientInstance();
+    //uint8_t *bufData = handle->clientBuf[handle->clientRevCount % 1].buffer;
+    #ifdef CROP_ROTATE
+    uint8_t *bufData = (uint8_t *)buffer_recv;
+    #else
     uint8_t *bufData = handle->clientBuf[handle->clientRevCount % 1].buffer;
+    #endif
 
-    ALOGVV(LOG_TAG " %s: bufData[%p] img[%p] resolution[%d:%d]",
-           __func__, bufData, img, width, height);
+    int cameraInputDataSize;
 
-    int src_size = mSrcWidth * mSrcHeight;
-    int dstFrameSize = width * height;
+    ALOGVV(LOG_TAG " %s: bufData[%p] img[%p] resolution[%d:%d]", __func__, bufData, img, width,
+           height);
 
-    int out_size;
-
-    if (!gIsInFrameI420 && !gIsInFrameH264) {
+    if (!gIsInFrameI420 && !gIsInFrameH264 && !gIsInFrameMJPG) {
         ALOGE("%s Exit - only H264, I420 input frames supported", __FUNCTION__);
         return;
     }
 
-    // TODO: handle other resolutions as required
-    if (width == 320 && height == 240) {
-        mDstBufSize = FRAME_SIZE_240P;
-    } else if (width == 640 && height == 480) {
-        mDstBufSize = FRAME_SIZE_480P;
-    } else {
-        // TODO: adjust default
-        mDstBufSize = FRAME_SIZE_480P;
-    }
-
-    // Initialize to the size based on resolution.
-    out_size = mDstBufSize;
-
+    // Initialize the input data size based on the client camera resolution.
+    cameraInputDataSize = mSrcFrameSize;
+#ifdef ENABLE_FFMPEG
     if (gIsInFrameH264) {
         if (handle->clientBuf[handle->clientRevCount % 1].decoded) {
-            // Note: bufData already assigned in the function start
-            ALOGVV("%s - Already Decoded", __FUNCTION__);
-            out_size = mDstBufSize;
+            // Camera input already decoded as part of the preview frame.
+            // This is the default condition in most of the apps.
+            ALOGVV("%s - Already Decoded Camera Input frame..", __FUNCTION__);
        } else {
-            getNV12Frames(bufData, &out_size);
+            // Get the decoded frame for the apps which don't have an RGBA preview.
+            getNV12Frames(bufData, &cameraInputDataSize);
             handle->clientBuf[handle->clientRevCount % 1].decoded = true;
-            ALOGVV("%s - getNV12Framesout_size: %d\n", __func__, out_size);
             std::unique_lock<std::mutex> ulock(client_buf_mutex);
             handle->decodedFrameNo++;
-            ALOGVV("%s Decoded frame #[%zd]", __FUNCTION__, handle->decodedFrameNo);
+            ALOGVV("%s Decoded Camera Input Frame No: %zd with size of %d", __FUNCTION__,
+                   handle->decodedFrameNo, cameraInputDataSize);
             ulock.unlock();
         }
     }
+#endif
+    int src_size = mSrcWidth * mSrcHeight;
+    int dstFrameSize = width * height;
 
-    // For default resolotion 640x480p
+    // For the max supported resolution.
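The FRAME_SIZE_240P/480P lookup table is gone throughout this file: cameraInputDataSize now derives from the client's maximum resolution via mSrcFrameSize, which the Sensor constructor computes as mSrcWidth * mSrcHeight * BPP_NV12. Assuming BPP_NV12 encodes the 1.5 bytes per pixel of 4:2:0 subsampling, the arithmetic is the standard YUV420 size rule:

    #include <cstddef>

    // One 8-bit Y sample per pixel, plus one U and one V sample per
    // 2x2 pixel block: 1 + 1/4 + 1/4 = 1.5 bytes per pixel, which
    // covers NV12, NV21 and I420 alike.
    static inline size_t yuv420FrameSize(size_t width, size_t height) {
        return width * height * 3 / 2;
    }
    // e.g. 1920x1080 -> 3,110,400 bytes; 640x480 -> 460,800 bytes.

This is why the max-resolution branch below can copy whole frames with a single memcpy instead of re-deriving per-stream sizes.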
if (width == (uint32_t)mSrcWidth && height == (uint32_t)mSrcHeight) { - if (gIsInFrameI420) { + if (gIsInFrameI420 || gIsInFrameMJPG) { // For I420 input support ALOGVV(LOG_TAG " %s: I420 no scaling required Size = %dx%d", __FUNCTION__, width, height); +#if 1 + memcpy(img, bufData, width * height * 1.5); +#else const uint8_t *src_y = bufData; int src_stride_y = mSrcWidth; const uint8_t *src_u = bufData + src_size; @@ -839,15 +914,16 @@ void Sensor::captureNV12(uint8_t *img, uint32_t gain, uint32_t width, uint32_t h dst_stride_uv, width, height)) { } } +#endif } else { // For NV12 Input support. No Color conversion - ALOGVV(LOG_TAG " %s: H264 to NV12 no scaling required: Size = %dx%d, out_size: %d", - __FUNCTION__, width, height, out_size); - memcpy(img, bufData, out_size); + ALOGVV(LOG_TAG " %s: NV12 frame without scaling and color conversion: Size = %dx%d", + __FUNCTION__, width, height); + memcpy(img, bufData, cameraInputDataSize); } + // For upscaling and downscaling all other resolutions below max supported resolution. } else { - // For lower resoltuions like 320x240p - if (gIsInFrameI420) { + if (gIsInFrameI420 || gIsInFrameMJPG) { // For I420 input support ALOGVV(LOG_TAG " %s: I420 with scaling: Size = %dx%d", __FUNCTION__, width, height); @@ -888,7 +964,9 @@ void Sensor::captureNV12(uint8_t *img, uint32_t gain, uint32_t width, uint32_t h uint8_t *dst_uv = dst_y + width * height; int dst_stride_uv = width; - +#if 1 + memcpy(img, mDstBuf.data(), width * height * (1.5)); +#else if (m_major_version == 1) { ALOGVV(LOG_TAG " %s: [SG1] convert I420 to NV12!", __FUNCTION__); if (int ret = libyuv::I420ToNV12(src_y, src_stride_y, src_u, src_stride_u, src_v, @@ -902,9 +980,11 @@ void Sensor::captureNV12(uint8_t *img, uint32_t gain, uint32_t width, uint32_t h dst_stride_uv, width, height)) { } } +#endif } else { // For NV12 Input support - ALOGVV(LOG_TAG " %s: H264 with scaling Size = %dx%d", __FUNCTION__, width, height); + ALOGVV(LOG_TAG " %s: NV12 frame with scaling to Size = %dx%d", __FUNCTION__, width, + height); const uint8_t *src_y = bufData; int src_stride_y = mSrcWidth; @@ -970,231 +1050,227 @@ void Sensor::captureNV12(uint8_t *img, uint32_t gain, uint32_t width, uint32_t h saveNV21(img, width * height * 3); } #endif - ALOGI(LOG_TAG " %s: Captured NV12 Image sucessfully!!! 
", __FUNCTION__); + ALOGVV(LOG_TAG " %s: Captured NV12 image sucessfully..", __FUNCTION__); } -void Sensor::captureJPEG(uint8_t *img, uint32_t gain, uint32_t width, uint32_t height) { +void Sensor::captureNV21(uint8_t *img, uint32_t gain, uint32_t width, uint32_t height) { ALOGVV("%s: E", __FUNCTION__); ClientVideoBuffer *handle = ClientVideoBuffer::getClientInstance(); + //uint8_t *bufData = handle->clientBuf[handle->clientRevCount % 1].buffer; + // #ifdef CROP_ROTATE + // uint8_t *bufData = (uint8_t *)buffer_recv; + // #else uint8_t *bufData = handle->clientBuf[handle->clientRevCount % 1].buffer; + //#endif int src_size = mSrcWidth * mSrcHeight; int dstFrameSize = width * height; - int out_size; + int cameraInputDataSize; - if (!gIsInFrameI420 && !gIsInFrameH264) { - ALOGE("%s Exit - only H264, I420 input frames supported", __FUNCTION__); - return; - } - - //TODO: handle other resolutions as required - if (width == 320 && height == 240) { - mDstJpegBufSize = FRAME_SIZE_240P; - } else if (width == 640 && height == 480) { - mDstJpegBufSize = FRAME_SIZE_480P; - } else { - //TODO: adjust default - mDstJpegBufSize = FRAME_SIZE_480P; + if (!gIsInFrameI420 && !gIsInFrameH264 && !gIsInFrameMJPG) { + ALOGE("%s Exit - only H264, H265, I420 input frames supported", __FUNCTION__); + return; } - //Initialize to the size based on resolution. - out_size = mDstJpegBufSize; + // Initialize the input data size based on client camera resolution. + cameraInputDataSize = mSrcFrameSize; +#ifdef ENABLE_FFMPEG if (gIsInFrameH264) { if (handle->clientBuf[handle->clientRevCount % 1].decoded) { - //Note: bufData already assigned in the function start - ALOGVV("%s - Already Decoded", __FUNCTION__); - out_size = mDstJpegBufSize; - } else { - getNV12Frames(bufData, &out_size); - handle->clientBuf[handle->clientRevCount % 1].decoded = true; - ALOGVV("%s - getNV12Framesout_size: %d\n", __func__, out_size); - std::unique_lock ulock(client_buf_mutex); - handle->decodedFrameNo++; - ALOGVV("%s Decoded frame #[%zd]", __FUNCTION__, handle->decodedFrameNo); - ulock.unlock(); - } + // If already decoded camera input frame. + ALOGVV("%s - Already Decoded Camera Input frame", __FUNCTION__); + } else { + // To get the decoded frame. 
+ getNV12Frames(bufData, &cameraInputDataSize); + handle->clientBuf[handle->clientRevCount % 1].decoded = true; + std::unique_lock ulock(client_buf_mutex); + handle->decodedFrameNo++; + ALOGVV("%s Decoded Camera Input Frame No: %zd with size of %d", __FUNCTION__, + handle->decodedFrameNo, cameraInputDataSize); + ulock.unlock(); + } } +#endif //For default resolution 640x480p if (width == (uint32_t)mSrcWidth && height == (uint32_t)mSrcHeight) { - //For I420 input - if (gIsInFrameI420) { - ALOGVV(LOG_TAG "%s: I420 input without scaling required Size = %dx%d for JPEG conversion", - __FUNCTION__, width, height); - - const uint8_t *src_y = bufData; - int src_stride_y = mSrcWidth; - const uint8_t *src_u = bufData + src_size; - int src_stride_u = mSrcWidth >> 1; - const uint8_t *src_v = bufData + src_size + src_size / 4; - int src_stride_v = mSrcWidth >> 1; - - uint8_t *dst_y = img; - int dst_stride_y = width; - uint8_t *dst_vu = dst_y + src_size; - int dst_stride_vu = width; - - if (int ret = libyuv::I420ToNV21(src_y, src_stride_y, src_u, src_stride_u, src_v, - src_stride_v, dst_y, dst_stride_y, dst_vu, - dst_stride_vu, width, height)) { - } - //For NV12 input - } else { - ALOGVV(LOG_TAG "%s: NV12 to NV21 conversion for JPEG conversion: Size = %dx%d", - __FUNCTION__, width, height); - - const uint8_t *src_y = bufData; - int src_stride_y = mSrcWidth; - const uint8_t *src_uv = bufData + src_size; - int src_stride_uv = mSrcWidth; - - uint8_t *dst_y = mDstJpegBuf.data(); - int dst_stride_y = mSrcWidth; - uint8_t *dst_u = mDstJpegBuf.data() + src_size; - int dst_stride_u = mSrcWidth >> 1; - uint8_t *dst_v = mDstJpegBuf.data() + src_size + src_size / 4; - int dst_stride_v = mSrcWidth >> 1; - - if (int ret = libyuv::NV12ToI420(src_y, src_stride_y, src_uv, src_stride_uv, - dst_y,dst_stride_y, dst_u, dst_stride_u, dst_v, dst_stride_v, - mSrcWidth, mSrcHeight)) { - } - - src_y =mDstJpegBuf.data(); - src_stride_y = mSrcWidth; - uint8_t *src_u = mDstJpegBuf.data() + src_size; - int src_stride_u = src_stride_y >> 1; - const uint8_t *src_v = mDstJpegBuf.data() + src_size + src_size / 4; - int src_stride_v = src_stride_y >> 1; - - dst_y = img; - dst_stride_y = width; - uint8_t *dst_vu = dst_y + dstFrameSize; - int dst_stride_vu = width; - - if (int ret = libyuv::I420ToNV21(src_y, src_stride_y, src_u, src_stride_u, src_v, - src_stride_v, dst_y, dst_stride_y, dst_vu, - dst_stride_vu, width, height)) { - } + // For I420 input + if (gIsInFrameI420 || gIsInFrameMJPG) { + ALOGVV(LOG_TAG "%s: I420 to NV21 conversion without scaling: Size = %dx%d", + __FUNCTION__, width, height); + + const uint8_t *src_y = bufData; + int src_stride_y = mSrcWidth; + const uint8_t *src_u = bufData + src_size; + int src_stride_u = mSrcWidth >> 1; + const uint8_t *src_v = bufData + src_size + src_size / 4; + int src_stride_v = mSrcWidth >> 1; + + uint8_t *dst_y = img; + int dst_stride_y = width; + uint8_t *dst_vu = dst_y + src_size; + int dst_stride_vu = width; + + if (int ret = libyuv::I420ToNV21(src_y, src_stride_y, src_u, src_stride_u, src_v, + src_stride_v, dst_y, dst_stride_y, dst_vu, + dst_stride_vu, width, height)) { + } + // For NV12 input + } else { + ALOGVV(LOG_TAG "%s: NV12 to NV21 conversion without scaling: Size = %dx%d", + __FUNCTION__, width, height); + + const uint8_t *src_y = bufData; + int src_stride_y = mSrcWidth; + const uint8_t *src_uv = bufData + src_size; + int src_stride_uv = mSrcWidth; + + uint8_t *dst_y = mDstJpegBuf.data(); + int dst_stride_y = mSrcWidth; + uint8_t *dst_u = mDstJpegBuf.data() + src_size; + int 
dst_stride_u = mSrcWidth >> 1; + uint8_t *dst_v = mDstJpegBuf.data() + src_size + src_size / 4; + int dst_stride_v = mSrcWidth >> 1; + + if (int ret = libyuv::NV12ToI420(src_y, src_stride_y, src_uv, src_stride_uv, dst_y, + dst_stride_y, dst_u, dst_stride_u, dst_v, dst_stride_v, + mSrcWidth, mSrcHeight)) { + } + + src_y = mDstJpegBuf.data(); + src_stride_y = mSrcWidth; + uint8_t *src_u = mDstJpegBuf.data() + src_size; + int src_stride_u = src_stride_y >> 1; + const uint8_t *src_v = mDstJpegBuf.data() + src_size + src_size / 4; + int src_stride_v = src_stride_y >> 1; + + dst_y = img; + dst_stride_y = width; + uint8_t *dst_vu = dst_y + dstFrameSize; + int dst_stride_vu = width; + + if (int ret = libyuv::I420ToNV21(src_y, src_stride_y, src_u, src_stride_u, src_v, + src_stride_v, dst_y, dst_stride_y, dst_vu, + dst_stride_vu, width, height)) { + } } - //For lower resoltuions like 320x240p + // For upscaling and downscaling all other resolutions below max supported resolution. } else { - //For I420 input - if (gIsInFrameI420) { - ALOGVV(LOG_TAG "%s: I420 with scaling: Size = %dx%d for JPEG conversion", - __FUNCTION__, width, height); - - const uint8_t *src_y = bufData; - int src_stride_y = mSrcWidth; - const uint8_t *src_u = bufData + src_size; - int src_stride_u = src_stride_y >> 1; - const uint8_t *src_v = bufData + src_size + src_size / 4; - int src_stride_v = src_stride_y >> 1; - int src_width = mSrcWidth; - int src_height = mSrcHeight; - - uint8_t *dst_y = mDstJpegBuf.data(); - int dst_stride_y = width; - uint8_t *dst_u = mDstJpegBuf.data() + dstFrameSize; - int dst_stride_u = width >> 1; - uint8_t *dst_v = mDstJpegBuf.data() + dstFrameSize + dstFrameSize / 4; - int dst_stride_v = width >> 1; - int dst_width = width; - int dst_height = height; - auto filtering = libyuv::kFilterNone; - - if (int ret = libyuv::I420Scale(src_y, src_stride_y, src_u, src_stride_u, src_v, - src_stride_v, src_width, src_height, dst_y, - dst_stride_y, dst_u, dst_stride_u, dst_v, - dst_stride_v,dst_width, dst_height, filtering)) { - } - - ALOGVV("%s: I420 Scaling done for JPEG conversion", __FUNCTION__); - - src_y = mDstJpegBuf.data(); - src_stride_y = width; - src_u = mDstJpegBuf.data() + dstFrameSize; - src_stride_u = width >> 1; - src_v = mDstJpegBuf.data() + dstFrameSize + dstFrameSize / 4; - src_stride_v = width >> 1; - dst_y = img; - dst_stride_y = width; - - uint8_t *dst_vu = dst_y + width * height; - int dst_stride_vu = width; - - if (int ret = libyuv::I420ToNV21(src_y, src_stride_y, src_u, src_stride_u, src_v, - src_stride_v, dst_y, dst_stride_y, - dst_vu, dst_stride_vu, width, height)) { + // For I420 input + if (gIsInFrameI420 || gIsInFrameMJPG) { + ALOGVV(LOG_TAG "%s: I420 to NV21 with scaling: Size = %dx%d", __FUNCTION__, width, + height); + + const uint8_t *src_y = bufData; + int src_stride_y = mSrcWidth; + const uint8_t *src_u = bufData + src_size; + int src_stride_u = src_stride_y >> 1; + const uint8_t *src_v = bufData + src_size + src_size / 4; + int src_stride_v = src_stride_y >> 1; + int src_width = mSrcWidth; + int src_height = mSrcHeight; + + uint8_t *dst_y = mDstJpegBuf.data(); + int dst_stride_y = width; + uint8_t *dst_u = mDstJpegBuf.data() + dstFrameSize; + int dst_stride_u = width >> 1; + uint8_t *dst_v = mDstJpegBuf.data() + dstFrameSize + dstFrameSize / 4; + int dst_stride_v = width >> 1; + int dst_width = width; + int dst_height = height; + auto filtering = libyuv::kFilterNone; + + if (int ret = libyuv::I420Scale(src_y, src_stride_y, src_u, src_stride_u, src_v, + src_stride_v, src_width, 
src_height, dst_y, + dst_stride_y, dst_u, dst_stride_u, dst_v, dst_stride_v, + dst_width, dst_height, filtering)) { } - //For NV12 input - } else { - ALOGVV(LOG_TAG "%s: NV12 input with scaling Size = %dx%d for JPEG conversion", __FUNCTION__, width, height); - - const uint8_t *src_y = bufData; - int src_stride_y = mSrcWidth; - const uint8_t *src_uv = bufData + src_size; - int src_stride_uv = mSrcWidth; - - uint8_t *dst_y = mDstJpegTempBuf.data(); - int dst_stride_y = mSrcWidth; - uint8_t *dst_u = mDstJpegTempBuf.data() + src_size; - int dst_stride_u = mSrcWidth >> 1; - uint8_t *dst_v = mDstJpegTempBuf.data() + src_size + src_size / 4; - int dst_stride_v = mSrcWidth >> 1; - - if (int ret = libyuv::NV12ToI420(src_y, src_stride_y, src_uv, src_stride_uv, dst_y, - dst_stride_y, dst_u, dst_stride_u, dst_v, dst_stride_v, - mSrcWidth, mSrcHeight)) { - } - - src_y = mDstJpegTempBuf.data(); - src_stride_y = mSrcWidth; - uint8_t *src_u = mDstJpegTempBuf.data() + src_size; - int src_stride_u = src_stride_y >> 1; - const uint8_t *src_v = mDstJpegTempBuf.data() + src_size + src_size / 4; - int src_stride_v = src_stride_y >> 1; - int src_width = mSrcWidth; - int src_height = mSrcHeight; - - dst_y = mDstJpegBuf.data(); - dst_stride_y = width; - dst_u = mDstJpegBuf.data() + dstFrameSize; - dst_stride_u = width >> 1; - dst_v = mDstJpegBuf.data() + dstFrameSize + dstFrameSize / 4; - dst_stride_v = width >> 1; - int dst_width = width; - int dst_height = height; - auto filtering = libyuv::kFilterNone; - - if (int ret = libyuv::I420Scale(src_y, src_stride_y, src_u, src_stride_u, src_v, - src_stride_v, src_width, src_height, dst_y, - dst_stride_y, dst_u, dst_stride_u, dst_v, dst_stride_v, - dst_width, dst_height, filtering)) { - } - - src_y = mDstJpegBuf.data(); - src_stride_y = width; - src_u = mDstJpegBuf.data() + dstFrameSize; - src_stride_u = width >> 1; - src_v = mDstJpegBuf.data() + dstFrameSize + dstFrameSize / 4; - src_stride_v = width >> 1; - - dst_y = img; - dst_stride_y = width; - uint8_t *dst_vu = dst_y + dstFrameSize; - int dst_stride_vu = width; - - if (int ret = libyuv::I420ToNV21(src_y, src_stride_y, src_u, src_stride_u, src_v, - src_stride_v, dst_y, dst_stride_y, dst_vu, - dst_stride_vu, width, height)) { - } - } + + src_y = mDstJpegBuf.data(); + src_stride_y = width; + src_u = mDstJpegBuf.data() + dstFrameSize; + src_stride_u = width >> 1; + src_v = mDstJpegBuf.data() + dstFrameSize + dstFrameSize / 4; + src_stride_v = width >> 1; + dst_y = img; + dst_stride_y = width; + + uint8_t *dst_vu = dst_y + width * height; + int dst_stride_vu = width; + + if (int ret = libyuv::I420ToNV21(src_y, src_stride_y, src_u, src_stride_u, src_v, + src_stride_v, dst_y, dst_stride_y, dst_vu, + dst_stride_vu, width, height)) { + } + // For NV12 input + } else { + ALOGVV(LOG_TAG "%s: NV12 to NV21 conversion with scaling: Size = %dx%d", __FUNCTION__, + width, height); + + const uint8_t *src_y = bufData; + int src_stride_y = mSrcWidth; + const uint8_t *src_uv = bufData + src_size; + int src_stride_uv = mSrcWidth; + + uint8_t *dst_y = mDstJpegTempBuf.data(); + int dst_stride_y = mSrcWidth; + uint8_t *dst_u = mDstJpegTempBuf.data() + src_size; + int dst_stride_u = mSrcWidth >> 1; + uint8_t *dst_v = mDstJpegTempBuf.data() + src_size + src_size / 4; + int dst_stride_v = mSrcWidth >> 1; + + if (int ret = libyuv::NV12ToI420(src_y, src_stride_y, src_uv, src_stride_uv, dst_y, + dst_stride_y, dst_u, dst_stride_u, dst_v, dst_stride_v, + mSrcWidth, mSrcHeight)) { + } + + src_y = mDstJpegTempBuf.data(); + src_stride_y = mSrcWidth; 
+            uint8_t *src_u = mDstJpegTempBuf.data() + src_size;
+            int src_stride_u = src_stride_y >> 1;
+            const uint8_t *src_v = mDstJpegTempBuf.data() + src_size + src_size / 4;
+            int src_stride_v = src_stride_y >> 1;
+            int src_width = mSrcWidth;
+            int src_height = mSrcHeight;
+
+            dst_y = mDstJpegBuf.data();
+            dst_stride_y = width;
+            dst_u = mDstJpegBuf.data() + dstFrameSize;
+            dst_stride_u = width >> 1;
+            dst_v = mDstJpegBuf.data() + dstFrameSize + dstFrameSize / 4;
+            dst_stride_v = width >> 1;
+            int dst_width = width;
+            int dst_height = height;
+            auto filtering = libyuv::kFilterNone;
+
+            if (int ret = libyuv::I420Scale(src_y, src_stride_y, src_u, src_stride_u, src_v,
+                                            src_stride_v, src_width, src_height, dst_y,
+                                            dst_stride_y, dst_u, dst_stride_u, dst_v, dst_stride_v,
+                                            dst_width, dst_height, filtering)) {
+            }
+
+            src_y = mDstJpegBuf.data();
+            src_stride_y = width;
+            src_u = mDstJpegBuf.data() + dstFrameSize;
+            src_stride_u = width >> 1;
+            src_v = mDstJpegBuf.data() + dstFrameSize + dstFrameSize / 4;
+            src_stride_v = width >> 1;
+
+            dst_y = img;
+            dst_stride_y = width;
+            uint8_t *dst_vu = dst_y + dstFrameSize;
+            int dst_stride_vu = width;
+
+            if (int ret = libyuv::I420ToNV21(src_y, src_stride_y, src_u, src_stride_u, src_v,
+                                             src_stride_v, dst_y, dst_stride_y, dst_vu,
+                                             dst_stride_vu, width, height)) {
+            }
+        }
     }
-    ALOGVV("%s: Successfully Converted to NV21 for JPEG Capture!!!", __FUNCTION__);
+    ALOGVV("%s: Captured NV21 image successfully.", __FUNCTION__);
 }
 
 void Sensor::captureDepth(uint8_t *img, uint32_t gain, uint32_t width, uint32_t height) {
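The scaled NV12 path above reaches NV21 in two steps (NV12ToI420, then I420ToNV21), since libyuv offers no direct NV12-to-NV21 scaler; for the same-size case the two steps amount to swapping each interleaved UV byte pair. A self-contained sketch of the two-step sequence under the packed-plane assumption, with nv12ToNv21 as an illustrative name and a local scratch buffer standing in for mDstJpegBuf:

    #include <cstdint>
    #include <vector>
    #include "libyuv.h"

    // Same-size NV12 -> NV21 via an I420 intermediate, mirroring the
    // conversion order used by captureNV21() above.
    static int nv12ToNv21(const uint8_t *nv12, uint8_t *nv21, int w, int h) {
        std::vector<uint8_t> i420(static_cast<size_t>(w) * h * 3 / 2);
        uint8_t *y = i420.data();
        uint8_t *u = y + w * h;
        uint8_t *v = u + (w / 2) * (h / 2);
        // Split the interleaved UV plane into planar U and V.
        if (int ret = libyuv::NV12ToI420(nv12, w, nv12 + w * h, w,
                                         y, w, u, w / 2, v, w / 2, w, h)) {
            return ret;
        }
        // Re-interleave the chroma as VU to produce NV21.
        return libyuv::I420ToNV21(y, w, u, w / 2, v, w / 2,
                                  nv21, w, nv21 + w * h, w, w, h);
    }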