
Pixel Buffer Optimization

camera_next
Vitaliy Zarubin committed 1 year ago
parent commit 4838cff788
  1. example/app_run.sh (98)
  2. example/aurora/rpm/ru.auroraos.flutter_example_packages.spec (2)
  3. packages/camera/camera_aurora/aurora/3rdpatry/libyuv/AUTHORS (6)
  4. packages/camera/camera_aurora/aurora/3rdpatry/libyuv/LICENSE (29)
  5. packages/camera/camera_aurora/aurora/3rdpatry/libyuv/PATENTS (24)
  6. packages/camera/camera_aurora/aurora/3rdpatry/libyuv/README.md (18)
  7. packages/camera/camera_aurora/aurora/3rdpatry/libyuv/aarch64/libyuv.so (BIN)
  8. packages/camera/camera_aurora/aurora/3rdpatry/libyuv/armv7l/libyuv.so (BIN)
  9. packages/camera/camera_aurora/aurora/CMakeLists.txt (15)
  10. packages/camera/camera_aurora/aurora/camera_aurora_plugin.cpp (9)
  11. packages/camera/camera_aurora/aurora/include/camera_aurora/camera_aurora_plugin.h (5)
  12. packages/camera/camera_aurora/aurora/include/camera_aurora/texture_camera.h (17)
  13. packages/camera/camera_aurora/aurora/include/camera_aurora/texture_camera_egl_helper.h (20)
  14. packages/camera/camera_aurora/aurora/include/camera_aurora/texture_camera_pixels_helper.h (33)
  15. packages/camera/camera_aurora/aurora/include/libyuv/libyuv.h (33)
  16. packages/camera/camera_aurora/aurora/include/libyuv/libyuv/basic_types.h (68)
  17. packages/camera/camera_aurora/aurora/include/libyuv/libyuv/compare.h (111)
  18. packages/camera/camera_aurora/aurora/include/libyuv/libyuv/compare_row.h (130)
  19. packages/camera/camera_aurora/aurora/include/libyuv/libyuv/convert.h (933)
  20. packages/camera/camera_aurora/aurora/include/libyuv/libyuv/convert_argb.h (2313)
  21. packages/camera/camera_aurora/aurora/include/libyuv/libyuv/convert_from.h (203)
  22. packages/camera/camera_aurora/aurora/include/libyuv/libyuv/convert_from_argb.h (381)
  23. packages/camera/camera_aurora/aurora/include/libyuv/libyuv/cpu_id.h (127)
  24. packages/camera/camera_aurora/aurora/include/libyuv/libyuv/loongson_intrinsics.h (1949)
  25. packages/camera/camera_aurora/aurora/include/libyuv/libyuv/macros_msa.h (246)
  26. packages/camera/camera_aurora/aurora/include/libyuv/libyuv/mjpeg_decoder.h (195)
  27. packages/camera/camera_aurora/aurora/include/libyuv/libyuv/planar_functions.h (1129)
  28. packages/camera/camera_aurora/aurora/include/libyuv/libyuv/rotate.h (232)
  29. packages/camera/camera_aurora/aurora/include/libyuv/libyuv/rotate_argb.h (37)
  30. packages/camera/camera_aurora/aurora/include/libyuv/libyuv/rotate_row.h (224)
  31. packages/camera/camera_aurora/aurora/include/libyuv/libyuv/row.h (5702)
  32. packages/camera/camera_aurora/aurora/include/libyuv/libyuv/scale.h (320)
  33. packages/camera/camera_aurora/aurora/include/libyuv/libyuv/scale_argb.h (76)
  34. packages/camera/camera_aurora/aurora/include/libyuv/libyuv/scale_rgb.h (42)
  35. packages/camera/camera_aurora/aurora/include/libyuv/libyuv/scale_row.h (1709)
  36. packages/camera/camera_aurora/aurora/include/libyuv/libyuv/scale_uv.h (51)
  37. packages/camera/camera_aurora/aurora/include/libyuv/libyuv/version.h (16)
  38. packages/camera/camera_aurora/aurora/include/libyuv/libyuv/video_common.h (222)
  39. packages/camera/camera_aurora/aurora/texture_camera.cpp (113)
  40. packages/camera/camera_aurora/aurora/texture_camera_egl_helper.cpp (27)
  41. packages/camera/camera_aurora/aurora/texture_camera_pixels_helper.cpp (77)

98
example/app_run.sh

@@ -0,0 +1,98 @@
#!/bin/bash
# Copyright (c) 2023. Open Mobile Platform LLC.
# License: Proprietary.
## Build example, sign rpm, upload/install/run rpm to device
## Usage
##
## chmod +x ./app_run.sh
##
## ./app_run.sh \
## -d <ip>:<password> \
## -s /home/user/sign/folder \
## -p ru.auroraos.flutter_example_packages
sudo echo 'Run...';
## Flutter path
FLUTTER="/home/keygenqt/Documents/OMP/OpenSource/flutter/bin/flutter"
## https://developer.auroraos.ru/doc/software_development/psdk/setup
## Install Platform SDK path
## You may not have set the PSDK_DIR environment variable.
## export PSDK_DIR=$HOME/AuroraPlatformSDK/sdks/aurora_psdk
while getopts d:s:p: flag; do
case "${flag}" in
d) device=${OPTARG} ;;
s) sign=${OPTARG} ;;
p) package=${OPTARG} ;;
*)
echo "usage: $0 [-d] [-s] [-p]" >&2
exit 1
;;
esac
done
if [ -z "$package" ]; then
echo "Specify package"
exit 1;
fi
## Update dependencies
$FLUTTER pub get
## Generate internationalization files
$FLUTTER pub run build_runner build --delete-conflicting-outputs
## Build aurora example app
{
$FLUTTER build aurora --release
} || {
exit 1;
}
if [ -n "$sign" ]; then
key=$(ls "$sign"/*key.pem)
if [ -z "$key" ]; then
echo "Key *key.pem not found."
exit 1;
fi
cert=$(ls "$sign"/*cert.pem)
if [ -z "$cert" ]; then
echo "Key *cert.pem not found."
exit 1;
fi
## Sign the RPM with the provided key and certificate
"$PSDK_DIR"/sdk-chroot rpmsign-external sign \
--key "$key" \
--cert "$cert" \
build/aurora/aurora-arm/release/RPMS/*.rpm
fi
if [ -n "$device" ]; then
IFS=':' read -ra ADDR <<< "$device"
D_IP="${ADDR[0]}"
D_PASS="${ADDR[1]}"
# shellcheck disable=SC2012
rpm=$(ls "$PWD"/build/aurora/aurora-arm/release/RPMS/*.rpm | sort -r | head -n 1)
# upload rpm
scp "$rpm" defaultuser@"$D_IP:/home/defaultuser/Downloads"
# install rpm
ssh -t defaultuser@"$D_IP" "echo $D_PASS | devel-su pkcon -y install-local /home/defaultuser/Downloads/$package*.rpm"
# run application
ssh -t defaultuser@"$D_IP" "/usr/bin/$package"
fi

2
example/aurora/rpm/ru.auroraos.flutter_example_packages.spec

@@ -1,5 +1,5 @@
%global __provides_exclude_from ^%{_datadir}/%{name}/lib/.*$
%global __requires_exclude ^lib(dconf|flutter-embedder|maliit-glib|appmanifest-.+|.+_platform_plugin)\\.so.*$
%global __requires_exclude ^lib(yuv|dconf|flutter-embedder|maliit-glib|appmanifest-.+|.+_platform_plugin)\\.so.*$
Name: ru.auroraos.flutter_example_packages
Summary: Examples of the flutter plugins for Aurora OS.

6
packages/camera/camera_aurora/aurora/3rdpatry/libyuv/AUTHORS

@@ -0,0 +1,6 @@
# Names should be added to this file like so:
# Name or Organization <email address>
Google Inc.
Ivan Pavlotskiy <ivan.pavlotskiy@lgepartner.com>

29
packages/camera/camera_aurora/aurora/3rdpatry/libyuv/LICENSE

@@ -0,0 +1,29 @@
Copyright 2011 The LibYuv Project Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Google nor the names of its contributors may
be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

24
packages/camera/camera_aurora/aurora/3rdpatry/libyuv/PATENTS

@@ -0,0 +1,24 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the LibYuv code package.
Google hereby grants to you a perpetual, worldwide, non-exclusive,
no-charge, irrevocable (except as stated in this section) patent
license to make, have made, use, offer to sell, sell, import,
transfer, and otherwise run, modify and propagate the contents of this
implementation of the LibYuv code package, where such license applies
only to those patent claims, both currently owned by Google and
acquired in the future, licensable by Google that are necessarily
infringed by this implementation of the LibYuv code package. This
grant does not include claims that would be infringed only as a
consequence of further modification of this implementation. If you or
your agent or exclusive licensee institute or order or agree to the
institution of patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that this
implementation of the LibYuv code package or any code incorporated
within this implementation of the LibYuv code package constitutes
direct or contributory patent infringement, or inducement of patent
infringement, then any patent rights granted to you under this License
for this implementation of the LibYuv code package shall terminate as
of the date such litigation is filed.

18
packages/camera/camera_aurora/aurora/3rdpatry/libyuv/README.md

@@ -0,0 +1,18 @@
**libyuv** is an open source project that includes YUV scaling and conversion functionality.
* Scale YUV to prepare content for compression, with point, bilinear or box filter.
* Convert to YUV from webcam formats for compression.
* Convert to RGB formats for rendering/effects.
* Rotate by 90/180/270 degrees to adjust for mobile devices in portrait mode.
* Optimized for SSSE3/AVX2 on x86/x64.
* Optimized for Neon on Arm.
* Optimized for MSA on Mips.
### Development
See [Getting started][1] for instructions on how to get started developing.
You can also browse the [docs directory][2] for more documentation.
[1]: ./docs/getting_started.md
[2]: ./docs/
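As a hedged illustration of the scaling feature listed above: a minimal sketch of halving an I420 frame with a bilinear filter. I420Scale and kFilterBilinear are declared in libyuv/scale.h, which this commit adds but whose diff is not reproduced in this excerpt; the helper name below is illustrative, not part of the commit.

// Illustrative only: halve an I420 frame with a bilinear filter.
#include <cstdint>
#include <vector>

#include "libyuv/scale.h"

void HalveI420(const uint8_t *src_y, const uint8_t *src_u, const uint8_t *src_v,
               int width, int height,
               std::vector<uint8_t> &dst_y,
               std::vector<uint8_t> &dst_u,
               std::vector<uint8_t> &dst_v)
{
    const int dst_width = width / 2;
    const int dst_height = height / 2;
    const int src_half = (width + 1) / 2;
    const int dst_half = (dst_width + 1) / 2;
    dst_y.resize(static_cast<size_t>(dst_width) * dst_height);
    dst_u.resize(static_cast<size_t>(dst_half) * ((dst_height + 1) / 2));
    dst_v.resize(dst_u.size());
    libyuv::I420Scale(src_y, width, src_u, src_half, src_v, src_half,
                      width, height,
                      dst_y.data(), dst_width,
                      dst_u.data(), dst_half,
                      dst_v.data(), dst_half,
                      dst_width, dst_height,
                      libyuv::kFilterBilinear);
}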

BIN
packages/camera/camera_aurora/aurora/3rdpatry/libyuv/aarch64/libyuv.so

Binary file not shown.

BIN
packages/camera/camera_aurora/aurora/3rdpatry/libyuv/armv7l/libyuv.so

Binary file not shown.

15
packages/camera/camera_aurora/aurora/CMakeLists.txt

@@ -10,6 +10,7 @@ project(${PROJECT_NAME} LANGUAGES CXX)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(BUILD_TARGET "armv7l")
set(CMAKE_CXX_FLAGS "-Wall -Wextra -Wno-psabi")
set(CMAKE_CXX_FLAGS_RELEASE "-O3")
@@ -22,8 +23,6 @@ pkg_check_modules(GLES REQUIRED IMPORTED_TARGET glesv2)
pkg_check_modules(SC REQUIRED IMPORTED_TARGET streamcamera)
add_library(${PLUGIN_NAME} SHARED
texture_camera_pixels_helper.cpp
texture_camera_egl_helper.cpp
texture_camera.cpp
camera_aurora_plugin.cpp
)
@@ -38,3 +37,15 @@ target_include_directories(${PLUGIN_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/inc
target_include_directories(${PLUGIN_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include/${PROJECT_NAME})
target_compile_definitions(${PLUGIN_NAME} PRIVATE PLUGIN_IMPL)
## yuv
add_custom_command(TARGET ${PLUGIN_NAME} POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy
${CMAKE_CURRENT_SOURCE_DIR}/3rdpatry/libyuv/${BUILD_TARGET}/libyuv.so
${ROOT_PROJECT_BINARY_DIR}/bundle/lib/libyuv.so)
target_include_directories(${PLUGIN_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include/libyuv)
add_library(yuv SHARED IMPORTED)
set_property(TARGET yuv PROPERTY IMPORTED_LOCATION ${CMAKE_CURRENT_SOURCE_DIR}/3rdpatry/libyuv/${BUILD_TARGET}/libyuv.so)
target_link_libraries(${PLUGIN_NAME} PUBLIC yuv)

9
packages/camera/camera_aurora/aurora/camera_aurora_plugin.cpp

@@ -96,7 +96,7 @@ void CameraAuroraPlugin::RegisterMethods(PluginRegistrar &registrar)
onTakePicture(call);
return;
}
if (method == CameraAuroraMethods::StartVideoRecording)
{
onStartVideoRecording(call);
@@ -167,10 +167,7 @@ void CameraAuroraPlugin::onDispose(const MethodCall &call)
void CameraAuroraPlugin::onStartCapture(const MethodCall &call)
{
auto width = call.GetArgument<Encodable::Int>("width");
auto height = call.GetArgument<Encodable::Int>("height");
auto state = m_textureCamera->StartCapture(width, height);
auto state = m_textureCamera->StartCapture();
EventChannel(CameraAuroraEvents::StateChanged, MethodCodecType::Standard).SendEvent(state);
@@ -186,7 +183,7 @@ void CameraAuroraPlugin::onStopCapture(const MethodCall &call)
void CameraAuroraPlugin::onTakePicture(const MethodCall &call)
{
call.SendSuccessResponse(m_textureCamera->GetImageBase64());
unimplemented(call);
}
void CameraAuroraPlugin::onStartVideoRecording(const MethodCall &call)

5
packages/camera/camera_aurora/aurora/include/camera_aurora/camera_aurora_plugin.h

@@ -10,6 +10,9 @@
#include <camera_aurora/texture_camera.h>
#include <QImage>
#include <QtCore>
class PLUGIN_EXPORT CameraAuroraPlugin final
: public QObject,
public PluginInterface
@@ -29,7 +32,7 @@ private:
void onDispose(const MethodCall &call);
void onStartCapture(const MethodCall &call);
void onStopCapture(const MethodCall &call);
void onTakePicture(const MethodCall &call);
void onStartVideoRecording(const MethodCall &call);

17
packages/camera/camera_aurora/aurora/include/camera_aurora/texture_camera.h

@@ -8,12 +8,6 @@
#include <flutter/plugin-interface.h>
#include <streamcamera/streamcamera.h>
#include <QImage>
#include <QtCore>
#include <EGL/egl.h>
#include <EGL/eglext.h>
typedef std::function<void()> CameraErrorHandler;
class TextureCamera : public Aurora::StreamCamera::CameraListener
@@ -25,14 +19,13 @@ public:
void onCameraFrame(std::shared_ptr<Aurora::StreamCamera::GraphicBuffer> buffer) override;
void onCameraParameterChanged(Aurora::StreamCamera::CameraParameter,
const std::string &value) override;
std::vector<Encodable> GetAvailableCameras();
std::map<Encodable, Encodable> Register(std::string cameraName);
std::map<Encodable, Encodable> Unregister();
std::map<Encodable, Encodable> StartCapture(size_t width, size_t height);
std::map<Encodable, Encodable> StartCapture();
void StopCapture();
std::map<Encodable, Encodable> GetState();
std::string GetImageBase64();
private:
bool CreateCamera(std::string cameraName);
@@ -40,13 +33,12 @@ private:
private:
TextureRegistrar *m_plugin;
CameraErrorHandler m_onError;
std::string m_error;
Aurora::StreamCamera::CameraManager *m_manager;
std::shared_ptr<Aurora::StreamCamera::Camera> m_camera;
std::shared_ptr<Aurora::StreamCamera::GraphicBuffer> m_buffer;
int64_t m_textureId = 0;
size_t m_captureWidth = 0;
@@ -54,7 +46,8 @@ private:
size_t m_viewWidth = 0;
size_t m_viewHeight = 0;
QImage *m_image;
std::shared_ptr<uint8_t> m_bits;
int m_counter = 0;
};
#endif /* TEXTURE_CAMERA_BUFFER_H */

20
packages/camera/camera_aurora/aurora/include/camera_aurora/texture_camera_egl_helper.h

@@ -1,20 +0,0 @@
/**
* SPDX-FileCopyrightText: Copyright 2023 Open Mobile Platform LLC <community@omp.ru>
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef TEXTURE_CAMERA_EGL_HELPER_H
#define TEXTURE_CAMERA_EGL_HELPER_H
#include <streamcamera/streamcamera.h>
#include <EGL/egl.h>
#include <EGL/eglext.h>
class TextureCameraEGLHelper
{
public:
static void EGLInit();
static EGLImageKHR EGLCreateImage(const void* handle, void* egl_display, void* egl_contex);
};
#endif /* TEXTURE_CAMERA_EGL_HELPER_H */

33
packages/camera/camera_aurora/aurora/include/camera_aurora/texture_camera_pixels_helper.h

@@ -1,33 +0,0 @@
/**
* SPDX-FileCopyrightText: Copyright 2023 Open Mobile Platform LLC <community@omp.ru>
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef TEXTURE_CAMERA_PIXELS_HELPER_H
#define TEXTURE_CAMERA_PIXELS_HELPER_H
#include <flutter/plugin-interface.h>
#include <streamcamera/streamcamera.h>
#include <QImage>
#include <QtCore>
class TextureCameraPixelsHelper
{
public:
static QImage *YUVtoARGB(std::shared_ptr<const Aurora::StreamCamera::YCbCrFrame> frame);
private:
static quint32 yuvToArgb(qint32 y, qint32 rv, qint32 guv, qint32 bu, qint32 a);
static void planarYuv420ToArgb(const uchar *y,
const uchar *u,
const uchar *v,
qint32 yStride,
qint32 uStride,
qint32 vStride,
qint32 uvPixelStride,
quint32 *rgb,
qint32 width,
qint32 height);
};
#endif /* TEXTURE_CAMERA_PIXELS_HELPER_H */
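The Qt-based YUV-to-ARGB helper above is deleted by this commit; the conversion is presumably taken over by the bundled libyuv inside texture_camera.cpp, whose diff is not shown here. A minimal sketch of what such a conversion can look like with libyuv::I420ToARGB; the helper below is illustrative, not the plugin's actual code. Note that libyuv's "ARGB" is the byte order B,G,R,A in memory, 4 bytes per pixel.

// Illustrative sketch only (not the plugin's actual code): planar YUV 4:2:0
// to 32-bit ARGB using the bundled libyuv.
#include <cstdint>
#include <vector>

#include <libyuv.h>

std::vector<uint8_t> YuvToArgb(const uint8_t *y, int y_stride,
                               const uint8_t *u, int u_stride,
                               const uint8_t *v, int v_stride,
                               int width, int height)
{
    std::vector<uint8_t> argb(static_cast<size_t>(width) * height * 4);
    libyuv::I420ToARGB(y, y_stride,
                       u, u_stride,
                       v, v_stride,
                       argb.data(), width * 4,
                       width, height);
    return argb;
}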

33
packages/camera/camera_aurora/aurora/include/libyuv/libyuv.h

@@ -0,0 +1,33 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_H_
#define INCLUDE_LIBYUV_H_
#include "libyuv/basic_types.h"
#include "libyuv/compare.h"
#include "libyuv/convert.h"
#include "libyuv/convert_argb.h"
#include "libyuv/convert_from.h"
#include "libyuv/convert_from_argb.h"
#include "libyuv/cpu_id.h"
#include "libyuv/mjpeg_decoder.h"
#include "libyuv/planar_functions.h"
#include "libyuv/rotate.h"
#include "libyuv/rotate_argb.h"
#include "libyuv/row.h"
#include "libyuv/scale.h"
#include "libyuv/scale_argb.h"
#include "libyuv/scale_row.h"
#include "libyuv/scale_uv.h"
#include "libyuv/version.h"
#include "libyuv/video_common.h"
#endif // INCLUDE_LIBYUV_H_

68
packages/camera/camera_aurora/aurora/include/libyuv/libyuv/basic_types.h

@@ -0,0 +1,68 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_BASIC_TYPES_H_
#define INCLUDE_LIBYUV_BASIC_TYPES_H_
#include <stddef.h> // For size_t and NULL
#if !defined(INT_TYPES_DEFINED) && !defined(GG_LONGLONG)
#define INT_TYPES_DEFINED
#if defined(_MSC_VER) && (_MSC_VER < 1600)
#include <sys/types.h> // for uintptr_t on x86
typedef unsigned __int64 uint64_t;
typedef __int64 int64_t;
typedef unsigned int uint32_t;
typedef int int32_t;
typedef unsigned short uint16_t;
typedef short int16_t;
typedef unsigned char uint8_t;
typedef signed char int8_t;
#else
#include <stdint.h> // for uintptr_t and C99 types
#endif // defined(_MSC_VER) && (_MSC_VER < 1600)
// Types are deprecated. Enable this macro for legacy types.
#ifdef LIBYUV_LEGACY_TYPES
typedef uint64_t uint64;
typedef int64_t int64;
typedef uint32_t uint32;
typedef int32_t int32;
typedef uint16_t uint16;
typedef int16_t int16;
typedef uint8_t uint8;
typedef int8_t int8;
#endif // LIBYUV_LEGACY_TYPES
#endif // INT_TYPES_DEFINED
#if !defined(LIBYUV_API)
#if defined(_WIN32) || defined(__CYGWIN__)
#if defined(LIBYUV_BUILDING_SHARED_LIBRARY)
#define LIBYUV_API __declspec(dllexport)
#elif defined(LIBYUV_USING_SHARED_LIBRARY)
#define LIBYUV_API __declspec(dllimport)
#else
#define LIBYUV_API
#endif // LIBYUV_BUILDING_SHARED_LIBRARY
#elif defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__APPLE__) && \
(defined(LIBYUV_BUILDING_SHARED_LIBRARY) || \
defined(LIBYUV_USING_SHARED_LIBRARY))
#define LIBYUV_API __attribute__((visibility("default")))
#else
#define LIBYUV_API
#endif // __GNUC__
#endif // LIBYUV_API
// TODO(fbarchard): Remove bool macros.
#define LIBYUV_BOOL int
#define LIBYUV_FALSE 0
#define LIBYUV_TRUE 1
#endif // INCLUDE_LIBYUV_BASIC_TYPES_H_

111
packages/camera/camera_aurora/aurora/include/libyuv/libyuv/compare.h

@@ -0,0 +1,111 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_COMPARE_H_
#define INCLUDE_LIBYUV_COMPARE_H_
#include "libyuv/basic_types.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// Compute a hash for specified memory. Seed of 5381 recommended.
LIBYUV_API
uint32_t HashDjb2(const uint8_t* src, uint64_t count, uint32_t seed);
// Hamming Distance
LIBYUV_API
uint64_t ComputeHammingDistance(const uint8_t* src_a,
const uint8_t* src_b,
int count);
// Scan an opaque argb image and return fourcc based on alpha offset.
// Returns FOURCC_ARGB, FOURCC_BGRA, or 0 if unknown.
LIBYUV_API
uint32_t ARGBDetect(const uint8_t* argb,
int stride_argb,
int width,
int height);
// Sum Square Error - used to compute Mean Square Error or PSNR.
LIBYUV_API
uint64_t ComputeSumSquareError(const uint8_t* src_a,
const uint8_t* src_b,
int count);
LIBYUV_API
uint64_t ComputeSumSquareErrorPlane(const uint8_t* src_a,
int stride_a,
const uint8_t* src_b,
int stride_b,
int width,
int height);
static const int kMaxPsnr = 128;
LIBYUV_API
double SumSquareErrorToPsnr(uint64_t sse, uint64_t count);
LIBYUV_API
double CalcFramePsnr(const uint8_t* src_a,
int stride_a,
const uint8_t* src_b,
int stride_b,
int width,
int height);
LIBYUV_API
double I420Psnr(const uint8_t* src_y_a,
int stride_y_a,
const uint8_t* src_u_a,
int stride_u_a,
const uint8_t* src_v_a,
int stride_v_a,
const uint8_t* src_y_b,
int stride_y_b,
const uint8_t* src_u_b,
int stride_u_b,
const uint8_t* src_v_b,
int stride_v_b,
int width,
int height);
LIBYUV_API
double CalcFrameSsim(const uint8_t* src_a,
int stride_a,
const uint8_t* src_b,
int stride_b,
int width,
int height);
LIBYUV_API
double I420Ssim(const uint8_t* src_y_a,
int stride_y_a,
const uint8_t* src_u_a,
int stride_u_a,
const uint8_t* src_v_a,
int stride_v_a,
const uint8_t* src_y_b,
int stride_y_b,
const uint8_t* src_u_b,
int stride_u_b,
const uint8_t* src_v_b,
int stride_v_b,
int width,
int height);
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_COMPARE_H_
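A short illustrative use of the comparison API declared above, computing PSNR for two equally sized buffers (the wrapper name is an assumption, not part of the commit):

// Illustrative only: PSNR between two equally sized byte buffers, using
// ComputeSumSquareError and SumSquareErrorToPsnr declared above.
#include <cstdint>

#include "libyuv/compare.h"

double BufferPsnr(const uint8_t *a, const uint8_t *b, int count)
{
    const uint64_t sse = libyuv::ComputeSumSquareError(a, b, count);
    // Identical buffers (sse == 0) map to kMaxPsnr (128).
    return libyuv::SumSquareErrorToPsnr(sse, static_cast<uint64_t>(count));
}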

130
packages/camera/camera_aurora/aurora/include/libyuv/libyuv/compare_row.h

@@ -0,0 +1,130 @@
/*
* Copyright 2013 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_COMPARE_ROW_H_
#define INCLUDE_LIBYUV_COMPARE_ROW_H_
#include "libyuv/basic_types.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
#if defined(__pnacl__) || defined(__CLR_VER) || \
(defined(__native_client__) && defined(__x86_64__)) || \
(defined(__i386__) && !defined(__SSE__) && !defined(__clang__))
#define LIBYUV_DISABLE_X86
#endif
#if defined(__native_client__)
#define LIBYUV_DISABLE_NEON
#endif
// MemorySanitizer does not support assembly code yet. http://crbug.com/344505
#if defined(__has_feature)
#if __has_feature(memory_sanitizer)
#define LIBYUV_DISABLE_X86
#endif
#endif
// Visual C 2012 required for AVX2.
#if defined(_M_IX86) && !defined(__clang__) && defined(_MSC_VER) && \
_MSC_VER >= 1700
#define VISUALC_HAS_AVX2 1
#endif // VisualStudio >= 2012
// clang >= 3.4.0 required for AVX2.
#if defined(__clang__) && (defined(__x86_64__) || defined(__i386__))
#if (__clang_major__ > 3) || (__clang_major__ == 3 && (__clang_minor__ >= 4))
#define CLANG_HAS_AVX2 1
#endif // clang >= 3.4
#endif // __clang__
// The following are available for Visual C and GCC:
#if !defined(LIBYUV_DISABLE_X86) && \
(defined(__x86_64__) || defined(__i386__) || defined(_M_IX86))
#define HAS_HASHDJB2_SSE41
#define HAS_SUMSQUAREERROR_SSE2
#define HAS_HAMMINGDISTANCE_SSE42
#endif
// The following are available for Visual C and clangcl 32 bit:
#if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && defined(_MSC_VER) && \
!defined(__clang__) && \
(defined(VISUALC_HAS_AVX2) || defined(CLANG_HAS_AVX2))
#define HAS_HASHDJB2_AVX2
#define HAS_SUMSQUAREERROR_AVX2
#endif
// The following are available for GCC and clangcl:
#if !defined(LIBYUV_DISABLE_X86) && (defined(__x86_64__) || defined(__i386__))
#define HAS_HAMMINGDISTANCE_SSSE3
#endif
// The following are available for GCC and clangcl:
#if !defined(LIBYUV_DISABLE_X86) && defined(CLANG_HAS_AVX2) && \
(defined(__x86_64__) || defined(__i386__))
#define HAS_HAMMINGDISTANCE_AVX2
#endif
// The following are available for Neon:
#if !defined(LIBYUV_DISABLE_NEON) && \
(defined(__ARM_NEON__) || defined(LIBYUV_NEON) || defined(__aarch64__))
#define HAS_SUMSQUAREERROR_NEON
#define HAS_HAMMINGDISTANCE_NEON
#endif
#if !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)
#define HAS_HAMMINGDISTANCE_MSA
#define HAS_SUMSQUAREERROR_MSA
#endif
uint32_t HammingDistance_C(const uint8_t* src_a,
const uint8_t* src_b,
int count);
uint32_t HammingDistance_SSE42(const uint8_t* src_a,
const uint8_t* src_b,
int count);
uint32_t HammingDistance_SSSE3(const uint8_t* src_a,
const uint8_t* src_b,
int count);
uint32_t HammingDistance_AVX2(const uint8_t* src_a,
const uint8_t* src_b,
int count);
uint32_t HammingDistance_NEON(const uint8_t* src_a,
const uint8_t* src_b,
int count);
uint32_t HammingDistance_MSA(const uint8_t* src_a,
const uint8_t* src_b,
int count);
uint32_t SumSquareError_C(const uint8_t* src_a,
const uint8_t* src_b,
int count);
uint32_t SumSquareError_SSE2(const uint8_t* src_a,
const uint8_t* src_b,
int count);
uint32_t SumSquareError_AVX2(const uint8_t* src_a,
const uint8_t* src_b,
int count);
uint32_t SumSquareError_NEON(const uint8_t* src_a,
const uint8_t* src_b,
int count);
uint32_t SumSquareError_MSA(const uint8_t* src_a,
const uint8_t* src_b,
int count);
uint32_t HashDjb2_C(const uint8_t* src, int count, uint32_t seed);
uint32_t HashDjb2_SSE41(const uint8_t* src, int count, uint32_t seed);
uint32_t HashDjb2_AVX2(const uint8_t* src, int count, uint32_t seed);
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_COMPARE_ROW_H_

933
packages/camera/camera_aurora/aurora/include/libyuv/libyuv/convert.h

@@ -0,0 +1,933 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_CONVERT_H_
#define INCLUDE_LIBYUV_CONVERT_H_
#include "libyuv/basic_types.h"
#include "libyuv/rotate.h" // For enum RotationMode.
// TODO(fbarchard): fix WebRTC source to include following libyuv headers:
#include "libyuv/convert_argb.h" // For WebRTC I420ToARGB. b/620
#include "libyuv/convert_from.h" // For WebRTC ConvertFromI420. b/620
#include "libyuv/planar_functions.h" // For WebRTC I420Rect, CopyPlane. b/618
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// Convert I444 to I420.
LIBYUV_API
int I444ToI420(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert I444 to NV12.
LIBYUV_API
int I444ToNV12(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert I444 to NV21.
LIBYUV_API
int I444ToNV21(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_vu,
int dst_stride_vu,
int width,
int height);
// Convert I422 to I420.
LIBYUV_API
int I422ToI420(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert I422 to I444.
LIBYUV_API
int I422ToI444(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert I422 to I210.
LIBYUV_API
int I422ToI210(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert MM21 to NV12.
LIBYUV_API
int MM21ToNV12(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_uv,
int src_stride_uv,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert MM21 to I420.
LIBYUV_API
int MM21ToI420(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_uv,
int src_stride_uv,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert MM21 to YUY2
LIBYUV_API
int MM21ToYUY2(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_uv,
int src_stride_uv,
uint8_t* dst_yuy2,
int dst_stride_yuy2,
int width,
int height);
// Convert I422 to NV21.
LIBYUV_API
int I422ToNV21(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_vu,
int dst_stride_vu,
int width,
int height);
// Copy I420 to I420.
#define I420ToI420 I420Copy
LIBYUV_API
int I420Copy(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert I420 to I444.
LIBYUV_API
int I420ToI444(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Copy I010 to I010
#define I010ToI010 I010Copy
#define H010ToH010 I010Copy
LIBYUV_API
int I010Copy(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert 10 bit YUV to 8 bit
#define H010ToH420 I010ToI420
LIBYUV_API
int I010ToI420(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
#define H210ToH420 I210ToI420
LIBYUV_API
int I210ToI420(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
#define H210ToH422 I210ToI422
LIBYUV_API
int I210ToI422(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
#define H410ToH444 I410ToI444
LIBYUV_API
int I410ToI444(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
#define H012ToH420 I012ToI420
LIBYUV_API
int I012ToI420(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
#define H212ToH422 I212ToI422
LIBYUV_API
int I212ToI422(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
#define H412ToH444 I412ToI444
LIBYUV_API
int I412ToI444(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
#define I412ToI012 I410ToI010
#define H410ToH010 I410ToI010
#define H412ToH012 I410ToI010
LIBYUV_API
int I410ToI010(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int width,
int height);
#define I212ToI012 I210ToI010
#define H210ToH010 I210ToI010
#define H212ToH012 I210ToI010
LIBYUV_API
int I210ToI010(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert I010 to I410
LIBYUV_API
int I010ToI410(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert I012 to I412
#define I012ToI412 I010ToI410
// Convert I210 to I410
LIBYUV_API
int I210ToI410(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert I212 to I412
#define I212ToI412 I210ToI410
// Convert I010 to P010
LIBYUV_API
int I010ToP010(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert I210 to P210
LIBYUV_API
int I210ToP210(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert I012 to P012
LIBYUV_API
int I012ToP012(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert I212 to P212
LIBYUV_API
int I212ToP212(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert I400 (grey) to I420.
LIBYUV_API
int I400ToI420(const uint8_t* src_y,
int src_stride_y,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert I400 (grey) to NV21.
LIBYUV_API
int I400ToNV21(const uint8_t* src_y,
int src_stride_y,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_vu,
int dst_stride_vu,
int width,
int height);
#define J400ToJ420 I400ToI420
// Convert NV12 to I420.
LIBYUV_API
int NV12ToI420(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_uv,
int src_stride_uv,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert NV21 to I420.
LIBYUV_API
int NV21ToI420(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_vu,
int src_stride_vu,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert NV12 to NV24.
LIBYUV_API
int NV12ToNV24(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_uv,
int src_stride_uv,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert NV16 to NV24.
LIBYUV_API
int NV16ToNV24(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_uv,
int src_stride_uv,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert P010 to P410.
LIBYUV_API
int P010ToP410(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_uv,
int src_stride_uv,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert P012 to P412.
#define P012ToP412 P010ToP410
// Convert P016 to P416.
#define P016ToP416 P010ToP410
// Convert P210 to P410.
LIBYUV_API
int P210ToP410(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_uv,
int src_stride_uv,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert P212 to P412.
#define P212ToP412 P210ToP410
// Convert P216 to P416.
#define P216ToP416 P210ToP410
// Convert YUY2 to I420.
LIBYUV_API
int YUY2ToI420(const uint8_t* src_yuy2,
int src_stride_yuy2,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert UYVY to I420.
LIBYUV_API
int UYVYToI420(const uint8_t* src_uyvy,
int src_stride_uyvy,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert AYUV to NV12.
LIBYUV_API
int AYUVToNV12(const uint8_t* src_ayuv,
int src_stride_ayuv,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert AYUV to NV21.
LIBYUV_API
int AYUVToNV21(const uint8_t* src_ayuv,
int src_stride_ayuv,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_vu,
int dst_stride_vu,
int width,
int height);
// Convert Android420 to I420.
LIBYUV_API
int Android420ToI420(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
int src_pixel_stride_uv,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// ARGB little endian (bgra in memory) to I420.
LIBYUV_API
int ARGBToI420(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// BGRA little endian (argb in memory) to I420.
LIBYUV_API
int BGRAToI420(const uint8_t* src_bgra,
int src_stride_bgra,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// ABGR little endian (rgba in memory) to I420.
LIBYUV_API
int ABGRToI420(const uint8_t* src_abgr,
int src_stride_abgr,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// RGBA little endian (abgr in memory) to I420.
LIBYUV_API
int RGBAToI420(const uint8_t* src_rgba,
int src_stride_rgba,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// RGB little endian (bgr in memory) to I420.
LIBYUV_API
int RGB24ToI420(const uint8_t* src_rgb24,
int src_stride_rgb24,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// RGB little endian (bgr in memory) to J420.
LIBYUV_API
int RGB24ToJ420(const uint8_t* src_rgb24,
int src_stride_rgb24,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// RGB big endian (rgb in memory) to I420.
LIBYUV_API
int RAWToI420(const uint8_t* src_raw,
int src_stride_raw,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// RGB big endian (rgb in memory) to J420.
LIBYUV_API
int RAWToJ420(const uint8_t* src_raw,
int src_stride_raw,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// RGB16 (RGBP fourcc) little endian to I420.
LIBYUV_API
int RGB565ToI420(const uint8_t* src_rgb565,
int src_stride_rgb565,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// RGB15 (RGBO fourcc) little endian to I420.
LIBYUV_API
int ARGB1555ToI420(const uint8_t* src_argb1555,
int src_stride_argb1555,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// RGB12 (R444 fourcc) little endian to I420.
LIBYUV_API
int ARGB4444ToI420(const uint8_t* src_argb4444,
int src_stride_argb4444,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// RGB little endian (bgr in memory) to J400.
LIBYUV_API
int RGB24ToJ400(const uint8_t* src_rgb24,
int src_stride_rgb24,
uint8_t* dst_yj,
int dst_stride_yj,
int width,
int height);
// RGB big endian (rgb in memory) to J400.
LIBYUV_API
int RAWToJ400(const uint8_t* src_raw,
int src_stride_raw,
uint8_t* dst_yj,
int dst_stride_yj,
int width,
int height);
// src_width/height provided by capture.
// dst_width/height for clipping determine final size.
LIBYUV_API
int MJPGToI420(const uint8_t* sample,
size_t sample_size,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int src_width,
int src_height,
int dst_width,
int dst_height);
// JPEG to NV21
LIBYUV_API
int MJPGToNV21(const uint8_t* sample,
size_t sample_size,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_vu,
int dst_stride_vu,
int src_width,
int src_height,
int dst_width,
int dst_height);
// JPEG to NV12
LIBYUV_API
int MJPGToNV12(const uint8_t* sample,
size_t sample_size,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_uv,
int dst_stride_uv,
int src_width,
int src_height,
int dst_width,
int dst_height);
// Query size of MJPG in pixels.
LIBYUV_API
int MJPGSize(const uint8_t* sample,
size_t sample_size,
int* width,
int* height);
// Convert camera sample to I420 with cropping, rotation and vertical flip.
// "src_size" is needed to parse MJPG.
// "dst_stride_y" number of bytes in a row of the dst_y plane.
// Normally this would be the same as dst_width, with recommended alignment
// to 16 bytes for better efficiency.
// If rotation of 90 or 270 is used, stride is affected. The caller should
// allocate the I420 buffer according to rotation.
// "dst_stride_u" number of bytes in a row of the dst_u plane.
// Normally this would be the same as (dst_width + 1) / 2, with
// recommended alignment to 16 bytes for better efficiency.
// If rotation of 90 or 270 is used, stride is affected.
// "crop_x" and "crop_y" are starting position for cropping.
// To center, crop_x = (src_width - dst_width) / 2
// crop_y = (src_height - dst_height) / 2
// "src_width" / "src_height" is size of src_frame in pixels.
// "src_height" can be negative indicating a vertically flipped image source.
// "crop_width" / "crop_height" is the size to crop the src to.
// Must be less than or equal to src_width/src_height
// Cropping parameters are pre-rotation.
// "rotation" can be 0, 90, 180 or 270.
// "fourcc" is a fourcc. ie 'I420', 'YUY2'
// Returns 0 for successful; -1 for invalid parameter. Non-zero for failure.
LIBYUV_API
int ConvertToI420(const uint8_t* sample,
size_t sample_size,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int crop_x,
int crop_y,
int src_width,
int src_height,
int crop_width,
int crop_height,
enum RotationMode rotation,
uint32_t fourcc);
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_CONVERT_H_
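A hedged sketch of the ConvertToI420 entry point documented above: center-crop a packed YUY2 camera frame and rotate it 90 degrees while converting to I420. FOURCC_YUY2 (video_common.h) and kRotate90 (rotate.h) come from headers added by this commit but not reproduced in this excerpt; the wrapper itself is illustrative only.

// Illustrative wrapper only. Per the comment above, after a 90/270-degree
// rotation the destination is crop_height x crop_width, and the I420 buffers
// and strides must be allocated accordingly.
#include <cstdint>
#include <vector>

#include "libyuv.h"

bool Yuy2ToI420Rotated90(const uint8_t *sample, size_t sample_size,
                         int src_width, int src_height,
                         int crop_width, int crop_height,
                         std::vector<uint8_t> &y,
                         std::vector<uint8_t> &u,
                         std::vector<uint8_t> &v)
{
    const int dst_width = crop_height;  // swapped by the 90-degree rotation
    const int dst_height = crop_width;
    const int dst_half = (dst_width + 1) / 2;
    y.resize(static_cast<size_t>(dst_width) * dst_height);
    u.resize(static_cast<size_t>(dst_half) * ((dst_height + 1) / 2));
    v.resize(u.size());
    const int crop_x = (src_width - crop_width) / 2;    // centering, as
    const int crop_y = (src_height - crop_height) / 2;  // documented above
    return libyuv::ConvertToI420(sample, sample_size,
                                 y.data(), dst_width,
                                 u.data(), dst_half,
                                 v.data(), dst_half,
                                 crop_x, crop_y,
                                 src_width, src_height,
                                 crop_width, crop_height,
                                 libyuv::kRotate90,
                                 libyuv::FOURCC_YUY2) == 0;
}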

2313
packages/camera/camera_aurora/aurora/include/libyuv/libyuv/convert_argb.h

File diff suppressed because it is too large.

203
packages/camera/camera_aurora/aurora/include/libyuv/libyuv/convert_from.h

@@ -0,0 +1,203 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_CONVERT_FROM_H_
#define INCLUDE_LIBYUV_CONVERT_FROM_H_
#include "libyuv/basic_types.h"
#include "libyuv/rotate.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// See Also convert.h for conversions from formats to I420.
// Convert 8 bit YUV to 10 bit.
#define H420ToH010 I420ToI010
LIBYUV_API
int I420ToI010(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert 8 bit YUV to 12 bit.
#define H420ToH012 I420ToI012
LIBYUV_API
int I420ToI012(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int width,
int height);
LIBYUV_API
int I420ToI422(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
LIBYUV_API
int I420ToI444(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Copy to I400. Source can be I420, I422, I444, I400, NV12 or NV21.
LIBYUV_API
int I400Copy(const uint8_t* src_y,
int src_stride_y,
uint8_t* dst_y,
int dst_stride_y,
int width,
int height);
LIBYUV_API
int I420ToNV12(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_uv,
int dst_stride_uv,
int width,
int height);
LIBYUV_API
int I420ToNV21(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_vu,
int dst_stride_vu,
int width,
int height);
LIBYUV_API
int I420ToYUY2(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_yuy2,
int dst_stride_yuy2,
int width,
int height);
LIBYUV_API
int I420ToUYVY(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_uyvy,
int dst_stride_uyvy,
int width,
int height);
// The following are from convert_argb.h
// DEPRECATED: The prototypes will be removed in future. Use convert_argb.h
// Convert I420 to ARGB.
LIBYUV_API
int I420ToARGB(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_argb,
int dst_stride_argb,
int width,
int height);
// Convert I420 to ABGR.
LIBYUV_API
int I420ToABGR(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_abgr,
int dst_stride_abgr,
int width,
int height);
// Convert I420 to specified format.
// "dst_sample_stride" is bytes in a row for the destination. Pass 0 if the
// buffer has contiguous rows. Can be negative. A multiple of 16 is optimal.
LIBYUV_API
int ConvertFromI420(const uint8_t* y,
int y_stride,
const uint8_t* u,
int u_stride,
const uint8_t* v,
int v_stride,
uint8_t* dst_sample,
int dst_sample_stride,
int width,
int height,
uint32_t fourcc);
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_CONVERT_FROM_H_
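An illustrative use of I420ToNV12 as declared above, repacking I420 planes into NV12 with interleaved UV (the wrapper is an assumption, not part of the commit):

// Illustrative only: repack I420 planes into NV12, e.g. before handing a
// frame to an encoder.
#include <cstdint>
#include <vector>

#include "libyuv/convert_from.h"

void RepackI420ToNv12(const uint8_t *y, const uint8_t *u, const uint8_t *v,
                      int width, int height,
                      std::vector<uint8_t> &dst_y, std::vector<uint8_t> &dst_uv)
{
    const int half_width = (width + 1) / 2;
    const int half_height = (height + 1) / 2;
    dst_y.resize(static_cast<size_t>(width) * height);
    dst_uv.resize(static_cast<size_t>(half_width) * 2 * half_height);
    libyuv::I420ToNV12(y, width,
                       u, half_width,
                       v, half_width,
                       dst_y.data(), width,
                       dst_uv.data(), half_width * 2,
                       width, height);
}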

381
packages/camera/camera_aurora/aurora/include/libyuv/libyuv/convert_from_argb.h

@@ -0,0 +1,381 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_CONVERT_FROM_ARGB_H_
#define INCLUDE_LIBYUV_CONVERT_FROM_ARGB_H_
#include "libyuv/basic_types.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// Copy ARGB to ARGB.
#define ARGBToARGB ARGBCopy
LIBYUV_API
int ARGBCopy(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_argb,
int dst_stride_argb,
int width,
int height);
// Convert ARGB To BGRA.
LIBYUV_API
int ARGBToBGRA(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_bgra,
int dst_stride_bgra,
int width,
int height);
// Convert ARGB To ABGR.
LIBYUV_API
int ARGBToABGR(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_abgr,
int dst_stride_abgr,
int width,
int height);
// Convert ARGB To RGBA.
LIBYUV_API
int ARGBToRGBA(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_rgba,
int dst_stride_rgba,
int width,
int height);
// Aliases
#define ARGBToAB30 ABGRToAR30
#define ABGRToAB30 ARGBToAR30
// Convert ABGR To AR30.
LIBYUV_API
int ABGRToAR30(const uint8_t* src_abgr,
int src_stride_abgr,
uint8_t* dst_ar30,
int dst_stride_ar30,
int width,
int height);
// Convert ARGB To AR30.
LIBYUV_API
int ARGBToAR30(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_ar30,
int dst_stride_ar30,
int width,
int height);
// Aliases
#define ABGRToRGB24 ARGBToRAW
#define ABGRToRAW ARGBToRGB24
// Convert ARGB To RGB24.
LIBYUV_API
int ARGBToRGB24(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_rgb24,
int dst_stride_rgb24,
int width,
int height);
// Convert ARGB To RAW.
LIBYUV_API
int ARGBToRAW(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_raw,
int dst_stride_raw,
int width,
int height);
// Convert ARGB To RGB565.
LIBYUV_API
int ARGBToRGB565(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_rgb565,
int dst_stride_rgb565,
int width,
int height);
// Convert ARGB To RGB565 with 4x4 dither matrix (16 bytes).
// Values in dither matrix from 0 to 7 recommended.
// The order of the dither matrix is first byte is upper left.
// TODO(fbarchard): Consider pointer to 2d array for dither4x4.
// const uint8_t(*dither)[4][4];
LIBYUV_API
int ARGBToRGB565Dither(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_rgb565,
int dst_stride_rgb565,
const uint8_t* dither4x4,
int width,
int height);
// Convert ARGB To ARGB1555.
LIBYUV_API
int ARGBToARGB1555(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_argb1555,
int dst_stride_argb1555,
int width,
int height);
// Convert ARGB To ARGB4444.
LIBYUV_API
int ARGBToARGB4444(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_argb4444,
int dst_stride_argb4444,
int width,
int height);
// Convert ARGB To I444.
LIBYUV_API
int ARGBToI444(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert ARGB to AR64.
LIBYUV_API
int ARGBToAR64(const uint8_t* src_argb,
int src_stride_argb,
uint16_t* dst_ar64,
int dst_stride_ar64,
int width,
int height);
// Convert ABGR to AB64.
#define ABGRToAB64 ARGBToAR64
// Convert ARGB to AB64.
LIBYUV_API
int ARGBToAB64(const uint8_t* src_argb,
int src_stride_argb,
uint16_t* dst_ab64,
int dst_stride_ab64,
int width,
int height);
// Convert ABGR to AR64.
#define ABGRToAR64 ARGBToAB64
// Convert ARGB To I422.
LIBYUV_API
int ARGBToI422(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert ARGB To I420. (also in convert.h)
LIBYUV_API
int ARGBToI420(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert ARGB to J420. (JPeg full range I420).
LIBYUV_API
int ARGBToJ420(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_yj,
int dst_stride_yj,
uint8_t* dst_uj,
int dst_stride_uj,
uint8_t* dst_vj,
int dst_stride_vj,
int width,
int height);
// Convert ARGB to J422.
LIBYUV_API
int ARGBToJ422(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_yj,
int dst_stride_yj,
uint8_t* dst_uj,
int dst_stride_uj,
uint8_t* dst_vj,
int dst_stride_vj,
int width,
int height);
// Convert ARGB to J400. (JPeg full range).
LIBYUV_API
int ARGBToJ400(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_yj,
int dst_stride_yj,
int width,
int height);
// Convert ABGR to J420. (JPeg full range I420).
LIBYUV_API
int ABGRToJ420(const uint8_t* src_abgr,
int src_stride_abgr,
uint8_t* dst_yj,
int dst_stride_yj,
uint8_t* dst_uj,
int dst_stride_uj,
uint8_t* dst_vj,
int dst_stride_vj,
int width,
int height);
// Convert ABGR to J422.
LIBYUV_API
int ABGRToJ422(const uint8_t* src_abgr,
int src_stride_abgr,
uint8_t* dst_yj,
int dst_stride_yj,
uint8_t* dst_uj,
int dst_stride_uj,
uint8_t* dst_vj,
int dst_stride_vj,
int width,
int height);
// Convert ABGR to J400. (JPeg full range).
LIBYUV_API
int ABGRToJ400(const uint8_t* src_abgr,
int src_stride_abgr,
uint8_t* dst_yj,
int dst_stride_yj,
int width,
int height);
// Convert RGBA to J400. (JPeg full range).
LIBYUV_API
int RGBAToJ400(const uint8_t* src_rgba,
int src_stride_rgba,
uint8_t* dst_yj,
int dst_stride_yj,
int width,
int height);
// Convert ARGB to I400.
LIBYUV_API
int ARGBToI400(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_y,
int dst_stride_y,
int width,
int height);
// Convert ARGB to G. (Reverse of J400toARGB, which replicates G back to ARGB)
LIBYUV_API
int ARGBToG(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_g,
int dst_stride_g,
int width,
int height);
// Convert ARGB To NV12.
LIBYUV_API
int ARGBToNV12(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert ARGB To NV21.
LIBYUV_API
int ARGBToNV21(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_vu,
int dst_stride_vu,
int width,
int height);
// Convert ABGR To NV12.
LIBYUV_API
int ABGRToNV12(const uint8_t* src_abgr,
int src_stride_abgr,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert ABGR To NV21.
LIBYUV_API
int ABGRToNV21(const uint8_t* src_abgr,
int src_stride_abgr,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_vu,
int dst_stride_vu,
int width,
int height);
// Convert ARGB To YUY2.
LIBYUV_API
int ARGBToYUY2(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_yuy2,
int dst_stride_yuy2,
int width,
int height);
// Convert ARGB To UYVY.
LIBYUV_API
int ARGBToUYVY(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_uyvy,
int dst_stride_uyvy,
int width,
int height);
// RAW to JNV21 full range NV21
LIBYUV_API
int RAWToJNV21(const uint8_t* src_raw,
int src_stride_raw,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_vu,
int dst_stride_vu,
int width,
int height);
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_CONVERT_FROM_ARGB_H_
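An illustrative use of ARGBToRGB565Dither as declared above, with a 4x4 ordered-dither matrix holding values 0..7 (first byte upper left), per the comment in the header; the wrapper is an assumption, not part of the commit.

// Illustrative only: ARGB -> RGB565 with the 4x4 dither matrix described above.
#include <cstdint>
#include <vector>

#include "libyuv/convert_from_argb.h"

void ArgbToRgb565Dithered(const uint8_t *argb, int width, int height,
                          std::vector<uint8_t> &rgb565)
{
    static const uint8_t kDither4x4[16] = {
        0, 4, 1, 5,
        6, 2, 7, 3,
        1, 5, 0, 4,
        7, 3, 6, 2,
    };
    rgb565.resize(static_cast<size_t>(width) * height * 2);  // 2 bytes per pixel
    libyuv::ARGBToRGB565Dither(argb, width * 4,
                               rgb565.data(), width * 2,
                               kDither4x4, width, height);
}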

127
packages/camera/camera_aurora/aurora/include/libyuv/libyuv/cpu_id.h

@@ -0,0 +1,127 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_CPU_ID_H_
#define INCLUDE_LIBYUV_CPU_ID_H_
#include "libyuv/basic_types.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// Internal flag to indicate cpuid requires initialization.
static const int kCpuInitialized = 0x1;
// These flags are only valid on ARM processors.
static const int kCpuHasARM = 0x2;
static const int kCpuHasNEON = 0x4;
// 0x8 reserved for future ARM flag.
// These flags are only valid on x86 processors.
static const int kCpuHasX86 = 0x10;
static const int kCpuHasSSE2 = 0x20;
static const int kCpuHasSSSE3 = 0x40;
static const int kCpuHasSSE41 = 0x80;
static const int kCpuHasSSE42 = 0x100; // unused at this time.
static const int kCpuHasAVX = 0x200;
static const int kCpuHasAVX2 = 0x400;
static const int kCpuHasERMS = 0x800;
static const int kCpuHasFMA3 = 0x1000;
static const int kCpuHasF16C = 0x2000;
static const int kCpuHasGFNI = 0x4000;
static const int kCpuHasAVX512BW = 0x8000;
static const int kCpuHasAVX512VL = 0x10000;
static const int kCpuHasAVX512VNNI = 0x20000;
static const int kCpuHasAVX512VBMI = 0x40000;
static const int kCpuHasAVX512VBMI2 = 0x80000;
static const int kCpuHasAVX512VBITALG = 0x100000;
static const int kCpuHasAVX512VPOPCNTDQ = 0x200000;
// These flags are only valid on MIPS processors.
static const int kCpuHasMIPS = 0x400000;
static const int kCpuHasMSA = 0x800000;
// These flags are only valid on LOONGARCH processors.
static const int kCpuHasLOONGARCH = 0x2000000;
static const int kCpuHasLSX = 0x4000000;
static const int kCpuHasLASX = 0x8000000;
// Optional init function. TestCpuFlag does an auto-init.
// Returns cpu_info flags.
LIBYUV_API
int InitCpuFlags(void);
// Detect whether the CPU has SSE2, NEON, etc.
// The test_flag parameter should be one of the kCpuHas constants above.
// Returns non-zero if the instruction set is detected.
static __inline int TestCpuFlag(int test_flag) {
LIBYUV_API extern int cpu_info_;
#ifdef __ATOMIC_RELAXED
int cpu_info = __atomic_load_n(&cpu_info_, __ATOMIC_RELAXED);
#else
int cpu_info = cpu_info_;
#endif
return (!cpu_info ? InitCpuFlags() : cpu_info) & test_flag;
}
// Internal function for parsing /proc/cpuinfo.
LIBYUV_API
int ArmCpuCaps(const char* cpuinfo_name);
LIBYUV_API
int MipsCpuCaps(const char* cpuinfo_name);
// For testing, allow CPU flags to be disabled.
// e.g. MaskCpuFlags(~kCpuHasSSSE3) to disable SSSE3.
// MaskCpuFlags(-1) to enable all cpu specific optimizations.
// MaskCpuFlags(1) to disable all cpu specific optimizations.
// MaskCpuFlags(0) to reset state so next call will auto init.
// Returns cpu_info flags.
LIBYUV_API
int MaskCpuFlags(int enable_flags);
// Sets the CPU flags to |cpu_flags|, bypassing the detection code. |cpu_flags|
// should be a valid combination of the kCpuHas constants above and include
// kCpuInitialized. Use this method when running in a sandboxed process where
// the detection code might fail (as it might access /proc/cpuinfo). In such
// cases the cpu_info can be obtained from a non-sandboxed process by calling
// InitCpuFlags() and passed to the sandboxed process (via command line
// parameters, IPC...) which can then call this method to initialize the CPU
// flags.
// Notes:
// - when specifying 0 for |cpu_flags|, the auto initialization is enabled
// again.
// - enabling CPU features that are not supported by the CPU will result in
// undefined behavior.
// TODO(fbarchard): consider writing a helper function that translates from
// other library CPU info to libyuv CPU info and add a .md doc that explains
// CPU detection.
static __inline void SetCpuFlags(int cpu_flags) {
LIBYUV_API extern int cpu_info_;
#ifdef __ATOMIC_RELAXED
__atomic_store_n(&cpu_info_, cpu_flags, __ATOMIC_RELAXED);
#else
cpu_info_ = cpu_flags;
#endif
}
// Low level cpuid for X86. Returns zeros on other CPUs.
// eax is the info type that you want.
// ecx is typically the cpu number, and should normally be zero.
LIBYUV_API
void CpuId(int info_eax, int info_ecx, int* cpu_info);
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_CPU_ID_H_
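A short sketch of how the flags above are typically queried (illustrative only; ReportCpuFeatures is not part of the plugin):

#include <cstdio>
#include "libyuv/cpu_id.h"

void ReportCpuFeatures() {
  // TestCpuFlag() auto-initializes on first use.
  if (libyuv::TestCpuFlag(libyuv::kCpuHasNEON)) {
    std::printf("NEON paths enabled\n");
  }
  if (libyuv::TestCpuFlag(libyuv::kCpuHasSSSE3)) {
    std::printf("SSSE3 paths enabled\n");
  }
  // Force the portable C implementations, e.g. when bisecting a rendering bug.
  libyuv::MaskCpuFlags(1);
  // Reset so the next TestCpuFlag() call re-runs auto-detection.
  libyuv::MaskCpuFlags(0);
}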

1949
packages/camera/camera_aurora/aurora/include/libyuv/libyuv/loongson_intrinsics.h

File diff suppressed because it is too large.

246
packages/camera/camera_aurora/aurora/include/libyuv/libyuv/macros_msa.h

@ -0,0 +1,246 @@
/*
* Copyright 2016 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_MACROS_MSA_H_
#define INCLUDE_LIBYUV_MACROS_MSA_H_
#if !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)
#include <msa.h>
#include <stdint.h>
#if (__mips_isa_rev >= 6)
#define LW(psrc) \
({ \
const uint8_t* psrc_lw_m = (const uint8_t*)(psrc); \
uint32_t val_m; \
asm volatile("lw %[val_m], %[psrc_lw_m] \n" \
: [val_m] "=r"(val_m) \
: [psrc_lw_m] "m"(*psrc_lw_m)); \
val_m; \
})
#if (__mips == 64)
#define LD(psrc) \
({ \
const uint8_t* psrc_ld_m = (const uint8_t*)(psrc); \
uint64_t val_m = 0; \
asm volatile("ld %[val_m], %[psrc_ld_m] \n" \
: [val_m] "=r"(val_m) \
: [psrc_ld_m] "m"(*psrc_ld_m)); \
val_m; \
})
#else // !(__mips == 64)
#define LD(psrc) \
({ \
const uint8_t* psrc_ld_m = (const uint8_t*)(psrc); \
uint32_t val0_m, val1_m; \
uint64_t val_m = 0; \
val0_m = LW(psrc_ld_m); \
val1_m = LW(psrc_ld_m + 4); \
val_m = (uint64_t)(val1_m); /* NOLINT */ \
val_m = (uint64_t)((val_m << 32) & 0xFFFFFFFF00000000); /* NOLINT */ \
val_m = (uint64_t)(val_m | (uint64_t)val0_m); /* NOLINT */ \
val_m; \
})
#endif // (__mips == 64)
#define SW(val, pdst) \
({ \
uint8_t* pdst_sw_m = (uint8_t*)(pdst); /* NOLINT */ \
uint32_t val_m = (val); \
asm volatile("sw %[val_m], %[pdst_sw_m] \n" \
: [pdst_sw_m] "=m"(*pdst_sw_m) \
: [val_m] "r"(val_m)); \
})
#if (__mips == 64)
#define SD(val, pdst) \
({ \
uint8_t* pdst_sd_m = (uint8_t*)(pdst); /* NOLINT */ \
uint64_t val_m = (val); \
asm volatile("sd %[val_m], %[pdst_sd_m] \n" \
: [pdst_sd_m] "=m"(*pdst_sd_m) \
: [val_m] "r"(val_m)); \
})
#else // !(__mips == 64)
#define SD(val, pdst) \
({ \
uint8_t* pdst_sd_m = (uint8_t*)(pdst); /* NOLINT */ \
uint32_t val0_m, val1_m; \
val0_m = (uint32_t)((val)&0x00000000FFFFFFFF); \
val1_m = (uint32_t)(((val) >> 32) & 0x00000000FFFFFFFF); \
SW(val0_m, pdst_sd_m); \
SW(val1_m, pdst_sd_m + 4); \
})
#endif // !(__mips == 64)
#else // !(__mips_isa_rev >= 6)
#define LW(psrc) \
({ \
uint8_t* psrc_lw_m = (uint8_t*)(psrc); \
uint32_t val_lw_m; \
\
__asm__ volatile( \
"lwr %[val_lw_m], 0(%[psrc_lw_m]) \n\t" \
"lwl %[val_lw_m], 3(%[psrc_lw_m]) \n\t" \
\
: [val_lw_m] "=&r"(val_lw_m) \
: [psrc_lw_m] "r"(psrc_lw_m)); \
\
val_lw_m; \
})
#if (__mips == 64)
#define LD(psrc) \
({ \
uint8_t* psrc_ld_m = (uint8_t*)(psrc); \
uint64_t val_ld_m = 0; \
\
__asm__ volatile( \
"ldr %[val_ld_m], 0(%[psrc_ld_m]) \n\t" \
"ldl %[val_ld_m], 7(%[psrc_ld_m]) \n\t" \
\
: [val_ld_m] "=&r"(val_ld_m) \
: [psrc_ld_m] "r"(psrc_ld_m)); \
\
val_ld_m; \
})
#else // !(__mips == 64)
#define LD(psrc) \
({ \
const uint8_t* psrc_ld_m = (const uint8_t*)(psrc); \
uint32_t val0_m, val1_m; \
uint64_t val_m = 0; \
val0_m = LW(psrc_ld_m); \
val1_m = LW(psrc_ld_m + 4); \
val_m = (uint64_t)(val1_m); /* NOLINT */ \
val_m = (uint64_t)((val_m << 32) & 0xFFFFFFFF00000000); /* NOLINT */ \
val_m = (uint64_t)(val_m | (uint64_t)val0_m); /* NOLINT */ \
val_m; \
})
#endif // (__mips == 64)
#define SW(val, pdst) \
({ \
uint8_t* pdst_sw_m = (uint8_t*)(pdst); /* NOLINT */ \
uint32_t val_m = (val); \
asm volatile("usw %[val_m], %[pdst_sw_m] \n" \
: [pdst_sw_m] "=m"(*pdst_sw_m) \
: [val_m] "r"(val_m)); \
})
#define SD(val, pdst) \
({ \
uint8_t* pdst_sd_m = (uint8_t*)(pdst); /* NOLINT */ \
uint32_t val0_m, val1_m; \
val0_m = (uint32_t)((val)&0x00000000FFFFFFFF); \
val1_m = (uint32_t)(((val) >> 32) & 0x00000000FFFFFFFF); \
SW(val0_m, pdst_sd_m); \
SW(val1_m, pdst_sd_m + 4); \
})
#endif // (__mips_isa_rev >= 6)
// TODO(fbarchard): Consider removing __VAR_ARGS versions.
#define LD_B(RTYPE, psrc) *((RTYPE*)(psrc)) /* NOLINT */
#define LD_UB(...) LD_B(const v16u8, __VA_ARGS__)
#define LD_H(RTYPE, psrc) *((RTYPE*)(psrc)) /* NOLINT */
#define LD_UH(...) LD_H(const v8u16, __VA_ARGS__)
#define ST_B(RTYPE, in, pdst) *((RTYPE*)(pdst)) = (in) /* NOLINT */
#define ST_UB(...) ST_B(v16u8, __VA_ARGS__)
#define ST_H(RTYPE, in, pdst) *((RTYPE*)(pdst)) = (in) /* NOLINT */
#define ST_UH(...) ST_H(v8u16, __VA_ARGS__)
/* Description : Load two vectors with 16 'byte' sized elements
Arguments : Inputs - psrc, stride
Outputs - out0, out1
Return Type - as per RTYPE
Details : Load 16 byte elements in 'out0' from (psrc)
Load 16 byte elements in 'out1' from (psrc + stride)
*/
#define LD_B2(RTYPE, psrc, stride, out0, out1) \
{ \
out0 = LD_B(RTYPE, (psrc)); \
out1 = LD_B(RTYPE, (psrc) + stride); \
}
#define LD_UB2(...) LD_B2(const v16u8, __VA_ARGS__)
#define LD_B4(RTYPE, psrc, stride, out0, out1, out2, out3) \
{ \
LD_B2(RTYPE, (psrc), stride, out0, out1); \
LD_B2(RTYPE, (psrc) + 2 * stride, stride, out2, out3); \
}
#define LD_UB4(...) LD_B4(const v16u8, __VA_ARGS__)
/* Description : Store two vectors with stride each having 16 'byte' sized
elements
Arguments : Inputs - in0, in1, pdst, stride
Details : Store 16 byte elements from 'in0' to (pdst)
Store 16 byte elements from 'in1' to (pdst + stride)
*/
#define ST_B2(RTYPE, in0, in1, pdst, stride) \
{ \
ST_B(RTYPE, in0, (pdst)); \
ST_B(RTYPE, in1, (pdst) + stride); \
}
#define ST_UB2(...) ST_B2(v16u8, __VA_ARGS__)
#define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) \
{ \
ST_B2(RTYPE, in0, in1, (pdst), stride); \
ST_B2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
}
#define ST_UB4(...) ST_B4(v16u8, __VA_ARGS__)
/* Description : Store vectors of 8 halfword elements with stride
Arguments : Inputs - in0, in1, pdst, stride
Details : Store 8 halfword elements from 'in0' to (pdst)
Store 8 halfword elements from 'in1' to (pdst + stride)
*/
#define ST_H2(RTYPE, in0, in1, pdst, stride) \
{ \
ST_H(RTYPE, in0, (pdst)); \
ST_H(RTYPE, in1, (pdst) + stride); \
}
#define ST_UH2(...) ST_H2(v8u16, __VA_ARGS__)
// TODO(fbarchard): Consider using __msa_vshf_b and __msa_ilvr_b directly.
/* Description : Shuffle byte vector elements as per mask vector
Arguments : Inputs - in0, in1, in2, in3, mask0, mask1
Outputs - out0, out1
Return Type - as per RTYPE
Details : Byte elements from 'in0' & 'in1' are copied selectively to
'out0' as per control vector 'mask0'
*/
#define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \
{ \
out0 = (RTYPE)__msa_vshf_b((v16i8)mask0, (v16i8)in1, (v16i8)in0); \
out1 = (RTYPE)__msa_vshf_b((v16i8)mask1, (v16i8)in3, (v16i8)in2); \
}
#define VSHF_B2_UB(...) VSHF_B2(v16u8, __VA_ARGS__)
/* Description : Interleave both left and right half of input vectors
Arguments : Inputs - in0, in1
Outputs - out0, out1
Return Type - as per RTYPE
Details : Right half of byte elements from 'in0' and 'in1' are
interleaved and written to 'out0'
*/
#define ILVRL_B2(RTYPE, in0, in1, out0, out1) \
{ \
out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1); \
out1 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1); \
}
#define ILVRL_B2_UB(...) ILVRL_B2(v16u8, __VA_ARGS__)
#endif /* !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa) */
#endif // INCLUDE_LIBYUV_MACROS_MSA_H_

195
packages/camera/camera_aurora/aurora/include/libyuv/libyuv/mjpeg_decoder.h

@ -0,0 +1,195 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_MJPEG_DECODER_H_
#define INCLUDE_LIBYUV_MJPEG_DECODER_H_
#include "libyuv/basic_types.h"
#ifdef __cplusplus
// NOTE: For a simplified public API use convert.h MJPGToI420().
struct jpeg_common_struct;
struct jpeg_decompress_struct;
struct jpeg_source_mgr;
namespace libyuv {
#ifdef __cplusplus
extern "C" {
#endif
LIBYUV_BOOL ValidateJpeg(const uint8_t* sample, size_t sample_size);
#ifdef __cplusplus
} // extern "C"
#endif
static const uint32_t kUnknownDataSize = 0xFFFFFFFF;
enum JpegSubsamplingType {
kJpegYuv420,
kJpegYuv422,
kJpegYuv444,
kJpegYuv400,
kJpegUnknown
};
struct Buffer {
const uint8_t* data;
int len;
};
struct BufferVector {
Buffer* buffers;
int len;
int pos;
};
struct SetJmpErrorMgr;
// MJPEG ("Motion JPEG") is a pseudo-standard video codec where the frames are
// simply independent JPEG images with a fixed Huffman table (which is omitted).
// It is rarely used in video transmission, but is common as a camera capture
// format, especially in Logitech devices. This class implements a decoder for
// MJPEG frames.
//
// See http://tools.ietf.org/html/rfc2435
class LIBYUV_API MJpegDecoder {
public:
typedef void (*CallbackFunction)(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows);
static const int kColorSpaceUnknown;
static const int kColorSpaceGrayscale;
static const int kColorSpaceRgb;
static const int kColorSpaceYCbCr;
static const int kColorSpaceCMYK;
static const int kColorSpaceYCCK;
MJpegDecoder();
~MJpegDecoder();
// Loads a new frame, reads its headers, and determines the uncompressed
// image format.
// Returns LIBYUV_TRUE if image looks valid and format is supported.
// If return value is LIBYUV_TRUE, then the values for all the following
// getters are populated.
// src_len is the size of the compressed mjpeg frame in bytes.
LIBYUV_BOOL LoadFrame(const uint8_t* src, size_t src_len);
// Returns width of the last loaded frame in pixels.
int GetWidth();
// Returns height of the last loaded frame in pixels.
int GetHeight();
// Returns format of the last loaded frame. The return value is one of the
// kColorSpace* constants.
int GetColorSpace();
// Number of color components in the color space.
int GetNumComponents();
// Sample factors of the n-th component.
int GetHorizSampFactor(int component);
int GetVertSampFactor(int component);
int GetHorizSubSampFactor(int component);
int GetVertSubSampFactor(int component);
// Public for testability.
int GetImageScanlinesPerImcuRow();
// Public for testability.
int GetComponentScanlinesPerImcuRow(int component);
// Width of a component in bytes.
int GetComponentWidth(int component);
// Height of a component.
int GetComponentHeight(int component);
// Width of a component in bytes with padding for DCTSIZE. Public for testing.
int GetComponentStride(int component);
// Size of a component in bytes.
int GetComponentSize(int component);
// Call this after LoadFrame() if you decide you don't want to decode it
// after all.
LIBYUV_BOOL UnloadFrame();
// Decodes the entire image into a one-buffer-per-color-component format.
// dst_width must match exactly. dst_height must be <= the image height; if
// less, the image is cropped. "planes" must have size equal to at least
// GetNumComponents() and they must point to non-overlapping buffers of size
// at least GetComponentSize(i). The pointers in planes are incremented
// to point to after the end of the written data.
// TODO(fbarchard): Add dst_x, dst_y to allow specific rect to be decoded.
LIBYUV_BOOL DecodeToBuffers(uint8_t** planes, int dst_width, int dst_height);
// Decodes the entire image and passes the data via repeated calls to a
// callback function. Each call will get the data for a whole number of
// image scanlines.
// TODO(fbarchard): Add dst_x, dst_y to allow specific rect to be decoded.
LIBYUV_BOOL DecodeToCallback(CallbackFunction fn,
void* opaque,
int dst_width,
int dst_height);
// The helper function which recognizes the jpeg sub-sampling type.
static JpegSubsamplingType JpegSubsamplingTypeHelper(
int* subsample_x,
int* subsample_y,
int number_of_components);
private:
void AllocOutputBuffers(int num_outbufs);
void DestroyOutputBuffers();
LIBYUV_BOOL StartDecode();
LIBYUV_BOOL FinishDecode();
void SetScanlinePointers(uint8_t** data);
LIBYUV_BOOL DecodeImcuRow();
int GetComponentScanlinePadding(int component);
// A buffer holding the input data for a frame.
Buffer buf_;
BufferVector buf_vec_;
jpeg_decompress_struct* decompress_struct_;
jpeg_source_mgr* source_mgr_;
SetJmpErrorMgr* error_mgr_;
// LIBYUV_TRUE iff at least one component has scanline padding. (i.e.,
// GetComponentScanlinePadding() != 0.)
LIBYUV_BOOL has_scanline_padding_;
// Temporaries used to point to scanline outputs.
int num_outbufs_; // Outermost size of all arrays below.
uint8_t*** scanlines_;
int* scanlines_sizes_;
// Temporary buffer used for decoding when we can't decode directly to the
// output buffers. Large enough for just one iMCU row.
uint8_t** databuf_;
int* databuf_strides_;
};
} // namespace libyuv
#endif // __cplusplus
#endif // INCLUDE_LIBYUV_MJPEG_DECODER_H_
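A sketch of probing a frame with the class above (illustrative; ProbeMjpegFrame is not part of the plugin, and for full conversion the header itself points to MJPGToI420() in convert.h):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include "libyuv/mjpeg_decoder.h"

void ProbeMjpegFrame(const uint8_t* jpeg, size_t jpeg_size) {
  libyuv::MJpegDecoder decoder;
  if (!decoder.LoadFrame(jpeg, jpeg_size)) {
    std::printf("unsupported or corrupt MJPEG frame\n");
    return;
  }
  std::printf("frame %dx%d, %d component(s)\n",
              decoder.GetWidth(), decoder.GetHeight(),
              decoder.GetNumComponents());
  decoder.UnloadFrame();  // headers were enough; skip the actual decode
}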

1129
packages/camera/camera_aurora/aurora/include/libyuv/libyuv/planar_functions.h

File diff suppressed because it is too large.

232
packages/camera/camera_aurora/aurora/include/libyuv/libyuv/rotate.h

@ -0,0 +1,232 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_ROTATE_H_
#define INCLUDE_LIBYUV_ROTATE_H_
#include "libyuv/basic_types.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// Supported rotation.
typedef enum RotationMode {
kRotate0 = 0, // No rotation.
kRotate90 = 90, // Rotate 90 degrees clockwise.
kRotate180 = 180, // Rotate 180 degrees.
kRotate270 = 270, // Rotate 270 degrees clockwise.
// Deprecated.
kRotateNone = 0,
kRotateClockwise = 90,
kRotateCounterClockwise = 270,
} RotationModeEnum;
// Rotate I420 frame.
LIBYUV_API
int I420Rotate(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height,
enum RotationMode mode);
// Rotate I422 frame.
LIBYUV_API
int I422Rotate(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height,
enum RotationMode mode);
// Rotate I444 frame.
LIBYUV_API
int I444Rotate(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height,
enum RotationMode mode);
// Rotate NV12 input and store in I420.
LIBYUV_API
int NV12ToI420Rotate(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_uv,
int src_stride_uv,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height,
enum RotationMode mode);
// Convert Android420 to I420 with rotation.
// "rotation" can be 0, 90, 180 or 270.
LIBYUV_API
int Android420ToI420Rotate(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
int src_pixel_stride_uv,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height,
enum RotationMode rotation);
// Rotate a plane by 0, 90, 180, or 270.
LIBYUV_API
int RotatePlane(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width,
int height,
enum RotationMode mode);
// Rotate planes by 90, 180, 270. Deprecated.
LIBYUV_API
void RotatePlane90(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width,
int height);
LIBYUV_API
void RotatePlane180(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width,
int height);
LIBYUV_API
void RotatePlane270(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width,
int height);
// Rotations for when U and V are interleaved.
// These functions take one UV input pointer and
// split the data into two buffers while
// rotating them.
// Width and height are expected to be half size for NV12.
LIBYUV_API
int SplitRotateUV(const uint8_t* src_uv,
int src_stride_uv,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height,
enum RotationMode mode);
LIBYUV_API
void SplitRotateUV90(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width,
int height);
LIBYUV_API
void SplitRotateUV180(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width,
int height);
LIBYUV_API
void SplitRotateUV270(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width,
int height);
// The 90 and 270 functions are based on transposes.
// Doing a transpose with reversing the read/write
// order will result in a rotation by +- 90 degrees.
// Deprecated.
LIBYUV_API
void TransposePlane(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width,
int height);
LIBYUV_API
void SplitTransposeUV(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width,
int height);
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_ROTATE_H_
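A sketch of a 90-degree I420 rotation with the API above (illustrative; the I420Frame helper struct and RotateI420By90 are not part of the plugin). For kRotate90/kRotate270 the destination is allocated with swapped dimensions, while the width/height arguments describe the source:

#include <cstdint>
#include <vector>
#include "libyuv/rotate.h"

struct I420Frame {
  std::vector<uint8_t> y, u, v;
  int width = 0, height = 0;
};

I420Frame RotateI420By90(const I420Frame& src) {
  I420Frame dst;
  dst.width = src.height;  // dimensions swap for a 90-degree rotation
  dst.height = src.width;
  dst.y.resize(static_cast<size_t>(dst.width) * dst.height);
  dst.u.resize(static_cast<size_t>(dst.width / 2) * (dst.height / 2));
  dst.v.resize(dst.u.size());
  // Strides equal the plane widths because the planes are tightly packed.
  libyuv::I420Rotate(src.y.data(), src.width,
                     src.u.data(), src.width / 2,
                     src.v.data(), src.width / 2,
                     dst.y.data(), dst.width,
                     dst.u.data(), dst.width / 2,
                     dst.v.data(), dst.width / 2,
                     src.width, src.height, libyuv::kRotate90);
  return dst;
}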

37
packages/camera/camera_aurora/aurora/include/libyuv/libyuv/rotate_argb.h

@ -0,0 +1,37 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_ROTATE_ARGB_H_
#define INCLUDE_LIBYUV_ROTATE_ARGB_H_
#include "libyuv/basic_types.h"
#include "libyuv/rotate.h" // For RotationMode.
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// Rotate ARGB frame
LIBYUV_API
int ARGBRotate(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_argb,
int dst_stride_argb,
int src_width,
int src_height,
enum RotationMode mode);
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_ROTATE_ARGB_H_

224
packages/camera/camera_aurora/aurora/include/libyuv/libyuv/rotate_row.h

@ -0,0 +1,224 @@
/*
* Copyright 2013 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_ROTATE_ROW_H_
#define INCLUDE_LIBYUV_ROTATE_ROW_H_
#include "libyuv/basic_types.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
#if defined(__pnacl__) || defined(__CLR_VER) || \
(defined(__native_client__) && defined(__x86_64__)) || \
(defined(__i386__) && !defined(__SSE__) && !defined(__clang__))
#define LIBYUV_DISABLE_X86
#endif
#if defined(__native_client__)
#define LIBYUV_DISABLE_NEON
#endif
// MemorySanitizer does not support assembly code yet. http://crbug.com/344505
#if defined(__has_feature)
#if __has_feature(memory_sanitizer)
#define LIBYUV_DISABLE_X86
#endif
#endif
// The following are available for Visual C 32 bit:
#if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && defined(_MSC_VER) && \
!defined(__clang__)
#define HAS_TRANSPOSEWX8_SSSE3
#define HAS_TRANSPOSEUVWX8_SSE2
#endif
// The following are available for GCC 32 or 64 bit:
#if !defined(LIBYUV_DISABLE_X86) && (defined(__i386__) || defined(__x86_64__))
#define HAS_TRANSPOSEWX8_SSSE3
#endif
// The following are available for 64 bit GCC:
#if !defined(LIBYUV_DISABLE_X86) && defined(__x86_64__)
#define HAS_TRANSPOSEWX8_FAST_SSSE3
#define HAS_TRANSPOSEUVWX8_SSE2
#endif
#if !defined(LIBYUV_DISABLE_NEON) && \
(defined(__ARM_NEON__) || defined(LIBYUV_NEON) || defined(__aarch64__))
#define HAS_TRANSPOSEWX8_NEON
#define HAS_TRANSPOSEUVWX8_NEON
#endif
#if !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)
#define HAS_TRANSPOSEWX16_MSA
#define HAS_TRANSPOSEUVWX16_MSA
#endif
#if !defined(LIBYUV_DISABLE_LSX) && defined(__loongarch_sx)
#define HAS_TRANSPOSEWX16_LSX
#define HAS_TRANSPOSEUVWX16_LSX
#endif
void TransposeWxH_C(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width,
int height);
void TransposeWx8_C(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width);
void TransposeWx16_C(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width);
void TransposeWx8_NEON(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width);
void TransposeWx8_SSSE3(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width);
void TransposeWx8_Fast_SSSE3(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width);
void TransposeWx16_MSA(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width);
void TransposeWx16_LSX(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width);
void TransposeWx8_Any_NEON(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width);
void TransposeWx8_Any_SSSE3(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width);
void TransposeWx8_Fast_Any_SSSE3(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width);
void TransposeWx16_Any_MSA(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width);
void TransposeWx16_Any_LSX(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width);
void TransposeUVWxH_C(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width,
int height);
void TransposeUVWx8_C(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width);
void TransposeUVWx16_C(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width);
void TransposeUVWx8_SSE2(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width);
void TransposeUVWx8_NEON(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width);
void TransposeUVWx16_MSA(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width);
void TransposeUVWx16_LSX(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width);
void TransposeUVWx8_Any_SSE2(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width);
void TransposeUVWx8_Any_NEON(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width);
void TransposeUVWx16_Any_MSA(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width);
void TransposeUVWx16_Any_LSX(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width);
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_ROTATE_ROW_H_

5702
packages/camera/camera_aurora/aurora/include/libyuv/libyuv/row.h

File diff suppressed because it is too large.

320
packages/camera/camera_aurora/aurora/include/libyuv/libyuv/scale.h

@ -0,0 +1,320 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_SCALE_H_
#define INCLUDE_LIBYUV_SCALE_H_
#include "libyuv/basic_types.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// Supported filtering.
typedef enum FilterMode {
kFilterNone = 0, // Point sample; Fastest.
kFilterLinear = 1, // Filter horizontally only.
kFilterBilinear = 2, // Faster than box, but lower quality scaling down.
kFilterBox = 3 // Highest quality.
} FilterModeEnum;
// Scale a YUV plane.
LIBYUV_API
void ScalePlane(const uint8_t* src,
int src_stride,
int src_width,
int src_height,
uint8_t* dst,
int dst_stride,
int dst_width,
int dst_height,
enum FilterMode filtering);
LIBYUV_API
void ScalePlane_16(const uint16_t* src,
int src_stride,
int src_width,
int src_height,
uint16_t* dst,
int dst_stride,
int dst_width,
int dst_height,
enum FilterMode filtering);
// Sample is expected to be in the low 12 bits.
LIBYUV_API
void ScalePlane_12(const uint16_t* src,
int src_stride,
int src_width,
int src_height,
uint16_t* dst,
int dst_stride,
int dst_width,
int dst_height,
enum FilterMode filtering);
// Scales a YUV 4:2:0 image from the src width and height to the
// dst width and height.
// If filtering is kFilterNone, a simple nearest-neighbor algorithm is
// used. This produces basic (blocky) quality at the fastest speed.
// If filtering is kFilterBilinear, interpolation is used to produce a better
// quality image, at the expense of speed.
// If filtering is kFilterBox, averaging is used to produce an even better
// quality image, at a further expense of speed.
// Returns 0 if successful.
LIBYUV_API
int I420Scale(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
int src_width,
int src_height,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int dst_width,
int dst_height,
enum FilterMode filtering);
LIBYUV_API
int I420Scale_16(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
int src_width,
int src_height,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int dst_width,
int dst_height,
enum FilterMode filtering);
LIBYUV_API
int I420Scale_12(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
int src_width,
int src_height,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int dst_width,
int dst_height,
enum FilterMode filtering);
// Scales a YUV 4:4:4 image from the src width and height to the
// dst width and height.
// If filtering is kFilterNone, a simple nearest-neighbor algorithm is
// used. This produces basic (blocky) quality at the fastest speed.
// If filtering is kFilterBilinear, interpolation is used to produce a better
// quality image, at the expense of speed.
// If filtering is kFilterBox, averaging is used to produce an even better
// quality image, at a further expense of speed.
// Returns 0 if successful.
LIBYUV_API
int I444Scale(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
int src_width,
int src_height,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int dst_width,
int dst_height,
enum FilterMode filtering);
LIBYUV_API
int I444Scale_16(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
int src_width,
int src_height,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int dst_width,
int dst_height,
enum FilterMode filtering);
LIBYUV_API
int I444Scale_12(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
int src_width,
int src_height,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int dst_width,
int dst_height,
enum FilterMode filtering);
// Scales a YUV 4:2:2 image from the src width and height to the
// dst width and height.
// If filtering is kFilterNone, a simple nearest-neighbor algorithm is
// used. This produces basic (blocky) quality at the fastest speed.
// If filtering is kFilterBilinear, interpolation is used to produce a better
// quality image, at the expense of speed.
// If filtering is kFilterBox, averaging is used to produce an even better
// quality image, at a further expense of speed.
// Returns 0 if successful.
LIBYUV_API
int I422Scale(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
int src_width,
int src_height,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int dst_width,
int dst_height,
enum FilterMode filtering);
LIBYUV_API
int I422Scale_16(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
int src_width,
int src_height,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int dst_width,
int dst_height,
enum FilterMode filtering);
LIBYUV_API
int I422Scale_12(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
int src_width,
int src_height,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int dst_width,
int dst_height,
enum FilterMode filtering);
// Scales an NV12 image from the src width and height to the
// dst width and height.
// If filtering is kFilterNone, a simple nearest-neighbor algorithm is
// used. This produces basic (blocky) quality at the fastest speed.
// If filtering is kFilterBilinear, interpolation is used to produce a better
// quality image, at the expense of speed.
// kFilterBox is not supported for the UV channel and will be treated as
// bilinear.
// Returns 0 if successful.
LIBYUV_API
int NV12Scale(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_uv,
int src_stride_uv,
int src_width,
int src_height,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_uv,
int dst_stride_uv,
int dst_width,
int dst_height,
enum FilterMode filtering);
#ifdef __cplusplus
// Legacy API. Deprecated.
LIBYUV_API
int Scale(const uint8_t* src_y,
const uint8_t* src_u,
const uint8_t* src_v,
int src_stride_y,
int src_stride_u,
int src_stride_v,
int src_width,
int src_height,
uint8_t* dst_y,
uint8_t* dst_u,
uint8_t* dst_v,
int dst_stride_y,
int dst_stride_u,
int dst_stride_v,
int dst_width,
int dst_height,
LIBYUV_BOOL interpolate);
// For testing, allow disabling of specialized scalers.
LIBYUV_API
void SetUseReferenceImpl(LIBYUV_BOOL use);
#endif // __cplusplus
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_SCALE_H_
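A sketch of the I420 scaling path described above (illustrative; DownscaleI420 is not part of the plugin). kFilterBilinear is the middle ground between kFilterNone (fastest, blocky) and kFilterBox (highest quality):

#include <cstdint>
#include <vector>
#include "libyuv/scale.h"

int DownscaleI420(const uint8_t* sy, const uint8_t* su, const uint8_t* sv,
                  int sw, int sh,
                  std::vector<uint8_t>& dy, std::vector<uint8_t>& du,
                  std::vector<uint8_t>& dv, int dw, int dh) {
  dy.resize(static_cast<size_t>(dw) * dh);
  du.resize(static_cast<size_t>(dw / 2) * (dh / 2));
  dv.resize(du.size());
  // Strides equal the plane widths here because the planes are tightly packed.
  return libyuv::I420Scale(sy, sw, su, sw / 2, sv, sw / 2, sw, sh,
                           dy.data(), dw, du.data(), dw / 2,
                           dv.data(), dw / 2, dw, dh,
                           libyuv::kFilterBilinear);
}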

76
packages/camera/camera_aurora/aurora/include/libyuv/libyuv/scale_argb.h

@ -0,0 +1,76 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_SCALE_ARGB_H_
#define INCLUDE_LIBYUV_SCALE_ARGB_H_
#include "libyuv/basic_types.h"
#include "libyuv/scale.h" // For FilterMode
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
LIBYUV_API
int ARGBScale(const uint8_t* src_argb,
int src_stride_argb,
int src_width,
int src_height,
uint8_t* dst_argb,
int dst_stride_argb,
int dst_width,
int dst_height,
enum FilterMode filtering);
// Clipped scale takes destination rectangle coordinates for clip values.
LIBYUV_API
int ARGBScaleClip(const uint8_t* src_argb,
int src_stride_argb,
int src_width,
int src_height,
uint8_t* dst_argb,
int dst_stride_argb,
int dst_width,
int dst_height,
int clip_x,
int clip_y,
int clip_width,
int clip_height,
enum FilterMode filtering);
// Scale with YUV conversion to ARGB and clipping.
LIBYUV_API
int YUVToARGBScaleClip(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint32_t src_fourcc,
int src_width,
int src_height,
uint8_t* dst_argb,
int dst_stride_argb,
uint32_t dst_fourcc,
int dst_width,
int dst_height,
int clip_x,
int clip_y,
int clip_width,
int clip_height,
enum FilterMode filtering);
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_SCALE_ARGB_H_
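A sketch of resizing an ARGB frame to a target view size with the API above (illustrative; ScaleArgbFrame is not part of the plugin):

#include <cstdint>
#include <vector>
#include "libyuv/scale_argb.h"

std::vector<uint8_t> ScaleArgbFrame(const uint8_t* src, int src_w, int src_h,
                                    int dst_w, int dst_h) {
  std::vector<uint8_t> dst(static_cast<size_t>(dst_w) * dst_h * 4);  // 4 bytes per ARGB pixel
  libyuv::ARGBScale(src, src_w * 4, src_w, src_h,
                    dst.data(), dst_w * 4, dst_w, dst_h,
                    libyuv::kFilterBilinear);
  return dst;
}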

42
packages/camera/camera_aurora/aurora/include/libyuv/libyuv/scale_rgb.h

@ -0,0 +1,42 @@
/*
* Copyright 2022 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_SCALE_RGB_H_
#define INCLUDE_LIBYUV_SCALE_RGB_H_
#include "libyuv/basic_types.h"
#include "libyuv/scale.h" // For FilterMode
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// RGB can be RAW, RGB24 or YUV24
// RGB scales 24 bit images by converting a row at a time to ARGB
// and using ARGB row functions to scale, then convert to RGB.
// TODO(fbarchard): Allow input/output formats to be specified.
LIBYUV_API
int RGBScale(const uint8_t* src_rgb,
int src_stride_rgb,
int src_width,
int src_height,
uint8_t* dst_rgb,
int dst_stride_rgb,
int dst_width,
int dst_height,
enum FilterMode filtering);
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_SCALE_RGB_H_
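A sketch of scaling a packed RGB24 buffer with RGBScale (illustrative; ScaleRgb24 is not part of the plugin). As the comment above notes, rows are converted to ARGB internally, so this path is convenient rather than the fastest:

#include <cstdint>
#include <vector>
#include "libyuv/scale_rgb.h"

std::vector<uint8_t> ScaleRgb24(const uint8_t* src, int src_w, int src_h,
                                int dst_w, int dst_h) {
  std::vector<uint8_t> dst(static_cast<size_t>(dst_w) * dst_h * 3);  // 3 bytes per pixel
  libyuv::RGBScale(src, src_w * 3, src_w, src_h,
                   dst.data(), dst_w * 3, dst_w, dst_h,
                   libyuv::kFilterBilinear);
  return dst;
}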

1709
packages/camera/camera_aurora/aurora/include/libyuv/libyuv/scale_row.h

File diff suppressed because it is too large.

51
packages/camera/camera_aurora/aurora/include/libyuv/libyuv/scale_uv.h

@ -0,0 +1,51 @@
/*
* Copyright 2020 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_SCALE_UV_H_
#define INCLUDE_LIBYUV_SCALE_UV_H_
#include "libyuv/basic_types.h"
#include "libyuv/scale.h" // For FilterMode
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
LIBYUV_API
int UVScale(const uint8_t* src_uv,
int src_stride_uv,
int src_width,
int src_height,
uint8_t* dst_uv,
int dst_stride_uv,
int dst_width,
int dst_height,
enum FilterMode filtering);
// Scale a 16 bit UV image.
// This function is currently incomplete; it can't handle all cases.
LIBYUV_API
int UVScale_16(const uint16_t* src_uv,
int src_stride_uv,
int src_width,
int src_height,
uint16_t* dst_uv,
int dst_stride_uv,
int dst_width,
int dst_height,
enum FilterMode filtering);
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_SCALE_UV_H_

16
packages/camera/camera_aurora/aurora/include/libyuv/libyuv/version.h

@ -0,0 +1,16 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_VERSION_H_
#define INCLUDE_LIBYUV_VERSION_H_
#define LIBYUV_VERSION 1848
#endif // INCLUDE_LIBYUV_VERSION_H_

222
packages/camera/camera_aurora/aurora/include/libyuv/libyuv/video_common.h

@ -0,0 +1,222 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Common definitions for video, including fourcc and VideoFormat.
#ifndef INCLUDE_LIBYUV_VIDEO_COMMON_H_
#define INCLUDE_LIBYUV_VIDEO_COMMON_H_
#include "libyuv/basic_types.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
//////////////////////////////////////////////////////////////////////////////
// Definition of FourCC codes
//////////////////////////////////////////////////////////////////////////////
// Convert four characters to a FourCC code.
// Needs to be a macro otherwise the OS X compiler complains when the kFormat*
// constants are used in a switch.
#ifdef __cplusplus
#define FOURCC(a, b, c, d) \
((static_cast<uint32_t>(a)) | (static_cast<uint32_t>(b) << 8) | \
(static_cast<uint32_t>(c) << 16) | /* NOLINT */ \
(static_cast<uint32_t>(d) << 24)) /* NOLINT */
#else
#define FOURCC(a, b, c, d) \
(((uint32_t)(a)) | ((uint32_t)(b) << 8) | /* NOLINT */ \
((uint32_t)(c) << 16) | ((uint32_t)(d) << 24)) /* NOLINT */
#endif
// Some pages discussing FourCC codes:
// http://www.fourcc.org/yuv.php
// http://v4l2spec.bytesex.org/spec/book1.htm
// http://developer.apple.com/quicktime/icefloe/dispatch020.html
// http://msdn.microsoft.com/library/windows/desktop/dd206750.aspx#nv12
// http://people.xiph.org/~xiphmont/containers/nut/nut4cc.txt
// FourCC codes grouped according to implementation efficiency.
// Primary formats should convert in 1 efficient step.
// Secondary formats are converted in 2 steps.
// Auxiliary formats call primary converters.
enum FourCC {
// 10 Primary YUV formats: 5 planar, 2 biplanar, 2 packed.
FOURCC_I420 = FOURCC('I', '4', '2', '0'),
FOURCC_I422 = FOURCC('I', '4', '2', '2'),
FOURCC_I444 = FOURCC('I', '4', '4', '4'),
FOURCC_I400 = FOURCC('I', '4', '0', '0'),
FOURCC_NV21 = FOURCC('N', 'V', '2', '1'),
FOURCC_NV12 = FOURCC('N', 'V', '1', '2'),
FOURCC_YUY2 = FOURCC('Y', 'U', 'Y', '2'),
FOURCC_UYVY = FOURCC('U', 'Y', 'V', 'Y'),
FOURCC_I010 = FOURCC('I', '0', '1', '0'), // bt.601 10 bit 420
FOURCC_I210 = FOURCC('I', '2', '1', '0'), // bt.601 10 bit 422
// 1 Secondary YUV format: row biplanar. deprecated.
FOURCC_M420 = FOURCC('M', '4', '2', '0'),
// 13 Primary RGB formats: 4 32 bpp, 2 24 bpp, 3 16 bpp, 1 10 bpc, 2 64 bpp.
FOURCC_ARGB = FOURCC('A', 'R', 'G', 'B'),
FOURCC_BGRA = FOURCC('B', 'G', 'R', 'A'),
FOURCC_ABGR = FOURCC('A', 'B', 'G', 'R'),
FOURCC_AR30 = FOURCC('A', 'R', '3', '0'), // 10 bit per channel. 2101010.
FOURCC_AB30 = FOURCC('A', 'B', '3', '0'), // ABGR version of 10 bit
FOURCC_AR64 = FOURCC('A', 'R', '6', '4'), // 16 bit per channel.
FOURCC_AB64 = FOURCC('A', 'B', '6', '4'), // ABGR version of 16 bit
FOURCC_24BG = FOURCC('2', '4', 'B', 'G'),
FOURCC_RAW = FOURCC('r', 'a', 'w', ' '),
FOURCC_RGBA = FOURCC('R', 'G', 'B', 'A'),
FOURCC_RGBP = FOURCC('R', 'G', 'B', 'P'), // rgb565 LE.
FOURCC_RGBO = FOURCC('R', 'G', 'B', 'O'), // argb1555 LE.
FOURCC_R444 = FOURCC('R', '4', '4', '4'), // argb4444 LE.
// 1 Primary Compressed YUV format.
FOURCC_MJPG = FOURCC('M', 'J', 'P', 'G'),
// 14 Auxiliary YUV variations: 3 with U and V planes are swapped, 1 Alias.
FOURCC_YV12 = FOURCC('Y', 'V', '1', '2'),
FOURCC_YV16 = FOURCC('Y', 'V', '1', '6'),
FOURCC_YV24 = FOURCC('Y', 'V', '2', '4'),
FOURCC_YU12 = FOURCC('Y', 'U', '1', '2'), // Linux version of I420.
FOURCC_J420 =
FOURCC('J', '4', '2', '0'), // jpeg (bt.601 full), unofficial fourcc
FOURCC_J422 =
FOURCC('J', '4', '2', '2'), // jpeg (bt.601 full), unofficial fourcc
FOURCC_J444 =
FOURCC('J', '4', '4', '4'), // jpeg (bt.601 full), unofficial fourcc
FOURCC_J400 =
FOURCC('J', '4', '0', '0'), // jpeg (bt.601 full), unofficial fourcc
FOURCC_F420 = FOURCC('F', '4', '2', '0'), // bt.709 full, unofficial fourcc
FOURCC_F422 = FOURCC('F', '4', '2', '2'), // bt.709 full, unofficial fourcc
FOURCC_F444 = FOURCC('F', '4', '4', '4'), // bt.709 full, unofficial fourcc
FOURCC_H420 = FOURCC('H', '4', '2', '0'), // bt.709, unofficial fourcc
FOURCC_H422 = FOURCC('H', '4', '2', '2'), // bt.709, unofficial fourcc
FOURCC_H444 = FOURCC('H', '4', '4', '4'), // bt.709, unofficial fourcc
FOURCC_U420 = FOURCC('U', '4', '2', '0'), // bt.2020, unofficial fourcc
FOURCC_U422 = FOURCC('U', '4', '2', '2'), // bt.2020, unofficial fourcc
FOURCC_U444 = FOURCC('U', '4', '4', '4'), // bt.2020, unofficial fourcc
FOURCC_F010 = FOURCC('F', '0', '1', '0'), // bt.709 full range 10 bit 420
FOURCC_H010 = FOURCC('H', '0', '1', '0'), // bt.709 10 bit 420
FOURCC_U010 = FOURCC('U', '0', '1', '0'), // bt.2020 10 bit 420
FOURCC_F210 = FOURCC('F', '2', '1', '0'), // bt.709 full range 10 bit 422
FOURCC_H210 = FOURCC('H', '2', '1', '0'), // bt.709 10 bit 422
FOURCC_U210 = FOURCC('U', '2', '1', '0'), // bt.2020 10 bit 422
FOURCC_P010 = FOURCC('P', '0', '1', '0'),
FOURCC_P210 = FOURCC('P', '2', '1', '0'),
// 14 Auxiliary aliases. CanonicalFourCC() maps these to canonical fourcc.
FOURCC_IYUV = FOURCC('I', 'Y', 'U', 'V'), // Alias for I420.
FOURCC_YU16 = FOURCC('Y', 'U', '1', '6'), // Alias for I422.
FOURCC_YU24 = FOURCC('Y', 'U', '2', '4'), // Alias for I444.
FOURCC_YUYV = FOURCC('Y', 'U', 'Y', 'V'), // Alias for YUY2.
FOURCC_YUVS = FOURCC('y', 'u', 'v', 's'), // Alias for YUY2 on Mac.
FOURCC_HDYC = FOURCC('H', 'D', 'Y', 'C'), // Alias for UYVY.
FOURCC_2VUY = FOURCC('2', 'v', 'u', 'y'), // Alias for UYVY on Mac.
FOURCC_JPEG = FOURCC('J', 'P', 'E', 'G'), // Alias for MJPG.
FOURCC_DMB1 = FOURCC('d', 'm', 'b', '1'), // Alias for MJPG on Mac.
FOURCC_BA81 = FOURCC('B', 'A', '8', '1'), // Alias for BGGR.
FOURCC_RGB3 = FOURCC('R', 'G', 'B', '3'), // Alias for RAW.
FOURCC_BGR3 = FOURCC('B', 'G', 'R', '3'), // Alias for 24BG.
FOURCC_CM32 = FOURCC(0, 0, 0, 32), // Alias for BGRA kCMPixelFormat_32ARGB
FOURCC_CM24 = FOURCC(0, 0, 0, 24), // Alias for RAW kCMPixelFormat_24RGB
FOURCC_L555 = FOURCC('L', '5', '5', '5'), // Alias for RGBO.
FOURCC_L565 = FOURCC('L', '5', '6', '5'), // Alias for RGBP.
FOURCC_5551 = FOURCC('5', '5', '5', '1'), // Alias for RGBO.
// deprecated formats. Not supported, but defined for backward compatibility.
FOURCC_I411 = FOURCC('I', '4', '1', '1'),
FOURCC_Q420 = FOURCC('Q', '4', '2', '0'),
FOURCC_RGGB = FOURCC('R', 'G', 'G', 'B'),
FOURCC_BGGR = FOURCC('B', 'G', 'G', 'R'),
FOURCC_GRBG = FOURCC('G', 'R', 'B', 'G'),
FOURCC_GBRG = FOURCC('G', 'B', 'R', 'G'),
FOURCC_H264 = FOURCC('H', '2', '6', '4'),
// Match any fourcc.
FOURCC_ANY = -1,
};
enum FourCCBpp {
// Canonical fourcc codes used in our code.
FOURCC_BPP_I420 = 12,
FOURCC_BPP_I422 = 16,
FOURCC_BPP_I444 = 24,
FOURCC_BPP_I411 = 12,
FOURCC_BPP_I400 = 8,
FOURCC_BPP_NV21 = 12,
FOURCC_BPP_NV12 = 12,
FOURCC_BPP_YUY2 = 16,
FOURCC_BPP_UYVY = 16,
FOURCC_BPP_M420 = 12, // deprecated
FOURCC_BPP_Q420 = 12,
FOURCC_BPP_ARGB = 32,
FOURCC_BPP_BGRA = 32,
FOURCC_BPP_ABGR = 32,
FOURCC_BPP_RGBA = 32,
FOURCC_BPP_AR30 = 32,
FOURCC_BPP_AB30 = 32,
FOURCC_BPP_AR64 = 64,
FOURCC_BPP_AB64 = 64,
FOURCC_BPP_24BG = 24,
FOURCC_BPP_RAW = 24,
FOURCC_BPP_RGBP = 16,
FOURCC_BPP_RGBO = 16,
FOURCC_BPP_R444 = 16,
FOURCC_BPP_RGGB = 8,
FOURCC_BPP_BGGR = 8,
FOURCC_BPP_GRBG = 8,
FOURCC_BPP_GBRG = 8,
FOURCC_BPP_YV12 = 12,
FOURCC_BPP_YV16 = 16,
FOURCC_BPP_YV24 = 24,
FOURCC_BPP_YU12 = 12,
FOURCC_BPP_J420 = 12,
FOURCC_BPP_J400 = 8,
FOURCC_BPP_H420 = 12,
FOURCC_BPP_H422 = 16,
FOURCC_BPP_I010 = 15,
FOURCC_BPP_I210 = 20,
FOURCC_BPP_H010 = 15,
FOURCC_BPP_H210 = 20,
FOURCC_BPP_P010 = 15,
FOURCC_BPP_P210 = 20,
FOURCC_BPP_MJPG = 0, // 0 means unknown.
FOURCC_BPP_H264 = 0,
FOURCC_BPP_IYUV = 12,
FOURCC_BPP_YU16 = 16,
FOURCC_BPP_YU24 = 24,
FOURCC_BPP_YUYV = 16,
FOURCC_BPP_YUVS = 16,
FOURCC_BPP_HDYC = 16,
FOURCC_BPP_2VUY = 16,
FOURCC_BPP_JPEG = 1,
FOURCC_BPP_DMB1 = 1,
FOURCC_BPP_BA81 = 8,
FOURCC_BPP_RGB3 = 24,
FOURCC_BPP_BGR3 = 24,
FOURCC_BPP_CM32 = 32,
FOURCC_BPP_CM24 = 24,
// Match any fourcc.
FOURCC_BPP_ANY = 0, // 0 means unknown.
};
// Converts fourcc aliases into canonical ones.
LIBYUV_API uint32_t CanonicalFourCC(uint32_t fourcc);
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_VIDEO_COMMON_H_
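A sketch of normalizing a device-reported fourcc with CanonicalFourCC() (illustrative; DescribeFourcc is not part of the plugin):

#include <cstdint>
#include <cstdio>
#include "libyuv/video_common.h"

void DescribeFourcc(uint32_t reported) {
  const uint32_t canonical = libyuv::CanonicalFourCC(reported);
  switch (canonical) {
    case libyuv::FOURCC_I420: std::printf("planar 4:2:0\n"); break;
    case libyuv::FOURCC_NV12: std::printf("biplanar 4:2:0\n"); break;
    case libyuv::FOURCC_YUY2: std::printf("packed 4:2:2\n"); break;
    case libyuv::FOURCC_MJPG: std::printf("motion JPEG\n"); break;
    default: std::printf("other: 0x%08x\n", canonical); break;
  }
}

// DescribeFourcc(libyuv::FOURCC_YUYV) prints "packed 4:2:2" because YUYV is an
// alias that CanonicalFourCC() maps to YUY2.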

113
packages/camera/camera_aurora/aurora/texture_camera.cpp

@ -3,24 +3,20 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <camera_aurora/texture_camera.h>
#include <camera_aurora/texture_camera_egl_helper.h>
#include <camera_aurora/texture_camera_pixels_helper.h>
#include <flutter/platform-data.h>
#include <flutter/platform-methods.h>
#include <libyuv/libyuv.h>
#include <QtCore>
#include <QBuffer>
TextureCamera::TextureCamera(TextureRegistrar *plugin, const CameraErrorHandler &onError)
: m_plugin(plugin)
, m_onError(onError)
, m_buffer(nullptr)
, m_image(nullptr)
, m_manager(StreamCameraManager())
{
TextureCameraEGLHelper::EGLInit();
}
{}
std::vector<Encodable> TextureCamera::GetAvailableCameras()
{
@ -42,21 +38,6 @@ std::vector<Encodable> TextureCamera::GetAvailableCameras()
return cameras;
}
std::string TextureCamera::GetImageBase64()
{
if (m_image && m_camera) {
Aurora::StreamCamera::CameraInfo info;
if (m_camera->getInfo(info)) {
QBuffer qbuffer;
qbuffer.open(QIODevice::WriteOnly);
QImage rotatedImg = m_image->transformed(QMatrix().rotate(info.mountAngle));
rotatedImg.save(&qbuffer, "JPEG");
return qbuffer.data().toBase64().toStdString();
}
}
return "";
}
std::map<Encodable, Encodable> TextureCamera::GetState()
{
Aurora::StreamCamera::CameraInfo info;
@ -110,10 +91,10 @@ void TextureCamera::SendError(std::string error)
m_onError();
}
std::map<Encodable, Encodable> TextureCamera::StartCapture(size_t width, size_t height)
std::map<Encodable, Encodable> TextureCamera::StartCapture()
{
m_viewWidth = width;
m_viewHeight = height;
auto displayWidth = PlatformMethods::GetDisplayWidth();
auto displayHeight = PlatformMethods::GetDisplayHeight();
if (m_camera) {
if (m_camera->captureStarted()) {
@ -124,20 +105,23 @@ std::map<Encodable, Encodable> TextureCamera::StartCapture(size_t width, size_t
std::vector<Aurora::StreamCamera::CameraCapability> caps;
if (m_manager->queryCapabilities(info.id, caps)) {
for(unsigned int i = 0; i< caps.size(); i++) {
if (width + height <= caps[i].width + caps[i].height || caps.size() == i - 1) {
m_captureWidth = caps[i].width;
m_captureHeight = caps[i].height;
if (!m_camera->startCapture(caps[i])) {
Unregister();
SendError("Stream camera error start capture");
}
auto cap = caps.back();
for(unsigned int i = caps.size()-1; i >= 0; i--) { // @todo comparison of unsigned expression >= 0 is always true
if (displayWidth + displayHeight >= caps[i].width + caps[i].height) { // @todo comparison of integer expressions of different signedness: ‘int’ and ‘unsigned int’
cap = caps[i];
break;
}
}
m_captureWidth = cap.width;
m_captureHeight = cap.height;
if (!m_camera->startCapture(cap)) {
Unregister();
SendError("Stream camera error start capture");
}
}
}
}
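The @todo notes above flag two issues in this hunk: "unsigned int i >= 0" can never become false, and the width/height comparison mixes signed and unsigned operands. One possible signed-index rewrite, assuming caps is ordered from smallest to largest (as the caps.back() fallback suggests); this is a sketch, not part of the commit:

auto cap = caps.back();
const int budget = static_cast<int>(displayWidth) + static_cast<int>(displayHeight);
// Walk from the largest capability down and keep the first one that fits the display.
for (int i = static_cast<int>(caps.size()) - 1; i >= 0; --i) {
  const int capSize = static_cast<int>(caps[i].width) + static_cast<int>(caps[i].height);
  if (budget >= capSize) {
    cap = caps[i];
    break;
  }
}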
@ -155,32 +139,19 @@ void TextureCamera::StopCapture()
std::map<Encodable, Encodable> TextureCamera::Register(std::string cameraName)
{
m_textureId = m_plugin->RegisterTexture(
[this]([[maybe_unused]] size_t width, [[maybe_unused]] size_t height, void* egl_display, void* egl_context) -> std::optional<TextureVariant> {
if (m_buffer) {
if (m_buffer->handleType == Aurora::StreamCamera::HandleType::EGL) {
auto eglImage = TextureCameraEGLHelper::EGLCreateImage(m_buffer->handle, egl_display, egl_context);
if (eglImage != EGL_NO_IMAGE_KHR) {
return std::make_optional(TextureVariant(FlutterEGLImage{
eglImage,
m_buffer->width,
m_buffer->height,
}));
}
} else {
m_image = TextureCameraPixelsHelper::YUVtoARGB(m_buffer->mapYCbCr());
auto pixels = static_cast<uint8_t *>(m_image->bits());
return std::make_optional(TextureVariant(FlutterPixelBuffer{
pixels,
m_buffer->width,
m_buffer->height,
}));
}
[this]([[maybe_unused]] size_t width, [[maybe_unused]] size_t height) -> std::optional<TextureVariant> {
if (m_bits) {
return std::make_optional(TextureVariant(FlutterPixelBuffer{
m_bits,
m_captureWidth,
m_captureHeight
}));
}
return std::nullopt;
});
if (CreateCamera(cameraName) && m_viewWidth != 0 && m_viewHeight != 0) {
StartCapture(m_viewWidth, m_viewHeight);
if (CreateCamera(cameraName)) {
StartCapture();
}
return GetState();
@ -191,11 +162,11 @@ std::map<Encodable, Encodable> TextureCamera::Unregister()
StopCapture();
m_error = "";
m_counter = 0;
m_textureId = 0;
m_captureWidth = 0;
m_captureHeight = 0;
m_buffer = nullptr;
m_camera = nullptr;
m_bits = nullptr;
m_plugin->UnregisterTexture(m_textureId);
@ -204,7 +175,29 @@ std::map<Encodable, Encodable> TextureCamera::Unregister()
void TextureCamera::onCameraFrame(std::shared_ptr<Aurora::StreamCamera::GraphicBuffer> buffer)
{
m_buffer = buffer;
m_counter += 1;
if (m_counter < 0) m_counter = 0;
if (m_counter %3 == 0) return;
auto frame = buffer->mapYCbCr();
auto bits = std::shared_ptr<uint8_t>((uint8_t *) malloc(frame->width * 4 * frame->height), free);
libyuv::Android420ToARGB(
frame->y,
frame->yStride,
frame->cr,
frame->cStride,
frame->cb,
frame->cStride,
frame->chromaStep,
bits.get(),
frame->width * 4,
frame->width,
frame->height
);
m_bits = bits;
m_plugin->MarkTextureAvailable(m_textureId);
}
@ -217,5 +210,5 @@ void TextureCamera::onCameraError(const std::string &errorDescription)
void TextureCamera::onCameraParameterChanged([[maybe_unused]] Aurora::StreamCamera::CameraParameter parameter,
const std::string &value)
{
std::cout << "onCameraParameterChanged: " << value << std::endl;
std::cout << "onCameraParameterChanged: " << value << std::endl;
}

27
packages/camera/camera_aurora/aurora/texture_camera_egl_helper.cpp

@ -1,27 +0,0 @@
/**
* SPDX-FileCopyrightText: Copyright 2023 Open Mobile Platform LLC <community@omp.ru>
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <camera_aurora/texture_camera_egl_helper.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
static PFNEGLCREATEIMAGEKHRPROC eglCreateImageKHR;
void TextureCameraEGLHelper::EGLInit()
{
eglCreateImageKHR = reinterpret_cast<PFNEGLCREATEIMAGEKHRPROC>(
eglGetProcAddress("eglCreateImageKHR"));
}
EGLImageKHR TextureCameraEGLHelper::EGLCreateImage(const void *handle, void *egl_display, void *egl_context)
{
GLint eglImgAttrs[] = {EGL_IMAGE_PRESERVED_KHR, EGL_TRUE, EGL_NONE, EGL_NONE};
return eglCreateImageKHR((EGLDisplay) egl_display,
(EGLContext) egl_context,
EGL_NATIVE_BUFFER_ANDROID,
(EGLClientBuffer) handle,
eglImgAttrs);
}

77
packages/camera/camera_aurora/aurora/texture_camera_pixels_helper.cpp

@ -1,77 +0,0 @@
/**
* SPDX-FileCopyrightText: Copyright 2023 Open Mobile Platform LLC <community@omp.ru>
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <camera_aurora/texture_camera_pixels_helper.h>
QImage *TextureCameraPixelsHelper::YUVtoARGB(std::shared_ptr<const Aurora::StreamCamera::YCbCrFrame> frame)
{
QSize size(frame->width, frame->height);
QImage *image = new QImage(size, QImage::Format_RGBA8888);
planarYuv420ToArgb(frame->y,
frame->cr,
frame->cb,
frame->yStride,
frame->cStride,
frame->cStride,
frame->chromaStep,
reinterpret_cast<quint32 *>(image->bits()),
frame->width,
frame->height);
return image;
}
quint32 TextureCameraPixelsHelper::yuvToArgb(qint32 y, qint32 rv, qint32 guv, qint32 bu, qint32 a = 255)
{
qint32 yy = (y - 16) * 298;
return (a << 24)
| qBound(0, (yy + rv) >> 8, 255) << 16
| qBound(0, (yy - guv) >> 8, 255) << 8
| qBound(0, (yy + bu) >> 8, 255);
}
void TextureCameraPixelsHelper::planarYuv420ToArgb(const uchar *y,
const uchar *u,
const uchar *v,
qint32 yStride,
qint32 uStride,
qint32 vStride,
qint32 uvPixelStride,
quint32 *rgb,
qint32 width,
qint32 height)
{
quint32 *rgb0 = rgb;
quint32 *rgb1 = rgb + width;
for (qint32 j = 0; j < height; j += 2) {
const uchar *lineY0 = y;
const uchar *lineY1 = y + yStride;
const uchar *lineU = u;
const uchar *lineV = v;
for (qint32 i = 0; i < width; i += 2) {
const qint32 uu = *lineU - 128;
const qint32 vv = *lineV - 128;
const qint32 rv = 409 * vv + 128;
const qint32 guv = 100 * uu + 208 * vv + 128;
const qint32 bu = 516 * uu + 128;
lineU += uvPixelStride;
lineV += uvPixelStride;
*rgb0++ = yuvToArgb(*lineY0++, rv, guv, bu);
*rgb0++ = yuvToArgb(*lineY0++, rv, guv, bu);
*rgb1++ = yuvToArgb(*lineY1++, rv, guv, bu);
*rgb1++ = yuvToArgb(*lineY1++, rv, guv, bu);
}
y += yStride << 1;
u += uStride;
v += vStride;
rgb0 += width;
rgb1 += width;
}
}