Camera2 + GLSurfaceView

Basic concepts:

Screen orientation

[Figures omitted: diagrams of the device ScreenOrientation (0°) and of the camera sensor Orientation.]
Conclusions when displaying with Android's GLSurfaceView (a sketch of the rotation calculation follows this list):
Rear camera: camera ID "0"
Surface.ROTATION_0: the frame must be rotated 90° counterclockwise, and the GLSurfaceView's display aspect ratio adjusted accordingly
Surface.ROTATION_90: no orientation handling needed
Surface.ROTATION_180: no orientation handling needed
Surface.ROTATION_270: the frame must be rotated 180°, and the GLSurfaceView's display aspect ratio adjusted accordingly
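These per-rotation rules are an instance of the standard Camera2 relation: for a back-facing camera the frame must be rotated clockwise by (SENSOR_ORIENTATION - displayRotationDegrees + 360) % 360. A minimal sketch of that calculation (the class name RotationHelper is illustrative, not from any SDK):

    import android.content.Context;
    import android.hardware.camera2.CameraCharacteristics;
    import android.hardware.camera2.CameraManager;
    import android.view.Surface;
    import android.view.WindowManager;

    public final class RotationHelper {
        /**
         * Returns how many degrees the camera frame must be rotated clockwise
         * so that it appears upright for the current display rotation
         * (back-facing camera).
         */
        public static int getRequiredRotation(Context context, String cameraId) throws Exception {
            CameraManager manager = (CameraManager) context.getSystemService(Context.CAMERA_SERVICE);
            CameraCharacteristics characteristics = manager.getCameraCharacteristics(cameraId);
            int sensorOrientation = characteristics.get(CameraCharacteristics.SENSOR_ORIENTATION);

            int displayRotation = ((WindowManager) context.getSystemService(Context.WINDOW_SERVICE))
                    .getDefaultDisplay().getRotation();
            int displayDegrees;
            switch (displayRotation) {
                case Surface.ROTATION_90:  displayDegrees = 90;  break;
                case Surface.ROTATION_180: displayDegrees = 180; break;
                case Surface.ROTATION_270: displayDegrees = 270; break;
                default:                   displayDegrees = 0;   break;
            }
            // Typical rear sensors report SENSOR_ORIENTATION == 90, which
            // reproduces the table above.
            return (sensorOrientation - displayDegrees + 360) % 360;
        }
    }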

Opening the camera with Camera2

1. For display only: get the current display's width and height (DisplayWidth, DisplayHeight).
2. For display only: query the camera's supported preview sizes via cameraCharacteristics.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP) and pick the preview size that best matches the screen's aspect ratio at the highest resolution.
3. Open the camera via CameraManager.openCamera().
4. Set the camera's targets (more than one is allowed) via CaptureRequest.Builder (see the sketch after this list).
    For example: generate an OES texture with GLES and bind it to a SurfaceTexture; after setting the SurfaceTexture's width and height (obtained in steps 1 and 2), wrap it in a Surface and finally pass the Surface to CaptureRequest.Builder.addTarget(surface).
    For example: create an ImageReader and pass its Surface (getSurface()) to CaptureRequest.Builder.addTarget(surface).
    Conclusion: CaptureRequest.Builder.addTarget(Surface) hands a target Surface (whose size was set in advance) to the camera, so the camera, as the producer, can deliver data at different resolutions to different consumers at the same time.
    Consumers: TextureView, GLSurfaceView, ImageReader, etc.
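A minimal sketch of steps 2-4, assuming the CAMERA permission is already granted; Camera2Opener/openRearCamera are illustrative names, and the best-aspect-ratio size selection is stubbed out:

    import android.annotation.SuppressLint;
    import android.content.Context;
    import android.graphics.SurfaceTexture;
    import android.hardware.camera2.CameraAccessException;
    import android.hardware.camera2.CameraCaptureSession;
    import android.hardware.camera2.CameraCharacteristics;
    import android.hardware.camera2.CameraDevice;
    import android.hardware.camera2.CameraManager;
    import android.hardware.camera2.CaptureRequest;
    import android.hardware.cam2.params.StreamConfigurationMap;
    import android.media.ImageReader;
    import android.os.Handler;
    import android.util.Size;
    import android.view.Surface;
    import java.util.Arrays;

    public class Camera2Opener {

        @SuppressLint("MissingPermission") // assumes the CAMERA permission is already granted
        void openRearCamera(Context context, SurfaceTexture oesTexture,
                            ImageReader reader, Handler handler) throws CameraAccessException {
            CameraManager manager = (CameraManager) context.getSystemService(Context.CAMERA_SERVICE);
            String cameraId = "0"; // rear camera, as noted above

            // Step 2: pick a preview size from the stream configuration map.
            CameraCharacteristics chars = manager.getCameraCharacteristics(cameraId);
            StreamConfigurationMap map = chars.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP);
            Size[] sizes = map.getOutputSizes(SurfaceTexture.class);
            Size previewSize = sizes[0]; // stub: replace with a best-aspect-ratio selection

            // The SurfaceTexture must know the producer's buffer size before use.
            oesTexture.setDefaultBufferSize(previewSize.getWidth(), previewSize.getHeight());
            Surface previewSurface = new Surface(oesTexture);
            Surface readerSurface = reader.getSurface();

            // Step 3: open the camera; step 4 runs once the device is ready.
            manager.openCamera(cameraId, new CameraDevice.StateCallback() {
                @Override public void onOpened(CameraDevice device) {
                    try {
                        CaptureRequest.Builder builder =
                                device.createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW);
                        builder.addTarget(previewSurface); // consumer 1: GLSurfaceView via OES texture
                        builder.addTarget(readerSurface);  // consumer 2: ImageReader for frame data
                        device.createCaptureSession(Arrays.asList(previewSurface, readerSurface),
                                new CameraCaptureSession.StateCallback() {
                                    @Override public void onConfigured(CameraCaptureSession session) {
                                        try {
                                            session.setRepeatingRequest(builder.build(), null, handler);
                                        } catch (CameraAccessException e) {
                                            e.printStackTrace();
                                        }
                                    }
                                    @Override public void onConfigureFailed(CameraCaptureSession session) { }
                                }, handler);
                    } catch (CameraAccessException e) {
                        e.printStackTrace();
                    }
                }
                @Override public void onDisconnected(CameraDevice device) { device.close(); }
                @Override public void onError(CameraDevice device, int error) { device.close(); }
            }, handler);
        }
    }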

Displaying the camera preview

1. Display via GLSurfaceView: bind the OES texture and implement a custom GLSurfaceView.Renderer; the frame's orientation and aspect ratio can be adjusted through the vertex coordinates or the texture transform matrix (see the sketch after this list).
2. Display via TextureView: obtain the SurfaceTexture from TextureView.getSurfaceTexture() and set it as a target in the same way; the TextureView itself can then be rotated and translated to keep the frame's aspect ratio correct.
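A minimal GLSurfaceView.Renderer sketch for option 1 (OesPreviewRenderer is an illustrative name; the full-screen quad and the samplerExternalOES shader are omitted):

    import android.graphics.SurfaceTexture;
    import android.opengl.GLES11Ext;
    import android.opengl.GLES20;
    import android.opengl.GLSurfaceView;
    import javax.microedition.khronos.egl.EGLConfig;
    import javax.microedition.khronos.opengles.GL10;

    public class OesPreviewRenderer implements GLSurfaceView.Renderer {
        private SurfaceTexture surfaceTexture;        // handed to the camera as a target
        private final float[] texMatrix = new float[16];

        @Override
        public void onSurfaceCreated(GL10 gl, EGLConfig config) {
            // Generate an OES (external) texture the camera can write into.
            int[] tex = new int[1];
            GLES20.glGenTextures(1, tex, 0);
            GLES20.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, tex[0]);
            GLES20.glTexParameteri(GLES11Ext.GL_TEXTURE_EXTERNAL_OES,
                    GLES20.GL_TEXTURE_MIN_FILTER, GLES20.GL_LINEAR);
            GLES20.glTexParameteri(GLES11Ext.GL_TEXTURE_EXTERNAL_OES,
                    GLES20.GL_TEXTURE_MAG_FILTER, GLES20.GL_LINEAR);
            surfaceTexture = new SurfaceTexture(tex[0]);
            // Hand `surfaceTexture` to the camera-opening code (see the earlier sketch).
        }

        @Override
        public void onSurfaceChanged(GL10 gl, int width, int height) {
            GLES20.glViewport(0, 0, width, height);
        }

        @Override
        public void onDrawFrame(GL10 gl) {
            // Pull the newest camera frame into the OES texture.
            surfaceTexture.updateTexImage();
            surfaceTexture.getTransformMatrix(texMatrix);
            // Draw a full-screen quad sampling the OES texture with a
            // samplerExternalOES fragment shader; orientation/aspect fixes go
            // into the vertex coordinates or the matrix passed to the shader.
        }
    }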

Creating an ImageReader

1. An ImageReader can be used to obtain the camera's frame data; processing those frames enables scenarios that need per-frame handling, such as object detection and face detection (a setup sketch follows this list).
2. This pattern appears in many official samples, for example:
    2.1 TFLite ObjectDetect converts the Image data to a Bitmap and passes it to the recognition API.
    2.2 OpenCV Face Detect converts the Image data to a Mat and passes the grayscale Mat to the recognition API.
    2.3 Face recognition and landmark samples convert the Image data to NV21 and pass it to the recognition API.
3. Just as some display rotations require rotating the preview, the frames obtained from the ImageReader also need the corresponding rotation before the data's orientation is correct.
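A minimal setup sketch (FrameReaderFactory is an illustrative name; previewWidth/previewHeight come from the preview-size selection above, and backgroundHandler should be bound to a worker thread):

    import android.graphics.ImageFormat;
    import android.media.Image;
    import android.media.ImageReader;
    import android.os.Handler;

    public class FrameReaderFactory {

        public static ImageReader create(int previewWidth, int previewHeight,
                                         Handler backgroundHandler) {
            ImageReader reader = ImageReader.newInstance(previewWidth, previewHeight,
                    ImageFormat.YUV_420_888, /* maxImages= */ 2);
            reader.setOnImageAvailableListener(r -> {
                Image image = r.acquireLatestImage();
                if (image == null) {
                    return;
                }
                try {
                    // Convert the frame here (Mat / Bitmap / NV21, see the snippets
                    // below) and rotate the result to match the display orientation.
                } finally {
                    image.close(); // always close, or the reader's queue fills and stalls
                }
            }, backgroundHandler);
            // reader.getSurface() is what goes into CaptureRequest.Builder.addTarget(...).
            return reader;
        }
    }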

ImageReader format conversion

  1. OpenCV conversion (from the official OpenCV SDK implementation: ImageFormat.YUV_420_888 to Mat)
    import java.nio.ByteBuffer;

    import android.media.Image;

    import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame;
    import org.opencv.core.CvType;
    import org.opencv.core.Mat;
    import org.opencv.imgproc.Imgproc;

    public class Yuv4208882Mat implements CvCameraViewFrame {

        public Mat gray() {
            Image.Plane[] planes = mImage.getPlanes();
            int w = mImage.getWidth();
            int h = mImage.getHeight();
            assert (planes[0].getPixelStride() == 1);
            ByteBuffer y_plane = planes[0].getBuffer();
            int y_plane_step = planes[0].getRowStride();
            // The Y plane alone is already a single-channel grayscale image.
            mGray = new Mat(h, w, CvType.CV_8UC1, y_plane, y_plane_step);
            return mGray;
        }

        public Mat rgba() {
            Image.Plane[] planes = mImage.getPlanes();
            int w = mImage.getWidth();
            int h = mImage.getHeight();
            int chromaPixelStride = planes[1].getPixelStride();

            if (chromaPixelStride == 2) { // Chroma channels are interleaved
                assert (planes[0].getPixelStride() == 1);
                assert (planes[2].getPixelStride() == 2);
                ByteBuffer y_plane = planes[0].getBuffer();
                int y_plane_step = planes[0].getRowStride();
                ByteBuffer uv_plane1 = planes[1].getBuffer();
                int uv_plane1_step = planes[1].getRowStride();
                ByteBuffer uv_plane2 = planes[2].getBuffer();
                int uv_plane2_step = planes[2].getRowStride();
                Mat y_mat = new Mat(h, w, CvType.CV_8UC1, y_plane, y_plane_step);
                Mat uv_mat1 = new Mat(h / 2, w / 2, CvType.CV_8UC2, uv_plane1, uv_plane1_step);
                Mat uv_mat2 = new Mat(h / 2, w / 2, CvType.CV_8UC2, uv_plane2, uv_plane2_step);
                // The U and V buffers overlap by one byte; their address order
                // tells NV12 (U first) apart from NV21 (V first).
                long addr_diff = uv_mat2.dataAddr() - uv_mat1.dataAddr();
                if (addr_diff > 0) {
                    assert (addr_diff == 1);
                    Imgproc.cvtColorTwoPlane(y_mat, uv_mat1, mRgba, Imgproc.COLOR_YUV2RGBA_NV12);
                } else {
                    assert (addr_diff == -1);
                    Imgproc.cvtColorTwoPlane(y_mat, uv_mat2, mRgba, Imgproc.COLOR_YUV2RGBA_NV21);
                }
                return mRgba;
            } else { // Chroma channels are not interleaved
                byte[] yuv_bytes = new byte[w * (h + h / 2)];
                ByteBuffer y_plane = planes[0].getBuffer();
                ByteBuffer u_plane = planes[1].getBuffer();
                ByteBuffer v_plane = planes[2].getBuffer();

                int yuv_bytes_offset = 0;

                int y_plane_step = planes[0].getRowStride();
                if (y_plane_step == w) {
                    y_plane.get(yuv_bytes, 0, w * h);
                    yuv_bytes_offset = w * h;
                } else {
                    int padding = y_plane_step - w;
                    for (int i = 0; i < h; i++) {
                        y_plane.get(yuv_bytes, yuv_bytes_offset, w);
                        yuv_bytes_offset += w;
                        if (i < h - 1) {
                            y_plane.position(y_plane.position() + padding);
                        }
                    }
                    assert (yuv_bytes_offset == w * h);
                }

                int chromaRowStride = planes[1].getRowStride();
                int chromaRowPadding = chromaRowStride - w / 2;

                if (chromaRowPadding == 0) {
                    // When the row stride of the chroma channels equals their width,
                    // we can copy the entire channels in one go
                    u_plane.get(yuv_bytes, yuv_bytes_offset, w * h / 4);
                    yuv_bytes_offset += w * h / 4;
                    v_plane.get(yuv_bytes, yuv_bytes_offset, w * h / 4);
                } else {
                    // When not equal, we need to copy the channels row by row
                    for (int i = 0; i < h / 2; i++) {
                        u_plane.get(yuv_bytes, yuv_bytes_offset, w / 2);
                        yuv_bytes_offset += w / 2;
                        if (i < h / 2 - 1) {
                            u_plane.position(u_plane.position() + chromaRowPadding);
                        }
                    }
                    for (int i = 0; i < h / 2; i++) {
                        v_plane.get(yuv_bytes, yuv_bytes_offset, w / 2);
                        yuv_bytes_offset += w / 2;
                        if (i < h / 2 - 1) {
                            v_plane.position(v_plane.position() + chromaRowPadding);
                        }
                    }
                }

                Mat yuv_mat = new Mat(h + h / 2, w, CvType.CV_8UC1);
                yuv_mat.put(0, 0, yuv_bytes);
                Imgproc.cvtColor(yuv_mat, mRgba, Imgproc.COLOR_YUV2RGBA_I420, 4);
                return mRgba;
            }
        }

        public Yuv4208882Mat(Image image) {
            super();
            mImage = image;
            mRgba = new Mat();
            mGray = new Mat();
        }

        public void release() {
            mRgba.release();
            mGray.release();
        }

        private Image mImage;
        private Mat mRgba;
        private Mat mGray;
    }
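    A short usage sketch inside an ImageReader callback (the rotation constant here is illustrative; choose it per the orientation rules above). Note that gray() wraps the Y-plane buffer directly, so clone the Mat if it must outlive image.close():

    import android.media.Image;
    import org.opencv.core.Core;
    import org.opencv.core.Mat;

    public class FrameHandler {
        void onFrame(Image image) {
            Yuv4208882Mat frame = new Yuv4208882Mat(image);
            Mat rgba = frame.rgba();
            // Rotate to match the current display rotation (see the rules above).
            Core.rotate(rgba, rgba, Core.ROTATE_90_CLOCKWISE);
            // ... pass `rgba` (or frame.gray()) to the detector here ...
            frame.release();
            image.close();
        }
    }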
  2. Example: TFLite Object Detector (ImageFormat.YUV_420_888 to Bitmap)

    private void saveBitmapFromBytes(Image image) {
        Image.Plane[] planes = image.getPlanes();
        byte[][] yuvBytes = new byte[3][];
        // fillBytes(...) and ImageUtils.convertYUV420ToARGB8888(...) come from
        // the TFLite demo; rgbBytes is a reusable int[width * height] buffer.
        fillBytes(planes, yuvBytes);
        int yRowStride = planes[0].getRowStride();
        int uvRowStride = planes[1].getRowStride();
        int uvPixelStride = planes[1].getPixelStride();
        ImageUtils.convertYUV420ToARGB8888(
                yuvBytes[0],
                yuvBytes[1],
                yuvBytes[2],
                mPreviewSize.getWidth(),
                mPreviewSize.getHeight(),
                yRowStride,
                uvRowStride,
                uvPixelStride,
                rgbBytes);
        Bitmap rgbFrameBitmap = Bitmap.createBitmap(mPreviewSize.getWidth(),
                mPreviewSize.getHeight(), Bitmap.Config.ARGB_8888);
        rgbFrameBitmap.setPixels(rgbBytes, 0, mPreviewSize.getWidth(), 0, 0,
                mPreviewSize.getWidth(), mPreviewSize.getHeight());
        File file = new File(activity.getExternalCacheDir() + "/"
                + System.currentTimeMillis() + "_compress.jpeg");
        if (!file.exists()) {
            file.getParentFile().mkdirs();
            try {
                file.createNewFile();
            } catch (IOException e) {
                e.printStackTrace();
                Logger.e(TAG, "[imageAvailableListenerDetect]");
                return;
            }
        }
        try {
            // Quality 10 keeps the debug dump small; raise it for inspection.
            rgbFrameBitmap.compress(Bitmap.CompressFormat.JPEG, 10, new FileOutputStream(file));
        } catch (FileNotFoundException e) {
            e.printStackTrace();
        }
    }
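    For reference, the fillBytes helper called above, essentially as it appears in the TFLite demo: it sizes each destination array from the plane buffer's capacity, since row strides are only known at runtime:

    protected void fillBytes(final Image.Plane[] planes, final byte[][] yuvBytes) {
        // Row strides vary by device, so the actual plane sizes are not known
        // in advance; allocate from the buffer capacity on first use.
        for (int i = 0; i < planes.length; ++i) {
            final ByteBuffer buffer = planes[i].getBuffer();
            if (yuvBytes[i] == null) {
                yuvBytes[i] = new byte[buffer.capacity()];
            }
            buffer.get(yuvBytes[i]);
        }
    }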


  3. Example: some face-recognition SDKs (ImageFormat.YUV_420_888 to NV21)

    public static byte[] getDataFromImage(Image image, int colorFormat) {
        if (colorFormat != COLOR_FormatI420 && colorFormat != COLOR_FormatNV21) {
            throw new IllegalArgumentException("only support COLOR_FormatI420 and COLOR_FormatNV21");
        }
        if (!isImageFormatSupported(image)) {
            throw new RuntimeException("can't convert Image to byte array, format " + image.getFormat());
        }
        Rect crop = image.getCropRect();
        int format = image.getFormat();
        int width = crop.width();
        int height = crop.height();
        Image.Plane[] planes = image.getPlanes();
        byte[] data = new byte[width * height * ImageFormat.getBitsPerPixel(format) / 8];
        byte[] rowData = new byte[planes[0].getRowStride()];
        int channelOffset = 0;
        int outputStride = 1;
        for (int i = 0; i < planes.length; i++) {
            switch (i) {
                case 0:
                    // Y plane: always copied first, tightly packed.
                    channelOffset = 0;
                    outputStride = 1;
                    break;
                case 1:
                    // U plane: contiguous after Y for I420; at odd offsets
                    // after Y for NV21 (VUVU...).
                    if (colorFormat == COLOR_FormatI420) {
                        channelOffset = width * height;
                        outputStride = 1;
                    } else if (colorFormat == COLOR_FormatNV21) {
                        channelOffset = width * height + 1;
                        outputStride = 2;
                    }
                    break;
                case 2:
                    // V plane: after Y+U for I420; at even offsets for NV21.
                    if (colorFormat == COLOR_FormatI420) {
                        channelOffset = (int) (width * height * 1.25);
                        outputStride = 1;
                    } else if (colorFormat == COLOR_FormatNV21) {
                        channelOffset = width * height;
                        outputStride = 2;
                    }
                    break;
            }
            ByteBuffer buffer = planes[i].getBuffer();
            int rowStride = planes[i].getRowStride();
            int pixelStride = planes[i].getPixelStride();
            int shift = (i == 0) ? 0 : 1; // chroma planes are half resolution
            int w = width >> shift;
            int h = height >> shift;
            buffer.position(rowStride * (crop.top >> shift) + pixelStride * (crop.left >> shift));
            for (int row = 0; row < h; row++) {
                int length;
                if (pixelStride == 1 && outputStride == 1) {
                    // Packed source and destination: copy the whole row at once.
                    length = w;
                    buffer.get(data, channelOffset, length);
                    channelOffset += length;
                } else {
                    // Strided source: copy the row, then pick every pixelStride-th byte.
                    length = (w - 1) * pixelStride + 1;
                    buffer.get(rowData, 0, length);
                    for (int col = 0; col < w; col++) {
                        data[channelOffset] = rowData[col * pixelStride];
                        channelOffset += outputStride;
                    }
                }
                if (row < h - 1) {
                    buffer.position(buffer.position() + rowStride - length);
                }
            }
        }
        return data;
    }
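    The constants and format check used above are not part of the snippet. A common definition, given here as an assumption rather than taken from this write-up, is:

    private static final int COLOR_FormatI420 = 1; // assumed values; any two
    private static final int COLOR_FormatNV21 = 2; // distinct tags work

    private static boolean isImageFormatSupported(Image image) {
        switch (image.getFormat()) {
            case ImageFormat.YUV_420_888:
            case ImageFormat.NV21:
            case ImageFormat.YV12:
                return true;
        }
        return false;
    }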

    Summary: the core of converting an Image is to extract the data of the three planes (Y, U, V) and then convert or re-arrange it according to the target format's layout.

Reference examples

  1. OpenCV Android SDK 4.5.2 (download and use it yourself)
  2. TFLite Object Detect