Implement VP8 encoder for chromoting

Added EncoderVp8 with test for chromoting.

TEST=remoting_unittests
BUG=50235

Review URL: http://codereview.chromium.org/3005036

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@60854 0039d316-1c4b-4281-b951-d872f2087c98
Author: hclam@chromium.org
Date:   2010-09-28 22:19:48 +0000
Parent: b045a74052
Commit: df10bf173f
12 changed files with 310 additions and 169 deletions

.gitignore

@ -74,6 +74,8 @@
/third_party/hunspell_dictionaries
/third_party/icu
/third_party/libjingle/source
/third_party/libvpx/lib
/third_party/libvpx/include
/third_party/libsrtp/src
/third_party/libvpx
/third_party/lighttpd

@ -25,6 +25,14 @@ bool InitializeMediaLibrary(const FilePath& module_dir);
// Returns true if OpenMAX was successfully initialized and loaded.
bool InitializeOpenMaxLibrary(const FilePath& module_dir);
// This is temporary, to get the address of vpx_codec_vp8_cx_algo from FFmpeg.
// It should only be called after the media library has been loaded.
// TODO(hclam): Remove this after we have a getter function for the same
// purpose in libvpx.
// See bug: http://code.google.com/p/webm/issues/detail?id=169
void* GetVp8CxAlgoAddress();
} // namespace media
#endif // MEDIA_BASE_MEDIA_H_
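
The TODO above is expected to be resolved by libvpx itself: newer libvpx releases export a getter, vpx_codec_vp8_cx() in vp8cx.h, which returns the VP8 encoder interface directly. A minimal sketch of what a caller could do once such a getter is available in the checked-in headers, making the GetVp8CxAlgoAddress() plumbing unnecessary (a sketch under that assumption, not the current code):

// Sketch only: assumes a libvpx new enough to export vpx_codec_vp8_cx().
extern "C" {
#define VPX_CODEC_DISABLE_COMPAT 1
#include "third_party/libvpx/include/vpx/vpx_encoder.h"
#include "third_party/libvpx/include/vpx/vp8cx.h"
}

vpx_codec_iface_t* GetVp8EncoderInterface() {
  // Replaces the manual lookup of the vpx_codec_vp8_cx_algo symbol.
  return vpx_codec_vp8_cx();
}
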

@ -10,6 +10,7 @@
#include "base/file_path.h"
#include "base/logging.h"
#include "base/native_library.h"
#include "base/path_service.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "third_party/ffmpeg/ffmpeg_stubs.h"
@ -69,6 +70,9 @@ std::string GetDSOName(tp_ffmpeg::StubModules stub_key) {
} // namespace
// Address of vpx_codec_vp8_cx_algo.
static void* vp8_cx_algo_address = NULL;
// Attempts to initialize the media library (loading DSOs, etc.).
// Returns true if everything was successfully initialized, false otherwise.
bool InitializeMediaLibrary(const FilePath& module_dir) {
@ -85,7 +89,18 @@ bool InitializeMediaLibrary(const FilePath& module_dir) {
paths[module].push_back(path.value());
}
return tp_ffmpeg::InitializeStubs(paths);
bool ret = tp_ffmpeg::InitializeStubs(paths);
// TODO(hclam): This is temporary code to obtain the address of
// vpx_codec_vp8_cx_algo. It should be removed once libvpx has a
// getter method for it.
base::NativeLibrary sumo_lib =
base::LoadNativeLibrary(module_dir.Append(sumo_name));
if (sumo_lib) {
vp8_cx_algo_address = base::GetFunctionPointerFromNativeLibrary(
sumo_lib, "vpx_codec_vp8_cx_algo");
}
return ret;
}
#if defined(OS_LINUX)
@ -114,4 +129,8 @@ bool InitializeOpenMaxLibrary(const FilePath& module_dir) {
}
#endif
void* GetVp8CxAlgoAddress() {
return vp8_cx_algo_address;
}
} // namespace media
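
On Linux the lookup above is the standard dynamic-loader pattern: base::LoadNativeLibrary and base::GetFunctionPointerFromNativeLibrary wrap dlopen and dlsym on this platform. A rough, self-contained sketch of the equivalent lookup; the function name and path argument are illustrative, not the Chromium API:

#include <dlfcn.h>

// Roughly equivalent to the base::NativeLibrary calls above: open the FFmpeg
// "sumo" shared object and resolve the exported vpx_codec_vp8_cx_algo symbol.
static void* LookupVp8CxAlgo(const char* sumo_path) {
  void* handle = dlopen(sumo_path, RTLD_LAZY);
  if (!handle)
    return NULL;  // Library not found; VP8 encoding stays unavailable.
  return dlsym(handle, "vpx_codec_vp8_cx_algo");
}
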

@ -8,6 +8,7 @@
#include "base/file_path.h"
#include "base/logging.h"
#include "base/native_library.h"
#include "base/path_service.h"
#include "base/scoped_ptr.h"
@ -47,6 +48,9 @@ FilePath::CharType* GetDLLName(FFmpegDLLKeys dll_key) {
} // namespace
// Address of vpx_codec_vp8_cx_algo.
static void* vp8_cx_algo_address = NULL;
// Attempts to initialize the media library (loading DLLs, DSOs, etc.).
// Returns true if everything was successfully initialized, false otherwise.
bool InitializeMediaLibrary(const FilePath& base_path) {
@ -78,6 +82,16 @@ bool InitializeMediaLibrary(const FilePath& base_path) {
#endif
}
// TODO(hclam): This is temporary code to obtain the address of
// vpx_codec_vp8_cx_algo. It should be removed once libvpx has a
// getter method for it.
base::NativeLibrary avcodec_lib =
base::LoadNativeLibrary(FilePath(GetDLLName(media::FILE_LIBAVCODEC)));
if (avcodec_lib) {
vp8_cx_algo_address = base::GetFunctionPointerFromNativeLibrary(
avcodec_lib, "vpx_codec_vp8_cx_algo");
}
// Check that we loaded all libraries successfully. We only need to check the
// last array element because the loop above will break without initializing
// it on any prior error.
@ -97,4 +111,8 @@ bool InitializeOpenMaxLibrary(const FilePath& module_dir) {
return false;
}
void* GetVp8CxAlgoAddress() {
return vp8_cx_algo_address;
}
} // namespace media

@ -78,9 +78,10 @@ class EncoderMessageTester {
~EncoderMessageTester() {
EXPECT_EQ(begin_rect_, end_rect_);
EXPECT_GT(begin_rect_, 0);
EXPECT_EQ(kWaitingForBeginRect, state_);
if (strict_){
EXPECT_EQ(begin_rect_, added_rects_);
if (strict_) {
EXPECT_EQ(added_rects_, begin_rect_);
}
}

@ -5,129 +5,190 @@
#include "base/logging.h"
#include "media/base/callback.h"
#include "media/base/data_buffer.h"
#include "remoting/host/encoder_vp8.h"
#include "media/base/media.h"
#include "remoting/base/capture_data.h"
#include "remoting/base/encoder_vp8.h"
extern "C" {
// TODO(garykac): Fix with correct path to vp8 header.
#include "remoting/third_party/on2/include/vp8cx.h"
#define VPX_CODEC_DISABLE_COMPAT 1
#include "third_party/libvpx/include/vpx/vpx_codec.h"
#include "third_party/libvpx/include/vpx/vpx_encoder.h"
#include "third_party/libvpx/include/vpx/vp8cx.h"
}
namespace remoting {
EncoderVp8::EncoderVp8()
: initialized_(false),
codec_(NULL),
image_(NULL),
last_timestamp_(0) {
}
EncoderVp8::~EncoderVp8() {
if (initialized_) {
vpx_codec_err_t ret = vpx_codec_destroy(codec_.get());
DCHECK(ret == VPX_CODEC_OK) << "Failed to destroy codec";
}
}
bool EncoderVp8::Init() {
// TODO(hclam): Now always assume we receive YV12. May need to extend this
// so we can do color space conversion manually.
image_.fmt = IMG_FMT_YV12;
image_.w = width_;
image_.h = height_;
bool EncoderVp8::Init(int width, int height) {
codec_.reset(new vpx_codec_ctx_t());
image_.reset(new vpx_image_t());
memset(image_.get(), 0, sizeof(vpx_image_t));
on2_codec_enc_cfg_t config;
on2_codec_err_t result = on2_codec_enc_config_default(&on2_codec_vp8_cx_algo,
&config, 0);
image_->fmt = VPX_IMG_FMT_YV12;
// TODO(hclam): Adjust the parameters.
config.g_w = width_;
config.g_h = height_;
config.g_pass = ON2_RC_ONE_PASS;
// libvpx seems to require both to be assigned.
image_->d_w = width;
image_->w = width;
image_->d_h = height;
image_->h = height;
vpx_codec_enc_cfg_t config;
const vpx_codec_iface_t* algo =
(const vpx_codec_iface_t*)media::GetVp8CxAlgoAddress();
vpx_codec_err_t ret = vpx_codec_enc_config_default(algo, &config, 0);
if (ret != VPX_CODEC_OK)
return false;
// TODO(hclam): Tune the parameters to better suit the application.
config.rc_target_bitrate = width * height * config.rc_target_bitrate
/ config.g_w / config.g_h;
config.g_w = width;
config.g_h = height;
config.g_pass = VPX_RC_ONE_PASS;
config.g_profile = 1;
config.g_threads = 2;
config.rc_target_bitrate = 1000000;
config.rc_min_quantizer = 0;
config.rc_max_quantizer = 15;
config.g_timebase.num = 1;
config.g_timebase.den = 30;
if (on2_codec_enc_init(&codec_, &on2_codec_vp8_cx_algo, &config, 0))
if (vpx_codec_enc_init(codec_.get(), algo, &config, 0))
return false;
on2_codec_control_(&codec_, VP8E_SET_CPUUSED, -15);
return true;
}
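
One subtlety in Init() above: rc_target_bitrate is rescaled while config still holds the defaults returned by vpx_codec_enc_config_default(), and only afterwards are g_w and g_h overwritten, so the default bits-per-pixel is preserved at the new resolution. A worked example with illustrative default values (the real defaults come from libvpx, not from this sketch):

// Illustrative arithmetic only; the actual defaults are whatever
// vpx_codec_enc_config_default() returns. Suppose 320x240 at 256 kbit/s.
const unsigned int default_w = 320, default_h = 240, default_kbps = 256;
const unsigned int width = 1024, height = 768;

// Same scaling as Init(): the bitrate grows with the pixel count.
const unsigned int target_kbps =
    width * height * default_kbps / default_w / default_h;
// 1024 * 768 * 256 / 320 / 240 == 2621 kbit/s (integer division).
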
void EncoderVp8::Encode(const DirtyRects& dirty_rects,
const uint8** input_data,
const int* strides,
bool key_frame,
UpdateStreamPacketHeader* header,
scoped_refptr<media::DataBuffer>* output_data,
bool* encode_done,
Task* data_available_task) {
// This will allow the task to be run when this method exits.
media::AutoTaskRunner task(data_available_task);
*encode_done = false;
bool EncoderVp8::PrepareImage(scoped_refptr<CaptureData> capture_data) {
if (!yuv_image_.get()) {
const int plane_size = capture_data->width() * capture_data->height();
// TODO(hclam): We only initialize the encoder once. We may have to
// allow the encoder to be initialized with different sizes.
if (!initialized_) {
if (!Init()) {
LOG(ERROR) << "Can't initialize VP8 encoder";
return;
}
initialized_ = true;
// The YUV image size is 1.5 times the size of a plane. Multiplication is
// performed first to avoid rounding error.
const int size = plane_size * 3 / 2;
yuv_image_.reset(new uint8[size]);
// Fill the buffer with 128 (neutral chroma) so we only need to fill in the
// Y plane.
memset(yuv_image_.get(), 128, size);
// Fill in the information for |image_|.
unsigned char* image = reinterpret_cast<unsigned char*>(yuv_image_.get());
image_->planes[0] = image;
image_->planes[1] = image + plane_size;
// The V plane starts at 1.25 times the plane size.
image_->planes[2] = image + plane_size + plane_size / 4;
// In YV12 the Y plane has full width; the U and V planes have half width
// because of chroma subsampling.
image_->stride[0] = image_->w;
image_->stride[1] = image_->w / 2;
image_->stride[2] = image_->w / 2;
}
// Assume the capturer has done the color space conversion.
if (!input_data || !strides)
return;
// And then do RGB->YUV conversion.
// Currently we just produce the Y channel as the average of RGB. This will
// give a grayscale image after conversion.
// TODO(hclam): Implement the actual color space conversion.
DCHECK(capture_data->pixel_format() == PixelFormatRgb32)
<< "Only RGB32 is supported";
uint8* in = capture_data->data_planes().data[0];
const int in_stride = capture_data->data_planes().strides[0];
uint8* out = yuv_image_.get();
const int out_stride = image_->stride[0];
for (int i = 0; i < capture_data->height(); ++i) {
for (int j = 0; j < capture_data->width(); ++j) {
// Since the input pixel format is RGB32, there are 4 bytes per pixel.
uint8* pixel = in + 4 * j;
out[j] = (pixel[0] + pixel[1] + pixel[2]) / 3;
}
in += in_stride;
out += out_stride;
}
return true;
}
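
The RGB-to-Y loop above averages the three channels, which yields a grayscale picture, as the TODO notes. If proper color conversion is wanted later, a standard BT.601 luma approximation is a drop-in replacement for the average; this is a sketch only, and the RGB32 byte order is an assumption that should be checked against the capturer:

// Sketch of a BT.601 luma approximation that could replace the RGB average.
// U and V stay at 128 (neutral chroma), exactly as PrepareImage() leaves them.
static inline uint8 RgbToY(uint8 r, uint8 g, uint8 b) {
  // Integer form of Y = 16 + 0.257*R + 0.504*G + 0.098*B (studio range).
  return static_cast<uint8>(((66 * r + 129 * g + 25 * b + 128) >> 8) + 16);
}

// Usage inside the per-pixel loop, assuming the RGB32 layout is B,G,R,A:
//   out[j] = RgbToY(pixel[2], pixel[1], pixel[0]);
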
image_.planes[0] = (unsigned char*)input_data[0];
image_.planes[1] = (unsigned char*)input_data[1];
image_.planes[2] = (unsigned char*)input_data[2];
image_.stride[0] = strides[0];
image_.stride[1] = strides[1];
image_.stride[2] = strides[2];
void EncoderVp8::Encode(scoped_refptr<CaptureData> capture_data,
bool key_frame,
DataAvailableCallback* data_available_callback) {
if (!initialized_) {
bool ret = Init(capture_data->width(), capture_data->height());
// TODO(hclam): Handle error better.
DCHECK(ret) << "Initialization of encoder failed";
initialized_ = ret;
}
if (!PrepareImage(capture_data)) {
NOTREACHED() << "Can't prepare image data for encoding";
}
// Do the actual encoding.
if (on2_codec_encode(&codec_, &image_,
last_timestamp_, 1, 0, ON2_DL_REALTIME)) {
return;
}
vpx_codec_err_t ret = vpx_codec_encode(codec_.get(), image_.get(),
last_timestamp_,
1, 0, VPX_DL_REALTIME);
DCHECK(ret == VPX_CODEC_OK) << "Encoding error: "
<< vpx_codec_err_to_string(ret)
<< "\n"
<< "Details: "
<< vpx_codec_error(codec_.get())
<< "\n"
<< vpx_codec_error_detail(codec_.get());
// TODO(hclam): fix this.
last_timestamp_ += 100;
// Read the encoded data.
on2_codec_iter_t iter = NULL;
vpx_codec_iter_t iter = NULL;
bool got_data = false;
// TODO(hclam): We assume one frame of input will get exactly one frame of
// output. This assumption may not be valid.
// TODO(hclam): Make sure we get exactly one frame from the packet.
// TODO(hclam): We should provide the output buffer to avoid one copy.
ChromotingHostMessage* message = new ChromotingHostMessage();
UpdateStreamPacketMessage* packet = message->mutable_update_stream_packet();
// Prepare the begin rect.
packet->mutable_begin_rect()->set_x(0);
packet->mutable_begin_rect()->set_y(0);
packet->mutable_begin_rect()->set_width(capture_data->width());
packet->mutable_begin_rect()->set_height(capture_data->height());
packet->mutable_begin_rect()->set_encoding(EncodingVp8);
packet->mutable_begin_rect()->set_pixel_format(PixelFormatYv12);
while (!got_data) {
on2_codec_cx_pkt_t* packet = on2_codec_get_cx_data(&codec_, &iter);
const vpx_codec_cx_pkt_t* packet = vpx_codec_get_cx_data(codec_.get(),
&iter);
if (!packet)
continue;
switch (packet->kind) {
case ON2_CODEC_CX_FRAME_PKT:
case VPX_CODEC_CX_FRAME_PKT:
got_data = true;
*encode_done = true;
*output_data = new media::DataBuffer(packet->data.frame.sz);
memcpy((*output_data)->GetWritableData(),
packet->data.frame.buf,
packet->data.frame.sz);
message->mutable_update_stream_packet()->mutable_rect_data()->set_data(
packet->data.frame.buf, packet->data.frame.sz);
break;
default:
break;
}
}
return;
}
void EncoderVp8::SetSize(int width, int height) {
width_ = width;
height_ = height;
}
void EncoderVp8::SetPixelFormat(PixelFormat pixel_format) {
pixel_format_ = pixel_format;
// Add the end rect.
message->mutable_update_stream_packet()->mutable_end_rect();
data_available_callback->Run(
message,
EncodingStarting | EncodingInProgress | EncodingEnded);
delete data_available_callback;
}
} // namespace remoting
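
The encode path above assumes exactly one compressed frame packet per encode call, which the remaining TODO flags. Typical libvpx usage drains vpx_codec_get_cx_data() until it returns NULL, keeps only VPX_CODEC_CX_FRAME_PKT packets, and checks VPX_FRAME_IS_KEY. A self-contained sketch of that drain loop; it collects the output into a std::string for brevity rather than building a ChromotingHostMessage:

#include <string>

extern "C" {
#define VPX_CODEC_DISABLE_COMPAT 1
#include "third_party/libvpx/include/vpx/vpx_encoder.h"
}

// Drain every pending output packet instead of assuming a single frame.
std::string DrainEncodedFrames(vpx_codec_ctx_t* codec, bool* got_key_frame) {
  std::string encoded;
  vpx_codec_iter_t iter = NULL;
  const vpx_codec_cx_pkt_t* packet;
  while ((packet = vpx_codec_get_cx_data(codec, &iter)) != NULL) {
    if (packet->kind != VPX_CODEC_CX_FRAME_PKT)
      continue;  // Skip stats and other non-frame packets.
    encoded.append(static_cast<const char*>(packet->data.frame.buf),
                   packet->data.frame.sz);
    *got_key_frame = (packet->data.frame.flags & VPX_FRAME_IS_KEY) != 0;
  }
  return encoded;
}
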

@ -5,20 +5,11 @@
#ifndef REMOTING_BASE_ENCODER_VP8_H_
#define REMOTING_BASE_ENCODER_VP8_H_
#include "remoting/host/encoder.h"
#include "remoting/base/encoder.h"
#include "remoting/base/protocol/chromotocol.pb.h"
extern "C" {
// TODO(garykac): fix this link with the correct path to on2
#include "remoting/third_party/on2/include/on2_encoder.h"
} // extern "C"
namespace media {
class DataBuffer;
} // namespace media
typedef struct vpx_codec_ctx vpx_codec_ctx_t;
typedef struct vpx_image vpx_image_t;
namespace remoting {
@ -28,31 +19,27 @@ class EncoderVp8 : public Encoder {
EncoderVp8();
virtual ~EncoderVp8();
virtual void Encode(const DirtyRects& dirty_rects,
const uint8** input_data,
const int* strides,
virtual void Encode(scoped_refptr<CaptureData> capture_data,
bool key_frame,
UpdateStreamPacketHeader* header,
scoped_refptr<media::DataBuffer>* output_data,
bool* encode_done,
Task* data_available_task);
virtual void SetSize(int width, int height);
virtual void SetPixelFormat(PixelFormat pixel_format);
DataAvailableCallback* data_available_callback);
private:
// Setup the VP8 encoder.
bool Init();
// Initialize the encoder. Returns true if successful.
bool Init(int width, int height);
// Prepare |image_| for encoding. Returns true if successful.
bool PrepareImage(scoped_refptr<CaptureData> capture_data);
// True if the encoder is initialized.
bool initialized_;
int width_;
int height_;
PixelFormat pixel_format_;
on2_codec_ctx_t codec_;
on2_image_t image_;
scoped_ptr<vpx_codec_ctx_t> codec_;
scoped_ptr<vpx_image_t> image_;
int last_timestamp_;
// Buffer for storing the yuv image.
scoped_array<uint8> yuv_image_;
DISALLOW_COPY_AND_ASSIGN(EncoderVp8);
};

@ -2,67 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/base/data_buffer.h"
#include "remoting/base/pixel_format.h"
#include "remoting/host/encoder_vp8.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "remoting/base/codec_test.h"
#include "remoting/base/encoder_vp8.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace remoting {
static const int kWidth = 1024;
static const int kHeight = 768;
static const PixelFormat kPixelFormat = kPixelFormat_YV12;
static void GenerateData(uint8* data, int size) {
for (int i = 0; i < size; ++i) {
data[i] = i;
}
}
class EncodeDoneHandler
: public base::RefCountedThreadSafe<EncodeDoneHandler> {
public:
MOCK_METHOD0(EncodeDone, void());
};
TEST(EncoderVp8Test, SimpleEncode) {
TEST(EncoderVp8Test, TestEncoder) {
EncoderVp8 encoder;
encoder.SetSize(kWidth, kHeight);
encoder.SetPixelFormat(kPixelFormat);
DirtyRects rects;
rects.push_back(gfx::Rect(kWidth, kHeight));
// Prepare memory for encoding.
int strides[3];
strides[0] = kWidth;
strides[1] = strides[2] = kWidth / 2;
uint8* planes[3];
planes[0] = new uint8[kWidth * kHeight];
planes[1] = new uint8[kWidth * kHeight / 4];
planes[2] = new uint8[kWidth * kHeight / 4];
GenerateData(planes[0], kWidth * kHeight);
GenerateData(planes[1], kWidth * kHeight / 4);
GenerateData(planes[2], kWidth * kHeight / 4);
scoped_refptr<EncodeDoneHandler> handler = new EncodeDoneHandler();
UpdateStreamPacketHeader* header = new UpdateStreamPacketHeader();
scoped_refptr<media::DataBuffer> encoded_data;
bool encode_done = false;
EXPECT_CALL(*handler, EncodeDone());
encoder.Encode(rects, const_cast<const uint8**>(planes),
strides, true, header, &encoded_data, &encode_done,
NewRunnableMethod(handler.get(),
&EncodeDoneHandler::EncodeDone));
EXPECT_TRUE(encode_done);
ASSERT_TRUE(encoded_data.get());
EXPECT_NE(0u, encoded_data->GetBufferSize());
delete [] planes[0];
delete [] planes[1];
delete [] planes[2];
TestEncoder(&encoder, false);
}
} // namespace remoting

@ -34,6 +34,7 @@ enum UpdateStreamEncoding {
EncodingInvalid = -1;
EncodingNone = 0;
EncodingZlib = 1;
EncodingVp8 = 2;
}
// Identifies the pixel format.

@ -114,18 +114,15 @@
'../gfx/gfx.gyp:gfx',
'../media/media.gyp:media',
'../third_party/protobuf2/protobuf.gyp:protobuf_lite',
'../third_party/libvpx/libvpx.gyp:libvpx_include',
'../third_party/zlib/zlib.gyp:zlib',
'base/protocol/chromotocol.gyp:chromotocol_proto_lib',
'base/protocol/chromotocol.gyp:trace_proto_lib',
'chromoting_jingle_glue',
# TODO(hclam): Enable VP8 in the build.
#'third_party/on2/on2.gyp:vp8',
],
'export_dependent_settings': [
'../third_party/protobuf2/protobuf.gyp:protobuf_lite',
'base/protocol/chromotocol.gyp:chromotocol_proto_lib',
# TODO(hclam): Enable VP8 in the build.
#'third_party/on2/on2.gyp:vp8',
],
# This target needs a hard dependency because dependent targets
# depend on chromotocol_proto_lib for headers.
@ -148,11 +145,10 @@
'base/encoder.h',
'base/encoder_verbatim.cc',
'base/encoder_verbatim.h',
'base/encoder_vp8.cc',
'base/encoder_vp8.h',
'base/encoder_zlib.cc',
'base/encoder_zlib.h',
# TODO(hclam): Enable VP8 in the build.
#'base/encoder_vp8.cc',
#'base/encoder_vp8.h',
'base/multiple_array_input_stream.cc',
'base/multiple_array_input_stream.h',
'base/protocol_decoder.cc',
@ -407,8 +403,7 @@
'base/decoder_zlib_unittest.cc',
'base/decompressor_zlib_unittest.cc',
'base/encoder_verbatim_unittest.cc',
# TODO(hclam): Enable VP8 in the build.
#'base/encoder_vp8_unittest.cc',
'base/encoder_vp8_unittest.cc',
'base/encoder_zlib_unittest.cc',
'base/mock_objects.h',
'base/multiple_array_input_stream_unittest.cc',

@ -2,8 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/file_path.h"
#include "base/logging.h"
#include "base/path_service.h"
#include "base/test/test_suite.h"
#include "media/base/media.h"
int main(int argc, char** argv) {
return base::TestSuite(argc, argv).Run();
base::TestSuite test_suite(argc, argv);
// Load the media library so we can use libvpx.
FilePath path;
PathService::Get(base::DIR_MODULE, &path);
CHECK(media::InitializeMediaLibrary(path))
<< "Cannot load media library";
return test_suite.Run();
}

third_party/libvpx/libvpx.gyp (new file)

@ -0,0 +1,90 @@
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
# libvpx_lib is currently not being used since we use libvpx inside
# libavcodec. Keeping it in case we need it later.
{
'target_name': 'libvpx_lib',
'type': 'none',
'variables': {
'libvpx_lib': 'libvpx.a',
},
'conditions': [
# This section specifies the folder in which to look for libvpx.a.
#
['OS=="linux" and target_arch=="ia32"', {
'variables': {
'libvpx_path': 'lib/linux/ia32',
},
}],
['OS=="linux" and target_arch=="x64"', {
'variables': {
'libvpx_path': 'lib/linux/x64',
},
}],
['OS=="linux" and target_arch=="arm" and arm_neon==1', {
'variables': {
'libvpx_path': 'lib/linux/arm-neon',
},
}],
['OS=="linux" and target_arch=="arm" and arm_neon==0', {
'variables': {
'libvpx_path': 'lib/linux/arm',
},
}],
['OS=="win"', {
'variables': {
'libvpx_path': 'lib/win/ia32',
},
}],
['OS=="mac"', {
'variables': {
'libvpx_path': 'lib/mac/ia32',
},
}],
],
'actions': [
{
'action_name': 'copy_lib',
'inputs': [
'<(libvpx_path)/<(libvpx_lib)',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/<(libvpx_lib)',
],
'action': [
'cp',
'<(libvpx_path)/<(libvpx_lib)',
'<(SHARED_INTERMEDIATE_DIR)/<(libvpx_lib)',
],
'message': 'Copying libvpx.a into <(SHARED_INTERMEDIATE_DIR)',
},
],
'all_dependent_settings': {
'link_settings': {
'libraries': [
'<(SHARED_INTERMEDIATE_DIR)/<(libvpx_lib)',
],
},
},
},
{
'target_name': 'libvpx_include',
'type': 'none',
'direct_dependent_settings': {
'include_dirs': [
'include',
],
},
}
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2: