Commit 43e89eb1 authored by Mehrdad Hessar, committed by Commit Bot

This CL adds TensorFlow Lite inference to Chrome.

Change-Id: I5130c97e01eaa7a8fe260058fe9c98740f1028b9
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2321717
Commit-Queue: Mehrdad Hessar <mehrdadh@google.com>
Reviewed-by: Ryan Sturm <ryansturm@chromium.org>
Reviewed-by: Michael Crouse <mcrouse@chromium.org>
Cr-Commit-Position: refs/heads/master@{#794342}
parent 36b8aa19
import("//build/buildflag_header.gni")
import("//build/config/features.gni")
import("//chrome/services/machine_learning/features.gni")
source_set("machine_learning") {
sources = [
@@ -14,7 +16,23 @@ source_set("machine_learning") {
"//mojo/public/cpp/bindings",
]
if (build_with_tflite_lib) {
sources += [
"machine_learning_tflite_predictor.cc",
"machine_learning_tflite_predictor.h",
]
deps += [
":tflite_lib",
":tflite_simple_test",
]
lib_dirs = [ "$root_out_dir" ]
libs = [ "tensorflowlite_c" ]
}
public_deps = [
":machine_learning_tflite_buildflags",
"//chrome/services/machine_learning/public/cpp:decision_tree",
"//chrome/services/machine_learning/public/mojom",
"//mojo/public/mojom/base",
@@ -32,6 +50,10 @@ source_set("unit_tests") {
"public/cpp/decision_tree_model_unittest.cc",
]
if (build_with_tflite_lib) {
sources += [ "machine_learning_tflite_predictor_unittest.cc" ]
}
deps = [
":machine_learning",
"//base",
@@ -41,3 +63,20 @@ source_set("unit_tests") {
"//testing/gtest",
]
}
if (build_with_tflite_lib) {
copy("tflite_simple_test") {
sources = [ "//chrome/test/data/simple_test.tflite" ]
outputs = [ "$root_out_dir/test_data/simple_test.tflite" ]
}
copy("tflite_lib") {
sources = [ "//third_party/tensorflow/libtensorflowlite_c.so" ]
outputs = [ "$root_out_dir/libtensorflowlite_c.so" ]
}
}
buildflag_header("machine_learning_tflite_buildflags") {
header = "machine_learning_tflite_buildflags.h"
flags = [ "BUILD_WITH_TFLITE_LIB=$build_with_tflite_lib" ]
}
include_rules = [
"+components/optimization_guide",
"+components/optimization_guide/proto",
]
@@ -2,3 +2,32 @@
This is a service for sandboxed evaluations of machine learning models.
([Design doc](https://docs.google.com/document/d/1i5uSTFe3uKwHifVQ0aFs6kYfsGlCt_ZGCiOgqcYM0Tg/edit?usp=sharing))
To build Chrome with the TFLite library, follow the instructions below. The unit tests use a [simple TFLite model](../../test/data/simple_test.tflite), a small sequential model defined as follows:
```python
input_shape = (32, 32, 3)
model = tf.keras.models.Sequential([
tf.keras.Input(shape=input_shape, dtype=np.float32),
tf.keras.layers.Conv2D(16, 3, strides=(1, 1), activation='relu', padding='same',
input_shape=input_shape),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(10),
])
```
Build the TensorFlow Lite library:
- Clone https://github.com/tensorflow/tensorflow
- cd tensorflow
- bazel build tensorflow/lite/c:libtensorflowlite_c.so
- Copy the resulting libtensorflowlite_c.so into chromium/src/third_party/tensorflow
Copy headers:
- Copy c_api.h and common.h from [tensorflow/lite/c](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/c) into third_party/tensorflow/lite/c
Build TFLite in Chrome:
- Set the GN arg build_with_tflite_lib=true
- Uncomment the third-party include in the [predictor header](./machine_learning_tflite_predictor.h); a usage sketch follows below.
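With the flag enabled, the predictor can be driven along the lines of the unit test. Below is a minimal, untested sketch; the model path and the `RunSimpleModel` wrapper are placeholders for illustration, and the tensor shapes match the simple_test model (1x32x32x3 in, 1x10 out):
```c++
#include "base/logging.h"
#include "chrome/services/machine_learning/machine_learning_tflite_predictor.h"

void RunSimpleModel() {
  // Placeholder path; the unit test loads the copy of the model placed in
  // $root_out_dir/test_data/simple_test.tflite.
  machine_learning::TFLitePredictor predictor("/path/to/simple_test.tflite");
  if (predictor.Initialize() != kTfLiteOk)
    return;

  // Fill the single 1x32x32x3 float input tensor with ones.
  int32_t num_elements = 1;
  for (int32_t d = 0; d < predictor.GetInputTensorNumDims(0); ++d)
    num_elements *= predictor.GetInputTensorDim(0, d);
  float* input = static_cast<float*>(predictor.GetInputTensorData(0));
  for (int32_t i = 0; i < num_elements; ++i)
    input[i] = 1.0f;

  if (predictor.Evaluate() != kTfLiteOk)
    return;

  // Read back the 1x10 output scores.
  const float* output =
      static_cast<const float*>(predictor.GetOutputTensorData(0));
  for (int32_t i = 0; i < predictor.GetOutputTensorDim(0, 1); ++i)
    LOG(INFO) << "score[" << i << "] = " << output[i];
}
```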
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
declare_args() {
# Enables building Chrome with the TFLite library.
build_with_tflite_lib = false
}
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/services/machine_learning/machine_learning_tflite_predictor.h"
#include "base/check.h"
namespace machine_learning {
TFLitePredictor::TFLitePredictor(std::string filename)
: model_file_name_(filename) {}
TFLitePredictor::~TFLitePredictor() = default;
TfLiteStatus TFLitePredictor::Initialize() {
if (!LoadModel())
return kTfLiteError;
if (!BuildInterpreter())
return kTfLiteError;
TfLiteStatus status = AllocateTensors();
if (status == kTfLiteOk)
initialized_ = true;
return status;
}
TfLiteStatus TFLitePredictor::Evaluate() {
return TfLiteInterpreterInvoke(interpreter_.get());
}
bool TFLitePredictor::LoadModel() {
if (model_file_name_.empty())
return false;
// |TfLiteModel| is an opaque C struct with no public destructor, so we pair
// the pointer with its C API deleter, TfLiteModelDelete().
model_ = std::unique_ptr<TfLiteModel, std::function<void(TfLiteModel*)>>(
TfLiteModelCreateFromFile(model_file_name_.c_str()), &TfLiteModelDelete);
if (model_ == nullptr)
return false;
return true;
}
bool TFLitePredictor::BuildInterpreter() {
// |TfLiteInterpreterOptions| is an opaque C struct with no public destructor,
// so we pair the pointer with its C API deleter,
// TfLiteInterpreterOptionsDelete().
options_ = std::unique_ptr<TfLiteInterpreterOptions,
std::function<void(TfLiteInterpreterOptions*)>>(
TfLiteInterpreterOptionsCreate(), &TfLiteInterpreterOptionsDelete);
if (options_ == nullptr)
return false;
// |TfLiteInterpreter| is an opaque C struct with no public destructor, so we
// pair the pointer with its C API deleter, TfLiteInterpreterDelete().
interpreter_ = std::unique_ptr<TfLiteInterpreter,
std::function<void(TfLiteInterpreter*)>>(
TfLiteInterpreterCreate(model_.get(), options_.get()),
&TfLiteInterpreterDelete);
if (interpreter_ == nullptr)
return false;
return true;
}
TfLiteStatus TFLitePredictor::AllocateTensors() {
TfLiteStatus status = TfLiteInterpreterAllocateTensors(interpreter_.get());
DCHECK(status == kTfLiteOk);
return status;
}
int32_t TFLitePredictor::GetInputTensorCount() const {
if (interpreter_ == nullptr)
return 0;
return TfLiteInterpreterGetInputTensorCount(interpreter_.get());
}
int32_t TFLitePredictor::GetOutputTensorCount() const {
if (interpreter_ == nullptr)
return 0;
return TfLiteInterpreterGetOutputTensorCount(interpreter_.get());
}
// TODO: change this to private
TfLiteTensor* TFLitePredictor::GetInputTensor(int32_t index) const {
if (interpreter_ == nullptr)
return nullptr;
return TfLiteInterpreterGetInputTensor(interpreter_.get(), index);
}
const TfLiteTensor* TFLitePredictor::GetOutputTensor(int32_t index) const {
if (interpreter_ == nullptr)
return nullptr;
return TfLiteInterpreterGetOutputTensor(interpreter_.get(), index);
}
bool TFLitePredictor::IsInitialized() const {
return initialized_;
}
int32_t TFLitePredictor::GetInputTensorNumDims(int32_t tensor_index) const {
TfLiteTensor* tensor = GetInputTensor(tensor_index);
return TfLiteTensorNumDims(tensor);
}
int32_t TFLitePredictor::GetInputTensorDim(int32_t tensor_index,
int32_t dim_index) const {
TfLiteTensor* tensor = GetInputTensor(tensor_index);
return TfLiteTensorDim(tensor, dim_index);
}
void* TFLitePredictor::GetInputTensorData(int32_t tensor_index) const {
TfLiteTensor* tensor = GetInputTensor(tensor_index);
return TfLiteTensorData(tensor);
}
int32_t TFLitePredictor::GetOutputTensorNumDims(int32_t tensor_index) const {
const TfLiteTensor* tensor = GetOutputTensor(tensor_index);
return TfLiteTensorNumDims(tensor);
}
int32_t TFLitePredictor::GetOutputTensorDim(int32_t tensor_index,
int32_t dim_index) const {
const TfLiteTensor* tensor = GetOutputTensor(tensor_index);
return TfLiteTensorDim(tensor, dim_index);
}
void* TFLitePredictor::GetOutputTensorData(int32_t tensor_index) const {
const TfLiteTensor* tensor = GetOutputTensor(tensor_index);
return TfLiteTensorData(tensor);
}
} // namespace machine_learning
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_SERVICES_MACHINE_LEARNING_MACHINE_LEARNING_TFLITE_PREDICTOR_H_
#define CHROME_SERVICES_MACHINE_LEARNING_MACHINE_LEARNING_TFLITE_PREDICTOR_H_
#include <functional>
#include <memory>
#include <string>
#include "base/memory/ptr_util.h"
#include "chrome/common/buildflags.h"
#include "chrome/services/machine_learning/machine_learning_tflite_buildflags.h"
#if BUILDFLAG(BUILD_WITH_TFLITE_LIB)
// TODO(mcrouse): add tensorflow to third_party, and add appropriate DEPS.
// #include "third_party/tensorflow/lite/c/c_api.h"
#endif
namespace machine_learning {
// A predictor wrapping the TFLite C API for model evaluation.
class TFLitePredictor {
public:
// Creates a |TFLitePredictor| from the path to a TFLite model file.
explicit TFLitePredictor(std::string filename);
~TFLitePredictor();
// Loads the model, builds the TFLite interpreter, and allocates tensors.
TfLiteStatus Initialize();
// Invokes TFLite interpreter.
TfLiteStatus Evaluate();
// Returns number of input tensors.
int32_t GetInputTensorCount() const;
// Returns number of output tensors.
int32_t GetOutputTensorCount() const;
// Returns input tensor with |index| value starting from 0.
TfLiteTensor* GetInputTensor(int32_t index) const;
// Returns output tensor with |index| value starting from 0.
const TfLiteTensor* GetOutputTensor(int32_t index) const;
// Returns |initialized_|.
bool IsInitialized() const;
// Returns number of dimensions of input tensor |tensor_index|.
int32_t GetInputTensorNumDims(int32_t tensor_index) const;
// Returns value of dimension |dim_index| of input tensor |tensor_index|.
int32_t GetInputTensorDim(int32_t tensor_index, int32_t dim_index) const;
// Returns data pointer to input tensor with index |tensor_index|.
void* GetInputTensorData(int32_t tensor_index) const;
// Returns number of dimensions of output tensor |tensor_index|.
int32_t GetOutputTensorNumDims(int32_t tensor_index) const;
// Returns value of dimension |dim_index| of output tensor |tensor_index|.
int32_t GetOutputTensorDim(int32_t tensor_index, int32_t dim_index) const;
// Returns data pointer to output tensor with index |tensor_index|.
void* GetOutputTensorData(int32_t tensor_index) const;
private:
// Loads TFLite model.
bool LoadModel();
// Builds TFLite interpreter.
bool BuildInterpreter();
// Allocates tensors for the currently loaded model.
TfLiteStatus AllocateTensors();
std::string model_file_name_;
std::unique_ptr<TfLiteModel, std::function<void(TfLiteModel*)>> model_;
std::unique_ptr<TfLiteInterpreterOptions,
std::function<void(TfLiteInterpreterOptions*)>>
options_;
std::unique_ptr<TfLiteInterpreter, std::function<void(TfLiteInterpreter*)>>
interpreter_;
// True if TFLite interpreter is initialized.
bool initialized_ = false;
};
} // namespace machine_learning
#endif // CHROME_SERVICES_MACHINE_LEARNING_MACHINE_LEARNING_TFLITE_PREDICTOR_H_
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/services/machine_learning/machine_learning_tflite_predictor.h"
#include <string>
#include "base/logging.h"
#include "base/macros.h"
#include "base/path_service.h"
#include "base/test/task_environment.h"
#include "chrome/common/chrome_paths.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace machine_learning {
class TFLitePredictorTest : public ::testing::Test {
public:
const int32_t kInputTensorNum = 1;
const int32_t kOutputTensorNum = 1;
const int32_t kInputTensorDims = 4;
const int32_t kOutputTensorDims = 2;
const int32_t kInputTensorDim0 = 1;
const int32_t kInputTensorDim1 = 32;
const int32_t kInputTensorDim2 = 32;
const int32_t kInputTensorDim3 = 3;
const int32_t kOutputTensorDim0 = 1;
const int32_t kOutputTensorDim1 = 10;
TFLitePredictorTest() = default;
~TFLitePredictorTest() override = default;
// Returns the TFLite test model path.
std::string GetTFLiteTestPath() {
// Location of generated test data (<(PROGRAM_DIR)/test_data).
base::FilePath gen_test_data_directory;
base::PathService::Get(chrome::DIR_GEN_TEST_DATA,
&gen_test_data_directory);
return gen_test_data_directory.Append("simple_test.tflite").value();
}
};
TEST_F(TFLitePredictorTest, TFLiteInitializationTest) {
// Initialize the model
std::string model_path = GetTFLiteTestPath();
TFLitePredictor predictor(model_path);
TfLiteStatus status = predictor.Initialize();
EXPECT_EQ(status, kTfLiteOk);
}
TEST_F(TFLitePredictorTest, TFLiteTensorsCountTest) {
// Initialize the model
std::string model_path = GetTFLiteTestPath();
TFLitePredictor predictor(model_path);
TfLiteStatus status = predictor.Initialize();
EXPECT_EQ(status, kTfLiteOk);
EXPECT_EQ(predictor.GetInputTensorCount(), kInputTensorNum);
EXPECT_EQ(predictor.GetOutputTensorCount(), kOutputTensorNum);
}
TEST_F(TFLitePredictorTest, TFLiteTensorsTest) {
// Initialize the model
std::string model_path = GetTFLiteTestPath();
TFLitePredictor predictor(model_path);
TfLiteStatus status = predictor.Initialize();
EXPECT_EQ(status, kTfLiteOk);
TfLiteTensor* inputTensor = predictor.GetInputTensor(0);
const TfLiteTensor* outputTensor = predictor.GetOutputTensor(0);
EXPECT_EQ(TfLiteTensorNumDims(inputTensor), kInputTensorDims);
EXPECT_EQ(TfLiteTensorNumDims(outputTensor), kOutputTensorDims);
EXPECT_EQ(TfLiteTensorDim(inputTensor, 0), kInputTensorDim0);
EXPECT_EQ(TfLiteTensorDim(inputTensor, 1), kInputTensorDim1);
EXPECT_EQ(TfLiteTensorDim(inputTensor, 2), kInputTensorDim2);
EXPECT_EQ(TfLiteTensorDim(inputTensor, 3), kInputTensorDim3);
EXPECT_EQ(TfLiteTensorDim(outputTensor, 0), kOutputTensorDim0);
EXPECT_EQ(TfLiteTensorDim(outputTensor, 1), kOutputTensorDim1);
}
TEST_F(TFLitePredictorTest, TFLiteEvaluationTest) {
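// Golden output of the simple_test model for an input tensor filled with
// ones.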
const int kOutputSize = 10;
float expectedOutput[kOutputSize] = {
-0.4936581, -0.32497078, -0.1705023, -0.38193324, 0.36136785,
0.2177353, 0.32200375, 0.28686714, -0.21846706, -0.4200018};
// Initialize the model
std::string model_path = GetTFLiteTestPath();
TFLitePredictor predictor(model_path);
EXPECT_EQ(predictor.Initialize(), kTfLiteOk);
// Initialize model input tensor
TfLiteTensor* inputTensor = predictor.GetInputTensor(0);
EXPECT_TRUE(inputTensor);
EXPECT_EQ(TfLiteTensorNumDims(inputTensor), kInputTensorDims);
int32_t tensorNumElements = 1;
for (int i = 0; i < TfLiteTensorNumDims(inputTensor); i++)
tensorNumElements *= TfLiteTensorDim(inputTensor, i);
float* tensorData = static_cast<float*>(TfLiteTensorData(inputTensor));
for (int i = 0; i < tensorNumElements; i++)
tensorData[i] = 1.0;
// Evaluate model and check output
TfLiteStatus status = predictor.Evaluate();
EXPECT_EQ(status, kTfLiteOk);
const TfLiteTensor* outputTensor = predictor.GetOutputTensor(0);
const float* outputData =
static_cast<const float*>(TfLiteTensorData(outputTensor));
EXPECT_EQ(TfLiteTensorDim(outputTensor, 1), kOutputSize);
for (int i = 0; i < TfLiteTensorDim(outputTensor, 1); i++)
EXPECT_NEAR(expectedOutput[i], outputData[i], 1e-5);
}
} // namespace machine_learning