장수창

added freeze_graph

Showing 115 changed files with 4854 additions and 0 deletions
1 +# This file is based on https://github.com/github/gitignore/blob/master/Android.gitignore
2 +*.iml
3 +.idea/compiler.xml
4 +.idea/copyright
5 +.idea/dictionaries
6 +.idea/gradle.xml
7 +.idea/libraries
8 +.idea/inspectionProfiles
9 +.idea/misc.xml
10 +.idea/modules.xml
11 +.idea/runConfigurations.xml
12 +.idea/tasks.xml
13 +.idea/workspace.xml
14 +.gradle
15 +local.properties
16 +.DS_Store
17 +build/
18 +gradleBuild/
19 +*.apk
20 +*.ap_
21 +*.dex
22 +*.class
23 +bin/
24 +gen/
25 +out/
26 +*.log
27 +.navigation/
28 +/captures
29 +.externalNativeBuild
1 +<component name="ProjectCodeStyleConfiguration">
2 + <code_scheme name="Project" version="173">
3 + <codeStyleSettings language="XML">
4 + <indentOptions>
5 + <option name="CONTINUATION_INDENT_SIZE" value="4" />
6 + </indentOptions>
7 + <arrangement>
8 + <rules>
9 + <section>
10 + <rule>
11 + <match>
12 + <AND>
13 + <NAME>xmlns:android</NAME>
14 + <XML_ATTRIBUTE />
15 + <XML_NAMESPACE>^$</XML_NAMESPACE>
16 + </AND>
17 + </match>
18 + </rule>
19 + </section>
20 + <section>
21 + <rule>
22 + <match>
23 + <AND>
24 + <NAME>xmlns:.*</NAME>
25 + <XML_ATTRIBUTE />
26 + <XML_NAMESPACE>^$</XML_NAMESPACE>
27 + </AND>
28 + </match>
29 + <order>BY_NAME</order>
30 + </rule>
31 + </section>
32 + <section>
33 + <rule>
34 + <match>
35 + <AND>
36 + <NAME>.*:id</NAME>
37 + <XML_ATTRIBUTE />
38 + <XML_NAMESPACE>http://schemas.android.com/apk/res/android</XML_NAMESPACE>
39 + </AND>
40 + </match>
41 + </rule>
42 + </section>
43 + <section>
44 + <rule>
45 + <match>
46 + <AND>
47 + <NAME>.*:name</NAME>
48 + <XML_ATTRIBUTE />
49 + <XML_NAMESPACE>http://schemas.android.com/apk/res/android</XML_NAMESPACE>
50 + </AND>
51 + </match>
52 + </rule>
53 + </section>
54 + <section>
55 + <rule>
56 + <match>
57 + <AND>
58 + <NAME>name</NAME>
59 + <XML_ATTRIBUTE />
60 + <XML_NAMESPACE>^$</XML_NAMESPACE>
61 + </AND>
62 + </match>
63 + </rule>
64 + </section>
65 + <section>
66 + <rule>
67 + <match>
68 + <AND>
69 + <NAME>style</NAME>
70 + <XML_ATTRIBUTE />
71 + <XML_NAMESPACE>^$</XML_NAMESPACE>
72 + </AND>
73 + </match>
74 + </rule>
75 + </section>
76 + <section>
77 + <rule>
78 + <match>
79 + <AND>
80 + <NAME>.*</NAME>
81 + <XML_ATTRIBUTE />
82 + <XML_NAMESPACE>^$</XML_NAMESPACE>
83 + </AND>
84 + </match>
85 + <order>BY_NAME</order>
86 + </rule>
87 + </section>
88 + <section>
89 + <rule>
90 + <match>
91 + <AND>
92 + <NAME>.*</NAME>
93 + <XML_ATTRIBUTE />
94 + <XML_NAMESPACE>http://schemas.android.com/apk/res/android</XML_NAMESPACE>
95 + </AND>
96 + </match>
97 + <order>ANDROID_ATTRIBUTE_ORDER</order>
98 + </rule>
99 + </section>
100 + <section>
101 + <rule>
102 + <match>
103 + <AND>
104 + <NAME>.*</NAME>
105 + <XML_ATTRIBUTE />
106 + <XML_NAMESPACE>.*</XML_NAMESPACE>
107 + </AND>
108 + </match>
109 + <order>BY_NAME</order>
110 + </rule>
111 + </section>
112 + </rules>
113 + </arrangement>
114 + </codeStyleSettings>
115 + </code_scheme>
116 +</component>
\ No newline at end of file
1 +<?xml version="1.0" encoding="UTF-8"?>
2 +<project version="4">
3 + <component name="VcsDirectoryMappings">
4 + <mapping directory="$PROJECT_DIR$/../../.." vcs="Git" />
5 + </component>
6 +</project>
\ No newline at end of file
1 +<?xml version="1.0" encoding="UTF-8"?>
2 +<!--
3 + Copyright 2016 The TensorFlow Authors. All Rights Reserved.
4 +
5 + Licensed under the Apache License, Version 2.0 (the "License");
6 + you may not use this file except in compliance with the License.
7 + You may obtain a copy of the License at
8 +
9 + http://www.apache.org/licenses/LICENSE-2.0
10 +
11 + Unless required by applicable law or agreed to in writing, software
12 + distributed under the License is distributed on an "AS IS" BASIS,
13 + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 + See the License for the specific language governing permissions and
15 + limitations under the License.
16 +-->
17 +
18 +<manifest xmlns:android="http://schemas.android.com/apk/res/android"
19 + package="org.tensorflow.demo">
20 +
21 + <uses-permission android:name="android.permission.CAMERA" />
22 + <uses-feature android:name="android.hardware.camera" />
23 + <uses-feature android:name="android.hardware.camera.autofocus" />
24 + <uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE"/>
25 + <uses-permission android:name="android.permission.RECORD_AUDIO" />
26 +
27 + <application android:allowBackup="true"
28 + android:debuggable="true"
29 + android:label="@string/app_name"
30 + android:icon="@drawable/ic_launcher"
31 + android:theme="@style/MaterialTheme">
32 +
33 +<!-- <activity android:name="org.tensorflow.demo.ClassifierActivity"-->
34 +<!-- android:screenOrientation="portrait"-->
35 +<!-- android:label="@string/activity_name_classification">-->
36 +<!-- <intent-filter>-->
37 +<!-- <action android:name="android.intent.action.MAIN" />-->
38 +<!-- <category android:name="android.intent.category.LAUNCHER" />-->
39 +<!-- <category android:name="android.intent.category.LEANBACK_LAUNCHER" />-->
40 +<!-- </intent-filter>-->
41 +<!-- </activity>-->
42 +
43 + <activity android:name="org.tensorflow.demo.DetectorActivity"
44 + android:screenOrientation="portrait"
45 + android:label="@string/activity_name_detection">
46 + <intent-filter>
47 + <action android:name="android.intent.action.MAIN" />
48 + <category android:name="android.intent.category.LAUNCHER" />
49 + <category android:name="android.intent.category.LEANBACK_LAUNCHER" />
50 + </intent-filter>
51 + </activity>
52 +
53 +<!-- <activity android:name="org.tensorflow.demo.StylizeActivity"-->
54 +<!-- android:screenOrientation="portrait"-->
55 +<!-- android:label="@string/activity_name_stylize">-->
56 +<!-- <intent-filter>-->
57 +<!-- <action android:name="android.intent.action.MAIN" />-->
58 +<!-- <category android:name="android.intent.category.LAUNCHER" />-->
59 +<!-- <category android:name="android.intent.category.LEANBACK_LAUNCHER" />-->
60 +<!-- </intent-filter>-->
61 +<!-- </activity>-->
62 +
63 +<!-- <activity android:name="org.tensorflow.demo.SpeechActivity"-->
64 +<!-- android:screenOrientation="portrait"-->
65 +<!-- android:label="@string/activity_name_speech">-->
66 +<!-- <intent-filter>-->
67 +<!-- <action android:name="android.intent.action.MAIN" />-->
68 +<!-- <category android:name="android.intent.category.LAUNCHER" />-->
69 +<!-- <category android:name="android.intent.category.LEANBACK_LAUNCHER" />-->
70 +<!-- </intent-filter>-->
71 +<!-- </activity>-->
72 + </application>
73 +
74 +</manifest>
1 +# Description:
2 +# TensorFlow camera demo app for Android.
3 +
4 +load("@build_bazel_rules_android//android:rules.bzl", "android_binary")
5 +load(
6 + "//tensorflow:tensorflow.bzl",
7 + "tf_copts",
8 +)
9 +
10 +package(
11 + default_visibility = ["//visibility:public"],
12 + licenses = ["notice"], # Apache 2.0
13 +)
14 +
15 +exports_files(["LICENSE"])
16 +
17 +LINKER_SCRIPT = "jni/version_script.lds"
18 +
19 +# libtensorflow_demo.so contains the native code for image colorspace conversion
20 +# and object tracking used by the demo. It does not require TF as a dependency
21 +# to build if STANDALONE_DEMO_LIB is defined.
22 +# TF support for the demo is provided separately by libtensorflow_inference.so.
23 +cc_binary(
24 + name = "libtensorflow_demo.so",
25 + srcs = glob([
26 + "jni/**/*.cc",
27 + "jni/**/*.h",
28 + ]),
29 + copts = tf_copts(),
30 + defines = ["STANDALONE_DEMO_LIB"],
31 + linkopts = [
32 + "-landroid",
33 + "-ldl",
34 + "-ljnigraphics",
35 + "-llog",
36 + "-lm",
37 + "-z defs",
38 + "-s",
39 + "-Wl,--version-script,$(location {})".format(LINKER_SCRIPT),
40 + ],
41 + linkshared = 1,
42 + linkstatic = 1,
43 + tags = [
44 + "manual",
45 + "notap",
46 + ],
47 + deps = [
48 + LINKER_SCRIPT,
49 + ],
50 +)
51 +
52 +cc_library(
53 + name = "tensorflow_native_libs",
54 + srcs = [
55 + ":libtensorflow_demo.so",
56 + "//tensorflow/tools/android/inference_interface:libtensorflow_inference.so",
57 + ],
58 + tags = [
59 + "manual",
60 + "notap",
61 + ],
62 +)
63 +
64 +android_binary(
65 + name = "tensorflow_demo",
66 + srcs = glob([
67 + "src/**/*.java",
68 + ]),
69 + # Package assets from assets dir as well as all model targets. Remove undesired models
70 + # (and corresponding Activities in source) to reduce APK size.
71 + assets = [
72 + "//tensorflow/examples/android/assets:asset_files",
73 + ":external_assets",
74 + ],
75 + assets_dir = "",
76 + custom_package = "org.tensorflow.demo",
77 + manifest = "AndroidManifest.xml",
78 + resource_files = glob(["res/**"]),
79 + tags = [
80 + "manual",
81 + "notap",
82 + ],
83 + deps = [
84 + ":tensorflow_native_libs",
85 + "//tensorflow/tools/android/inference_interface:android_tensorflow_inference_java",
86 + ],
87 +)
88 +
89 +# LINT.IfChange
90 +filegroup(
91 + name = "external_assets",
92 + srcs = [
93 + "@inception_v1//:model_files",
94 + "@mobile_ssd//:model_files",
95 + "@speech_commands//:model_files",
96 + "@stylize//:model_files",
97 + ],
98 +)
99 +# LINT.ThenChange(//tensorflow/examples/android/download-models.gradle)
100 +
101 +filegroup(
102 + name = "java_files",
103 + srcs = glob(["src/**/*.java"]),
104 +)
105 +
106 +filegroup(
107 + name = "jni_files",
108 + srcs = glob([
109 + "jni/**/*.cc",
110 + "jni/**/*.h",
111 + ]),
112 +)
113 +
114 +filegroup(
115 + name = "resource_files",
116 + srcs = glob(["res/**"]),
117 +)
118 +
119 +exports_files([
120 + "AndroidManifest.xml",
121 +])
1 +# TensorFlow Android Camera Demo
2 +
3 +This folder contains an example application utilizing TensorFlow for Android
4 +devices.
5 +
6 +## Description
7 +
8 +The demos in this folder are designed to give straightforward samples of using
9 +TensorFlow in mobile applications.
10 +
11 +Inference is done using the [TensorFlow Android Inference
12 +Interface](../../tools/android/inference_interface), which may be built
13 +separately if you want a standalone library to drop into your existing
14 +application. Object tracking and efficient YUV -> RGB conversion are handled by
15 +`libtensorflow_demo.so`.
16 +
17 +A device running Android 5.0 (API 21) or higher is required to run the demo due
18 +to the use of the camera2 API, although the native libraries themselves can run
19 +on API >= 14 devices.
20 +
21 +## Current samples:
22 +
23 +1. [TF Classify](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/android/src/org/tensorflow/demo/ClassifierActivity.java):
24 + Uses the [Google Inception](https://arxiv.org/abs/1409.4842)
25 +   model to classify camera frames in real time, displaying the top results
26 + in an overlay on the camera image.
27 +2. [TF Detect](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/android/src/org/tensorflow/demo/DetectorActivity.java):
28 + Demonstrates an SSD-Mobilenet model trained using the
29 +    [TensorFlow Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection/)
30 + introduced in [Speed/accuracy trade-offs for modern convolutional object detectors](https://arxiv.org/abs/1611.10012) to
31 + localize and track objects (from 80 categories) in the camera preview
32 +    in real time.
33 +3. [TF Stylize](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/android/src/org/tensorflow/demo/StylizeActivity.java):
34 + Uses a model based on [A Learned Representation For Artistic
35 + Style](https://arxiv.org/abs/1610.07629) to restyle the camera preview
36 +   image in the style of a number of different artists.
37 +4. [TF
38 + Speech](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/android/src/org/tensorflow/demo/SpeechActivity.java):
39 + Runs a simple speech recognition model built by the [audio training
40 + tutorial](https://www.tensorflow.org/versions/master/tutorials/audio_recognition). Listens
41 + for a small set of words, and highlights them in the UI when they are
42 + recognized.
43 +
44 +<img src="sample_images/classify1.jpg" width="30%"><img src="sample_images/stylize1.jpg" width="30%"><img src="sample_images/detect1.jpg" width="30%">
45 +
46 +## Prebuilt Components:
47 +
48 +The fastest path to trying the demo is to download the [prebuilt demo APK](https://storage.googleapis.com/download.tensorflow.org/deps/tflite/TfLiteCameraDemo.apk).
49 +
50 +Also available are precompiled native libraries and a JCenter package that you
51 +can simply drop into your own applications. See
52 +[tensorflow/tools/android/inference_interface/README.md](../../tools/android/inference_interface/README.md)
53 +for more details.
54 +
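For reference, here is a minimal command-line sketch of fetching and installing
the prebuilt APK (assuming `adb` is on your PATH and a device with USB debugging
enabled is attached; the temporary path is arbitrary):

```bash
# Download the prebuilt demo APK and install it on a connected device.
curl -L https://storage.googleapis.com/download.tensorflow.org/deps/tflite/TfLiteCameraDemo.apk \
  -o /tmp/TfLiteCameraDemo.apk
adb install -r /tmp/TfLiteCameraDemo.apk
```
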
55 +## Running the Demo
56 +
57 +Once the app is installed, it can be started via the "TF Classify", "TF Detect",
58 +"TF Stylize", and "TF Speech" launcher icons, each of which shows the orange
59 +TensorFlow logo.
60 +
61 +While running the activities, pressing the volume keys on your device will
62 +toggle debug visualizations on/off, rendering additional info to the screen that
63 +may be useful for development purposes.
64 +
65 +## Building in Android Studio using the TensorFlow AAR from JCenter
66 +
67 +The simplest way to compile the demo app yourself and try out changes to the
68 +project code is to use Android Studio. Simply set this `android` directory as the
69 +project root.
70 +
71 +Then edit the `build.gradle` file and change the value of `nativeBuildSystem` to
72 +`'none'` so that the project is built in the simplest way possible:
73 +
74 +```None
75 +def nativeBuildSystem = 'none'
76 +```
77 +
78 +While this project includes full build integration for TensorFlow, this setting
79 +disables it, and uses the TensorFlow Inference Interface package from JCenter.
80 +
81 +Note: Currently, in this build mode, YUV -> RGB is done using a less efficient
82 +Java implementation, and object tracking is not available in the "TF Detect"
83 +activity. Setting the build system to `'cmake'` currently only builds
84 +`libtensorflow_demo.so`, which provides fast YUV -> RGB conversion and object
85 +tracking, while still acquiring TensorFlow support via the downloaded AAR, so it
86 +may be a lightweight way to enable these features.
87 +
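To try that mode, change the same toggle shown above; `'cmake'` is one of the
values listed in [build.gradle](build.gradle):

```None
def nativeBuildSystem = 'cmake'
```
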
88 +For any project that does not include custom low-level TensorFlow code, this is
89 +likely sufficient.
90 +
91 +For details on how to include this JCenter package in your own project, see
92 +[tensorflow/tools/android/inference_interface/README.md](../../tools/android/inference_interface/README.md).
93 +
94 +## Building the Demo with TensorFlow from Source
95 +
96 +Pick your preferred approach below. At the moment, we have full support for
97 +Bazel, and partial support for Gradle, CMake, Make, and Android Studio.
98 +
99 +As a first step for all build types, clone the TensorFlow repo with:
100 +
101 +```bash
102 +git clone --recurse-submodules https://github.com/tensorflow/tensorflow.git
103 +```
104 +
105 +Note that `--recurse-submodules` is necessary to prevent some issues with
106 +protobuf compilation.
107 +
108 +### Bazel
109 +
110 +NOTE: Bazel does not currently support building for Android on Windows. Full
111 +support for Gradle/CMake builds is coming soon, but in the meantime we suggest
112 +that Windows users download the
113 +[prebuilt demo APK](https://storage.googleapis.com/download.tensorflow.org/deps/tflite/TfLiteCameraDemo.apk)
114 +instead.
115 +
116 +##### Install Bazel and Android Prerequisites
117 +
118 +Bazel is the primary build system for TensorFlow. To build with Bazel, you must
119 +install Bazel itself as well as the Android NDK and SDK on your system.
120 +
121 +1. Install the latest version of Bazel as per the instructions [on the Bazel
122 + website](https://bazel.build/versions/master/docs/install.html).
123 +2. The Android NDK is required to build the native (C/C++) TensorFlow code. The
124 + current recommended version is 14b, which may be found
125 + [here](https://developer.android.com/ndk/downloads/older_releases.html#ndk-14b-downloads).
126 +3. The Android SDK and build tools may be obtained
127 + [here](https://developer.android.com/tools/revisions/build-tools.html), or
128 + alternatively as part of [Android
129 + Studio](https://developer.android.com/studio/index.html). Build tools API >=
130 + 23 is required to build the TF Android demo (though it will run on API >= 21
131 + devices).
132 +
133 +##### Edit WORKSPACE
134 +
135 +NOTE: As long as you have the SDK and NDK installed, the `./configure` script
136 +will create these rules for you. Answer "Yes" when the script asks to
137 +automatically configure the `./WORKSPACE`.
138 +
139 +The Android entries in
140 +[`<workspace_root>/WORKSPACE`](../../../WORKSPACE#L19-L36) must be uncommented
141 +with the paths filled in appropriately depending on where you installed the NDK
142 +and SDK. Otherwise an error such as "The external label
143 +'//external:android/sdk' is not bound to anything" will be reported.
144 +
145 +Also edit the API levels for the SDK in WORKSPACE to the highest level you have
146 +installed in your SDK. This must be >= 23 (this is completely independent of the
147 +API level of the demo, which is defined in AndroidManifest.xml). The NDK API
148 +level may remain at 14.
149 +
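If you would rather not edit `WORKSPACE` by hand, the script route mentioned in
the note above is the simplest; a sketch, run from the cloned workspace root:

```bash
# Let the configure script fill in the Android rules in ./WORKSPACE.
cd tensorflow
./configure
# Answer "Yes" when the script asks to automatically configure ./WORKSPACE
# for Android builds, then supply your SDK/NDK paths and API levels.
```
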
150 +##### Install Model Files (optional)
151 +
152 +The TensorFlow `GraphDef`s that contain the model definitions and weights are
153 +not packaged in the repo because of their size. They are downloaded
154 +automatically and packaged with the APK by Bazel via a `new_http_archive` defined
155 +in `WORKSPACE` during the build process, and by Gradle via
156 +`download-models.gradle`.
157 +
158 +**Optional**: If you wish to place the models in your assets manually, remove
159 +all of the `model_files` entries from the `assets` list in `tensorflow_demo`
160 +found in the [`BUILD`](BUILD#L92) file. Then download and extract the archives
161 +yourself to the `assets` directory in the source tree:
162 +
163 +```bash
164 +BASE_URL=https://storage.googleapis.com/download.tensorflow.org/models
165 +for MODEL_ZIP in inception5h.zip ssd_mobilenet_v1_android_export.zip stylize_v1.zip
166 +do
167 + curl -L ${BASE_URL}/${MODEL_ZIP} -o /tmp/${MODEL_ZIP}
168 + unzip /tmp/${MODEL_ZIP} -d tensorflow/examples/android/assets/
169 +done
170 +```
171 +
172 +This will extract the models and their associated metadata files to the local
173 +`assets/` directory.
174 +
175 +If you are using Gradle, make sure to remove the `download-models.gradle` reference
176 +from `build.gradle` after you manually download the models; otherwise Gradle might
177 +download the models again and overwrite yours.
178 +
179 +##### Build
180 +
181 +After editing your WORKSPACE file to update the SDK/NDK configuration, you may
182 +build the APK. Run this from your workspace root:
183 +
184 +```bash
185 +bazel build --cxxopt='--std=c++11' -c opt //tensorflow/examples/android:tensorflow_demo
186 +```
187 +
188 +##### Install
189 +
190 +Make sure that adb debugging is enabled on your Android 5.0 (API 21) or later
191 +device, then after building use the following command from your workspace root
192 +to install the APK:
193 +
194 +```bash
195 +adb install -r bazel-bin/tensorflow/examples/android/tensorflow_demo.apk
196 +```
197 +
198 +### Android Studio with Bazel
199 +
200 +Android Studio may be used to build the demo in conjunction with Bazel. First,
201 +make sure that you can build with Bazel following the above directions. Then,
202 +look at [build.gradle](build.gradle) and make sure that the path to Bazel
203 +matches that of your system.
204 +
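A quick way to find that path on a Unix-like system (the result should match the
`bazelLocation` value defined in [build.gradle](build.gradle)):

```bash
# Print the location of the bazel binary on your PATH.
which bazel
```
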
205 +At this point you can add the tensorflow/examples/android directory as a new
206 +Android Studio project. Click through installing all the Gradle extensions it
207 +requests, and you should be able to have Android Studio build the demo like any
208 +other application (it will call out to Bazel to build the native code with the
209 +NDK).
210 +
211 +### CMake
212 +
213 +Full CMake support for the demo is coming soon, but for now it is possible to
214 +build the TensorFlow Android Inference library using
215 +[tensorflow/tools/android/inference_interface/cmake](../../tools/android/inference_interface/cmake).
1 +package(
2 + default_visibility = ["//visibility:public"],
3 + licenses = ["notice"], # Apache 2.0
4 +)
5 +
6 +# It is necessary to use this filegroup rather than globbing the files in this
7 +# folder directly in the examples/android:tensorflow_demo target, due to the fact
8 +# that assets_dir is necessarily set to "" there (to allow using other
9 +# arbitrary targets as assets).
10 +filegroup(
11 + name = "asset_files",
12 + srcs = glob(
13 + ["**/*"],
14 + exclude = ["BUILD"],
15 + ),
16 +)
This file is too large to display.
1 +// This file provides basic support for building the TensorFlow demo
2 +// in Android Studio with Gradle.
3 +//
4 +// Note that Bazel is still used by default to compile the native libs,
5 +// and should be installed at the location noted below. This build file
6 +// automates the process of calling out to it and copying the compiled
7 +// libraries back into the appropriate directory.
8 +//
9 +// Alternatively, experimental support for Makefile builds is provided by
10 +// setting nativeBuildSystem below to 'makefile'. This will allow building the demo
11 +// on Windows machines, but note that full equivalence with the Bazel
12 +// build is not yet guaranteed. See comments below for caveats and tips
13 +// for speeding up the build, such as enabling ccache.
14 +// NOTE: Running a make build will cause subsequent Bazel builds to *fail*
15 +// unless the contrib/makefile/downloads/ and gen/ dirs are deleted afterwards.
16 +
17 +// The cmake build only creates libtensorflow_demo.so. In this situation,
18 +// libtensorflow_inference.so will be acquired via the tensorflow.aar dependency.
19 +
20 +// It is necessary to customize Gradle's build directory, as otherwise
21 +// it will conflict with the BUILD file used by Bazel on case-insensitive OSs.
22 +project.buildDir = 'gradleBuild'
23 +getProject().setBuildDir('gradleBuild')
24 +
25 +buildscript {
26 + repositories {
27 + jcenter()
28 + google()
29 + }
30 +
31 + dependencies {
32 + classpath 'com.android.tools.build:gradle:3.3.1'
33 + classpath 'org.apache.httpcomponents:httpclient:4.5.4'
34 + }
35 +}
36 +
37 +allprojects {
38 + repositories {
39 + jcenter()
40 + google()
41 + }
42 +}
43 +
44 +// set to 'bazel', 'cmake', 'makefile', 'none'
45 +def nativeBuildSystem = 'none'
46 +
47 +// Controls output directory in APK and CPU type for Bazel builds.
48 +// NOTE: Does not affect the Makefile build target API (yet), which currently
49 +// assumes armeabi-v7a. If building with make, changing this will require
50 +// editing the Makefile as well.
51 +// The CMake build has only been tested with armeabi-v7a; others may not work.
52 +def cpuType = 'armeabi-v7a'
53 +
54 +// Output directory in the local directory for packaging into the APK.
55 +def nativeOutDir = 'libs/' + cpuType
56 +
57 +// Default to building with Bazel and override with make if requested.
58 +def nativeBuildRule = 'buildNativeBazel'
59 +def demoLibPath = '../../../bazel-bin/tensorflow/examples/android/libtensorflow_demo.so'
60 +def inferenceLibPath = '../../../bazel-bin/tensorflow/tools/android/inference_interface/libtensorflow_inference.so'
61 +
62 +// Override for Makefile builds.
63 +if (nativeBuildSystem == 'makefile') {
64 + nativeBuildRule = 'buildNativeMake'
65 + demoLibPath = '../../../tensorflow/contrib/makefile/gen/lib/android_' + cpuType + '/libtensorflow_demo.so'
66 + inferenceLibPath = '../../../tensorflow/contrib/makefile/gen/lib/android_' + cpuType + '/libtensorflow_inference.so'
67 +}
68 +
69 +// If building with Bazel, this is the location of the bazel binary.
70 +// NOTE: Bazel does not yet support building for Android on Windows,
71 +// so in this case the Makefile build must be used as described above.
72 +def bazelLocation = '/usr/local/bin/bazel'
73 +
74 +// import DownloadModels task
75 +project.ext.ASSET_DIR = projectDir.toString() + '/assets'
76 +project.ext.TMP_DIR = project.buildDir.toString() + '/downloads'
77 +
78 +// Download default models; if you wish to use your own models then
79 +// place them in the "assets" directory and comment out this line.
80 +apply from: "download-models.gradle"
81 +
82 +apply plugin: 'com.android.application'
83 +
84 +android {
85 + compileSdkVersion 23
86 +
87 + if (nativeBuildSystem == 'cmake') {
88 + defaultConfig {
89 + applicationId = 'org.tensorflow.demo'
90 + minSdkVersion 21
91 + targetSdkVersion 23
92 + ndk {
93 + abiFilters "${cpuType}"
94 + }
95 + externalNativeBuild {
96 + cmake {
97 + arguments '-DANDROID_STL=c++_static'
98 + }
99 + }
100 + }
101 + externalNativeBuild {
102 + cmake {
103 + path './jni/CMakeLists.txt'
104 + }
105 + }
106 + }
107 +
108 + lintOptions {
109 + abortOnError false
110 + }
111 +
112 + sourceSets {
113 + main {
114 + if (nativeBuildSystem == 'bazel' || nativeBuildSystem == 'makefile') {
115 + // TensorFlow Java API sources.
116 + java {
117 + srcDir '../../java/src/main/java'
118 + exclude '**/examples/**'
119 + }
120 +
121 + // Android TensorFlow wrappers, etc.
122 + java {
123 + srcDir '../../tools/android/inference_interface/java'
124 + }
125 + }
126 + // Android demo app sources.
127 + java {
128 + srcDir 'src'
129 + }
130 +
131 + manifest.srcFile 'AndroidManifest.xml'
132 + resources.srcDirs = ['src']
133 + aidl.srcDirs = ['src']
134 + renderscript.srcDirs = ['src']
135 + res.srcDirs = ['res']
136 + assets.srcDirs = [project.ext.ASSET_DIR]
137 + jniLibs.srcDirs = ['libs']
138 + }
139 +
140 + debug.setRoot('build-types/debug')
141 + release.setRoot('build-types/release')
142 + }
143 + defaultConfig {
144 + targetSdkVersion 23
145 + minSdkVersion 21
146 + }
147 +}
148 +
149 +task buildNativeBazel(type: Exec) {
150 + workingDir '../../..'
151 + commandLine bazelLocation, 'build', '-c', 'opt', \
152 + 'tensorflow/examples/android:tensorflow_native_libs', \
153 + '--crosstool_top=//external:android/crosstool', \
154 + '--cpu=' + cpuType, \
155 + '--host_crosstool_top=@bazel_tools//tools/cpp:toolchain'
156 +}
157 +
158 +task buildNativeMake(type: Exec) {
159 + environment "NDK_ROOT", android.ndkDirectory
160 + // Tip: install ccache and uncomment the following to speed up
161 + // builds significantly.
162 + // environment "CC_PREFIX", 'ccache'
163 + workingDir '../../..'
164 + commandLine 'tensorflow/contrib/makefile/build_all_android.sh', \
165 + '-s', \
166 + 'tensorflow/contrib/makefile/sub_makefiles/android/Makefile.in', \
167 + '-t', \
168 + 'libtensorflow_inference.so libtensorflow_demo.so all' \
169 + , '-a', cpuType \
170 + //, '-T' // Uncomment to skip protobuf and speed up subsequent builds.
171 +}
172 +
173 +
174 +task copyNativeLibs(type: Copy) {
175 + from demoLibPath
176 + from inferenceLibPath
177 + into nativeOutDir
178 + duplicatesStrategy = 'include'
179 + dependsOn nativeBuildRule
180 + fileMode 0644
181 +}
182 +
183 +tasks.whenTaskAdded { task ->
184 + if (nativeBuildSystem == 'bazel' || nativeBuildSystem == 'makefile') {
185 + if (task.name == 'assembleDebug') {
186 + task.dependsOn 'copyNativeLibs'
187 + }
188 + if (task.name == 'assembleRelease') {
189 + task.dependsOn 'copyNativeLibs'
190 + }
191 + }
192 +}
193 +
194 +dependencies {
195 + if (nativeBuildSystem == 'cmake' || nativeBuildSystem == 'none') {
196 + implementation 'org.tensorflow:tensorflow-android:+'
197 + }
198 +}
1 +/*
2 + * download-models.gradle
3 + * Downloads model files from ${MODEL_URL} into the application's asset folder
4 + * Input:
5 + * project.ext.TMP_DIR: absolute path to hold downloaded zip files
6 + * project.ext.ASSET_DIR: absolute path to save unzipped model files
7 + * Output:
8 + *     4 model files will be downloaded into the given folder, ext.ASSET_DIR
9 + */
10 +// hard coded model files
11 +// LINT.IfChange
12 +def models = ['inception_v1.zip',
13 + 'object_detection/ssd_mobilenet_v1_android_export.zip',
14 + 'stylize_v1.zip',
15 + 'speech_commands_conv_actions.zip']
16 +// LINT.ThenChange(//tensorflow/examples/android/BUILD)
17 +
18 +// Root URL for model archives
19 +def MODEL_URL = 'https://storage.googleapis.com/download.tensorflow.org/models'
20 +
21 +buildscript {
22 + repositories {
23 + jcenter()
24 + }
25 + dependencies {
26 + classpath 'de.undercouch:gradle-download-task:3.2.0'
27 + }
28 +}
29 +
30 +import de.undercouch.gradle.tasks.download.Download
31 +task downloadFile(type: Download){
32 + for (f in models) {
33 + src "${MODEL_URL}/" + f
34 + }
35 + dest new File(project.ext.TMP_DIR)
36 + overwrite true
37 +}
38 +
39 +task extractModels(type: Copy) {
40 + for (f in models) {
41 + def localFile = f.split("/")[-1]
42 + from zipTree(project.ext.TMP_DIR + '/' + localFile)
43 + }
44 +
45 + into file(project.ext.ASSET_DIR)
46 + fileMode 0644
47 + exclude '**/LICENSE'
48 +
49 + def needDownload = false
50 + for (f in models) {
51 + def localFile = f.split("/")[-1]
52 + if (!(new File(project.ext.TMP_DIR + '/' + localFile)).exists()) {
53 + needDownload = true
54 + }
55 + }
56 +
57 + if (needDownload) {
58 + dependsOn downloadFile
59 + }
60 +}
61 +
62 +tasks.whenTaskAdded { task ->
63 + if (task.name == 'assembleDebug') {
64 + task.dependsOn 'extractModels'
65 + }
66 + if (task.name == 'assembleRelease') {
67 + task.dependsOn 'extractModels'
68 + }
69 +}
70 +
1 +#Sat Nov 18 15:06:47 CET 2017
2 +distributionBase=GRADLE_USER_HOME
3 +distributionPath=wrapper/dists
4 +zipStoreBase=GRADLE_USER_HOME
5 +zipStorePath=wrapper/dists
6 +distributionUrl=https\://services.gradle.org/distributions/gradle-4.1-all.zip
1 +#!/usr/bin/env bash
2 +
3 +##############################################################################
4 +##
5 +## Gradle start up script for UN*X
6 +##
7 +##############################################################################
8 +
9 +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
10 +DEFAULT_JVM_OPTS=""
11 +
12 +APP_NAME="Gradle"
13 +APP_BASE_NAME=`basename "$0"`
14 +
15 +# Use the maximum available, or set MAX_FD != -1 to use that value.
16 +MAX_FD="maximum"
17 +
18 +warn ( ) {
19 + echo "$*"
20 +}
21 +
22 +die ( ) {
23 + echo
24 + echo "$*"
25 + echo
26 + exit 1
27 +}
28 +
29 +# OS specific support (must be 'true' or 'false').
30 +cygwin=false
31 +msys=false
32 +darwin=false
33 +case "`uname`" in
34 + CYGWIN* )
35 + cygwin=true
36 + ;;
37 + Darwin* )
38 + darwin=true
39 + ;;
40 + MINGW* )
41 + msys=true
42 + ;;
43 +esac
44 +
45 +# Attempt to set APP_HOME
46 +# Resolve links: $0 may be a link
47 +PRG="$0"
48 +# Need this for relative symlinks.
49 +while [ -h "$PRG" ] ; do
50 + ls=`ls -ld "$PRG"`
51 + link=`expr "$ls" : '.*-> \(.*\)$'`
52 + if expr "$link" : '/.*' > /dev/null; then
53 + PRG="$link"
54 + else
55 + PRG=`dirname "$PRG"`"/$link"
56 + fi
57 +done
58 +SAVED="`pwd`"
59 +cd "`dirname \"$PRG\"`/" >/dev/null
60 +APP_HOME="`pwd -P`"
61 +cd "$SAVED" >/dev/null
62 +
63 +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
64 +
65 +# Determine the Java command to use to start the JVM.
66 +if [ -n "$JAVA_HOME" ] ; then
67 + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
68 + # IBM's JDK on AIX uses strange locations for the executables
69 + JAVACMD="$JAVA_HOME/jre/sh/java"
70 + else
71 + JAVACMD="$JAVA_HOME/bin/java"
72 + fi
73 + if [ ! -x "$JAVACMD" ] ; then
74 + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
75 +
76 +Please set the JAVA_HOME variable in your environment to match the
77 +location of your Java installation."
78 + fi
79 +else
80 + JAVACMD="java"
81 + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
82 +
83 +Please set the JAVA_HOME variable in your environment to match the
84 +location of your Java installation."
85 +fi
86 +
87 +# Increase the maximum file descriptors if we can.
88 +if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then
89 + MAX_FD_LIMIT=`ulimit -H -n`
90 + if [ $? -eq 0 ] ; then
91 + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
92 + MAX_FD="$MAX_FD_LIMIT"
93 + fi
94 + ulimit -n $MAX_FD
95 + if [ $? -ne 0 ] ; then
96 + warn "Could not set maximum file descriptor limit: $MAX_FD"
97 + fi
98 + else
99 + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
100 + fi
101 +fi
102 +
103 +# For Darwin, add options to specify how the application appears in the dock
104 +if $darwin; then
105 + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
106 +fi
107 +
108 +# For Cygwin, switch paths to Windows format before running java
109 +if $cygwin ; then
110 + APP_HOME=`cygpath --path --mixed "$APP_HOME"`
111 + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
112 + JAVACMD=`cygpath --unix "$JAVACMD"`
113 +
114 + # We build the pattern for arguments to be converted via cygpath
115 + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
116 + SEP=""
117 + for dir in $ROOTDIRSRAW ; do
118 + ROOTDIRS="$ROOTDIRS$SEP$dir"
119 + SEP="|"
120 + done
121 + OURCYGPATTERN="(^($ROOTDIRS))"
122 + # Add a user-defined pattern to the cygpath arguments
123 + if [ "$GRADLE_CYGPATTERN" != "" ] ; then
124 + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
125 + fi
126 + # Now convert the arguments - kludge to limit ourselves to /bin/sh
127 + i=0
128 + for arg in "$@" ; do
129 + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
130 + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
131 +
132 + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
133 + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
134 + else
135 + eval `echo args$i`="\"$arg\""
136 + fi
137 + i=$((i+1))
138 + done
139 + case $i in
140 + (0) set -- ;;
141 + (1) set -- "$args0" ;;
142 + (2) set -- "$args0" "$args1" ;;
143 + (3) set -- "$args0" "$args1" "$args2" ;;
144 + (4) set -- "$args0" "$args1" "$args2" "$args3" ;;
145 + (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
146 + (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
147 + (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
148 + (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
149 + (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
150 + esac
151 +fi
152 +
153 +# Split up the JVM_OPTS and GRADLE_OPTS values into an array, following the shell quoting and substitution rules
154 +function splitJvmOpts() {
155 + JVM_OPTS=("$@")
156 +}
157 +eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS
158 +JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME"
159 +
160 +exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@"
1 +@if "%DEBUG%" == "" @echo off
2 +@rem ##########################################################################
3 +@rem
4 +@rem Gradle startup script for Windows
5 +@rem
6 +@rem ##########################################################################
7 +
8 +@rem Set local scope for the variables with windows NT shell
9 +if "%OS%"=="Windows_NT" setlocal
10 +
11 +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
12 +set DEFAULT_JVM_OPTS=
13 +
14 +set DIRNAME=%~dp0
15 +if "%DIRNAME%" == "" set DIRNAME=.
16 +set APP_BASE_NAME=%~n0
17 +set APP_HOME=%DIRNAME%
18 +
19 +@rem Find java.exe
20 +if defined JAVA_HOME goto findJavaFromJavaHome
21 +
22 +set JAVA_EXE=java.exe
23 +%JAVA_EXE% -version >NUL 2>&1
24 +if "%ERRORLEVEL%" == "0" goto init
25 +
26 +echo.
27 +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
28 +echo.
29 +echo Please set the JAVA_HOME variable in your environment to match the
30 +echo location of your Java installation.
31 +
32 +goto fail
33 +
34 +:findJavaFromJavaHome
35 +set JAVA_HOME=%JAVA_HOME:"=%
36 +set JAVA_EXE=%JAVA_HOME%/bin/java.exe
37 +
38 +if exist "%JAVA_EXE%" goto init
39 +
40 +echo.
41 +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
42 +echo.
43 +echo Please set the JAVA_HOME variable in your environment to match the
44 +echo location of your Java installation.
45 +
46 +goto fail
47 +
48 +:init
49 +@rem Get command-line arguments, handling Windows variants
50 +
51 +if not "%OS%" == "Windows_NT" goto win9xME_args
52 +if "%@eval[2+2]" == "4" goto 4NT_args
53 +
54 +:win9xME_args
55 +@rem Slurp the command line arguments.
56 +set CMD_LINE_ARGS=
57 +set _SKIP=2
58 +
59 +:win9xME_args_slurp
60 +if "x%~1" == "x" goto execute
61 +
62 +set CMD_LINE_ARGS=%*
63 +goto execute
64 +
65 +:4NT_args
66 +@rem Get arguments from the 4NT Shell from JP Software
67 +set CMD_LINE_ARGS=%$
68 +
69 +:execute
70 +@rem Setup the command line
71 +
72 +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
73 +
74 +@rem Execute Gradle
75 +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
76 +
77 +:end
78 +@rem End local scope for the variables with windows NT shell
79 +if "%ERRORLEVEL%"=="0" goto mainEnd
80 +
81 +:fail
82 +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
83 +rem the _cmd.exe /c_ return code!
84 +if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
85 +exit /b 1
86 +
87 +:mainEnd
88 +if "%OS%"=="Windows_NT" endlocal
89 +
90 +:omega
1 +#
2 +# Copyright (C) 2016 The Android Open Source Project
3 +#
4 +# Licensed under the Apache License, Version 2.0 (the "License");
5 +# you may not use this file except in compliance with the License.
6 +# You may obtain a copy of the License at
7 +#
8 +# http://www.apache.org/licenses/LICENSE-2.0
9 +#
10 +# Unless required by applicable law or agreed to in writing, software
11 +# distributed under the License is distributed on an "AS IS" BASIS,
12 +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 +# See the License for the specific language governing permissions and
14 +# limitations under the License.
15 +#
16 +
17 +project(TENSORFLOW_DEMO)
18 +cmake_minimum_required(VERSION 3.4.1)
19 +
20 +set(CMAKE_VERBOSE_MAKEFILE on)
21 +
22 +get_filename_component(TF_SRC_ROOT ${CMAKE_SOURCE_DIR}/../../../.. ABSOLUTE)
23 +get_filename_component(SAMPLE_SRC_DIR ${CMAKE_SOURCE_DIR}/.. ABSOLUTE)
24 +
25 +if (ANDROID_ABI MATCHES "^armeabi-v7a$")
26 + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfloat-abi=softfp -mfpu=neon")
27 +elseif(ANDROID_ABI MATCHES "^arm64-v8a")
28 + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O2 -ftree-vectorize")
29 +endif()
30 +
31 +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DSTANDALONE_DEMO_LIB \
32 + -std=c++11 -fno-exceptions -fno-rtti -O2 -Wno-narrowing \
33 + -fPIE")
34 +set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} \
35 + -Wl,--allow-multiple-definition \
36 + -Wl,--whole-archive -fPIE -v")
37 +
38 +file(GLOB_RECURSE tensorflow_demo_sources ${SAMPLE_SRC_DIR}/jni/*.*)
39 +add_library(tensorflow_demo SHARED
40 + ${tensorflow_demo_sources})
41 +target_include_directories(tensorflow_demo PRIVATE
42 + ${TF_SRC_ROOT}
43 + ${CMAKE_SOURCE_DIR})
44 +
45 +target_link_libraries(tensorflow_demo
46 + android
47 + log
48 + jnigraphics
49 + m
50 + atomic
51 + z)
1 +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +// This file binds the native image utility code to the Java class
17 +// which exposes them.
18 +
19 +#include <jni.h>
20 +#include <stdio.h>
21 +#include <stdlib.h>
22 +
23 +#include "tensorflow/examples/android/jni/rgb2yuv.h"
24 +#include "tensorflow/examples/android/jni/yuv2rgb.h"
25 +
26 +#define IMAGEUTILS_METHOD(METHOD_NAME) \
27 + Java_org_tensorflow_demo_env_ImageUtils_##METHOD_NAME // NOLINT
28 +
29 +#ifdef __cplusplus
30 +extern "C" {
31 +#endif
32 +
33 +JNIEXPORT void JNICALL
34 +IMAGEUTILS_METHOD(convertYUV420SPToARGB8888)(
35 + JNIEnv* env, jclass clazz, jbyteArray input, jintArray output,
36 + jint width, jint height, jboolean halfSize);
37 +
38 +JNIEXPORT void JNICALL IMAGEUTILS_METHOD(convertYUV420ToARGB8888)(
39 + JNIEnv* env, jclass clazz, jbyteArray y, jbyteArray u, jbyteArray v,
40 + jintArray output, jint width, jint height, jint y_row_stride,
41 + jint uv_row_stride, jint uv_pixel_stride, jboolean halfSize);
42 +
43 +JNIEXPORT void JNICALL IMAGEUTILS_METHOD(convertYUV420SPToRGB565)(
44 + JNIEnv* env, jclass clazz, jbyteArray input, jbyteArray output, jint width,
45 + jint height);
46 +
47 +JNIEXPORT void JNICALL
48 +IMAGEUTILS_METHOD(convertARGB8888ToYUV420SP)(
49 + JNIEnv* env, jclass clazz, jintArray input, jbyteArray output,
50 + jint width, jint height);
51 +
52 +JNIEXPORT void JNICALL
53 +IMAGEUTILS_METHOD(convertRGB565ToYUV420SP)(
54 + JNIEnv* env, jclass clazz, jbyteArray input, jbyteArray output,
55 + jint width, jint height);
56 +
57 +#ifdef __cplusplus
58 +}
59 +#endif
60 +
61 +JNIEXPORT void JNICALL
62 +IMAGEUTILS_METHOD(convertYUV420SPToARGB8888)(
63 + JNIEnv* env, jclass clazz, jbyteArray input, jintArray output,
64 + jint width, jint height, jboolean halfSize) {
65 + jboolean inputCopy = JNI_FALSE;
66 + jbyte* const i = env->GetByteArrayElements(input, &inputCopy);
67 +
68 + jboolean outputCopy = JNI_FALSE;
69 + jint* const o = env->GetIntArrayElements(output, &outputCopy);
70 +
71 + if (halfSize) {
72 + ConvertYUV420SPToARGB8888HalfSize(reinterpret_cast<uint8_t*>(i),
73 + reinterpret_cast<uint32_t*>(o), width,
74 + height);
75 + } else {
76 + ConvertYUV420SPToARGB8888(reinterpret_cast<uint8_t*>(i),
77 + reinterpret_cast<uint8_t*>(i) + width * height,
78 + reinterpret_cast<uint32_t*>(o), width, height);
79 + }
80 +
81 + env->ReleaseByteArrayElements(input, i, JNI_ABORT);
82 + env->ReleaseIntArrayElements(output, o, 0);
83 +}
84 +
85 +JNIEXPORT void JNICALL IMAGEUTILS_METHOD(convertYUV420ToARGB8888)(
86 + JNIEnv* env, jclass clazz, jbyteArray y, jbyteArray u, jbyteArray v,
87 + jintArray output, jint width, jint height, jint y_row_stride,
88 + jint uv_row_stride, jint uv_pixel_stride, jboolean halfSize) {
89 + jboolean inputCopy = JNI_FALSE;
90 + jbyte* const y_buff = env->GetByteArrayElements(y, &inputCopy);
91 + jboolean outputCopy = JNI_FALSE;
92 + jint* const o = env->GetIntArrayElements(output, &outputCopy);
93 +
94 + if (halfSize) {
95 + ConvertYUV420SPToARGB8888HalfSize(reinterpret_cast<uint8_t*>(y_buff),
96 + reinterpret_cast<uint32_t*>(o), width,
97 + height);
98 + } else {
99 + jbyte* const u_buff = env->GetByteArrayElements(u, &inputCopy);
100 + jbyte* const v_buff = env->GetByteArrayElements(v, &inputCopy);
101 +
102 + ConvertYUV420ToARGB8888(
103 + reinterpret_cast<uint8_t*>(y_buff), reinterpret_cast<uint8_t*>(u_buff),
104 + reinterpret_cast<uint8_t*>(v_buff), reinterpret_cast<uint32_t*>(o),
105 + width, height, y_row_stride, uv_row_stride, uv_pixel_stride);
106 +
107 + env->ReleaseByteArrayElements(u, u_buff, JNI_ABORT);
108 + env->ReleaseByteArrayElements(v, v_buff, JNI_ABORT);
109 + }
110 +
111 + env->ReleaseByteArrayElements(y, y_buff, JNI_ABORT);
112 + env->ReleaseIntArrayElements(output, o, 0);
113 +}
114 +
115 +JNIEXPORT void JNICALL IMAGEUTILS_METHOD(convertYUV420SPToRGB565)(
116 + JNIEnv* env, jclass clazz, jbyteArray input, jbyteArray output, jint width,
117 + jint height) {
118 + jboolean inputCopy = JNI_FALSE;
119 + jbyte* const i = env->GetByteArrayElements(input, &inputCopy);
120 +
121 + jboolean outputCopy = JNI_FALSE;
122 + jbyte* const o = env->GetByteArrayElements(output, &outputCopy);
123 +
124 + ConvertYUV420SPToRGB565(reinterpret_cast<uint8_t*>(i),
125 + reinterpret_cast<uint16_t*>(o), width, height);
126 +
127 + env->ReleaseByteArrayElements(input, i, JNI_ABORT);
128 + env->ReleaseByteArrayElements(output, o, 0);
129 +}
130 +
131 +JNIEXPORT void JNICALL
132 +IMAGEUTILS_METHOD(convertARGB8888ToYUV420SP)(
133 + JNIEnv* env, jclass clazz, jintArray input, jbyteArray output,
134 + jint width, jint height) {
135 + jboolean inputCopy = JNI_FALSE;
136 + jint* const i = env->GetIntArrayElements(input, &inputCopy);
137 +
138 + jboolean outputCopy = JNI_FALSE;
139 + jbyte* const o = env->GetByteArrayElements(output, &outputCopy);
140 +
141 + ConvertARGB8888ToYUV420SP(reinterpret_cast<uint32_t*>(i),
142 + reinterpret_cast<uint8_t*>(o), width, height);
143 +
144 + env->ReleaseIntArrayElements(input, i, JNI_ABORT);
145 + env->ReleaseByteArrayElements(output, o, 0);
146 +}
147 +
148 +JNIEXPORT void JNICALL
149 +IMAGEUTILS_METHOD(convertRGB565ToYUV420SP)(
150 + JNIEnv* env, jclass clazz, jbyteArray input, jbyteArray output,
151 + jint width, jint height) {
152 + jboolean inputCopy = JNI_FALSE;
153 + jbyte* const i = env->GetByteArrayElements(input, &inputCopy);
154 +
155 + jboolean outputCopy = JNI_FALSE;
156 + jbyte* const o = env->GetByteArrayElements(output, &outputCopy);
157 +
158 + ConvertRGB565ToYUV420SP(reinterpret_cast<uint16_t*>(i),
159 + reinterpret_cast<uint8_t*>(o), width, height);
160 +
161 + env->ReleaseByteArrayElements(input, i, JNI_ABORT);
162 + env->ReleaseByteArrayElements(output, o, 0);
163 +}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
1 +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +#ifndef TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_FRAME_PAIR_H_
17 +#define TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_FRAME_PAIR_H_
18 +
19 +#include "tensorflow/examples/android/jni/object_tracking/keypoint.h"
20 +
21 +namespace tf_tracking {
22 +
23 +// A class that records keypoint correspondences from pairs of
24 +// consecutive frames.
25 +class FramePair {
26 + public:
27 + FramePair()
28 + : start_time_(0),
29 + end_time_(0),
30 + number_of_keypoints_(0) {}
31 +
32 +  // Cleans up the FramePair so that it can be reused.
33 + void Init(const int64_t start_time, const int64_t end_time);
34 +
35 + void AdjustBox(const BoundingBox box,
36 + float* const translation_x,
37 + float* const translation_y,
38 + float* const scale_x,
39 + float* const scale_y) const;
40 +
41 + private:
42 + // Returns the weighted median of the given deltas, computed independently on
43 + // x and y. Returns 0,0 in case of failure. The assumption is that a
44 + // translation of 0.0 in the degenerate case is the best that can be done, and
45 + // should not be considered an error.
46 + //
47 +  // In the case of scale, a slight exception is made just to be safe: there is
48 +  // an explicit check for 0.0, but that should never happen naturally because of
49 +  // the non-zero + parity checks in FillScales.
50 + Point2f GetWeightedMedian(const float* const weights,
51 + const Point2f* const deltas) const;
52 +
53 + float GetWeightedMedianScale(const float* const weights,
54 + const Point2f* const deltas) const;
55 +
56 + // Weights points based on the query_point and cutoff_dist.
57 + int FillWeights(const BoundingBox& box,
58 + float* const weights) const;
59 +
60 + // Fills in the array of deltas with the translations of the points
61 + // between frames.
62 + void FillTranslations(Point2f* const translations) const;
63 +
64 + // Fills in the array of deltas with the relative scale factor of points
65 + // relative to a given center. Has the ability to override the weight to 0 if
66 + // a degenerate scale is detected.
67 + // Translation is the amount the center of the box has moved from one frame to
68 + // the next.
69 + int FillScales(const Point2f& old_center,
70 + const Point2f& translation,
71 + float* const weights,
72 + Point2f* const scales) const;
73 +
74 + // TODO(andrewharp): Make these private.
75 + public:
76 + // The time at frame1.
77 + int64_t start_time_;
78 +
79 + // The time at frame2.
80 + int64_t end_time_;
81 +
82 + // This array will contain the keypoints found in frame 1.
83 + Keypoint frame1_keypoints_[kMaxKeypoints];
84 +
85 +  // Contains the locations of the keypoints from frame 1 in frame 2.
86 + Keypoint frame2_keypoints_[kMaxKeypoints];
87 +
88 + // The number of keypoints in frame 1.
89 + int number_of_keypoints_;
90 +
91 + // Keeps track of which keypoint correspondences were actually found from one
92 + // frame to another.
93 + // The i-th element of this array will be non-zero if and only if the i-th
94 + // keypoint of frame 1 was found in frame 2.
95 + bool optical_flow_found_keypoint_[kMaxKeypoints];
96 +
97 + private:
98 + TF_DISALLOW_COPY_AND_ASSIGN(FramePair);
99 +};
100 +
101 +} // namespace tf_tracking
102 +
103 +#endif // TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_FRAME_PAIR_H_
1 +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +#ifndef TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_GEOM_H_
17 +#define TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_GEOM_H_
18 +
19 +#include "tensorflow/examples/android/jni/object_tracking/logging.h"
20 +#include "tensorflow/examples/android/jni/object_tracking/utils.h"
21 +
22 +namespace tf_tracking {
23 +
24 +struct Size {
25 + Size(const int width, const int height) : width(width), height(height) {}
26 +
27 + int width;
28 + int height;
29 +};
30 +
31 +
32 +class Point2f {
33 + public:
34 + Point2f() : x(0.0f), y(0.0f) {}
35 + Point2f(const float x, const float y) : x(x), y(y) {}
36 +
37 + inline Point2f operator- (const Point2f& that) const {
38 + return Point2f(this->x - that.x, this->y - that.y);
39 + }
40 +
41 + inline Point2f operator+ (const Point2f& that) const {
42 + return Point2f(this->x + that.x, this->y + that.y);
43 + }
44 +
45 + inline Point2f& operator+= (const Point2f& that) {
46 + this->x += that.x;
47 + this->y += that.y;
48 + return *this;
49 + }
50 +
51 + inline Point2f& operator-= (const Point2f& that) {
52 + this->x -= that.x;
53 + this->y -= that.y;
54 + return *this;
55 + }
56 +
57 + inline Point2f operator- (const Point2f& that) {
58 + return Point2f(this->x - that.x, this->y - that.y);
59 + }
60 +
61 + inline float LengthSquared() {
62 + return Square(this->x) + Square(this->y);
63 + }
64 +
65 + inline float Length() {
66 + return sqrtf(LengthSquared());
67 + }
68 +
69 + inline float DistanceSquared(const Point2f& that) {
70 + return Square(this->x - that.x) + Square(this->y - that.y);
71 + }
72 +
73 + inline float Distance(const Point2f& that) {
74 + return sqrtf(DistanceSquared(that));
75 + }
76 +
77 + float x;
78 + float y;
79 +};
80 +
81 +inline std::ostream& operator<<(std::ostream& stream, const Point2f& point) {
82 + stream << point.x << "," << point.y;
83 + return stream;
84 +}
85 +
86 +class BoundingBox {
87 + public:
88 + BoundingBox()
89 + : left_(0),
90 + top_(0),
91 + right_(0),
92 + bottom_(0) {}
93 +
94 + BoundingBox(const BoundingBox& bounding_box)
95 + : left_(bounding_box.left_),
96 + top_(bounding_box.top_),
97 + right_(bounding_box.right_),
98 + bottom_(bounding_box.bottom_) {
99 + SCHECK(left_ < right_, "Bounds out of whack! %.2f vs %.2f!", left_, right_);
100 + SCHECK(top_ < bottom_, "Bounds out of whack! %.2f vs %.2f!", top_, bottom_);
101 + }
102 +
103 + BoundingBox(const float left,
104 + const float top,
105 + const float right,
106 + const float bottom)
107 + : left_(left),
108 + top_(top),
109 + right_(right),
110 + bottom_(bottom) {
111 + SCHECK(left_ < right_, "Bounds out of whack! %.2f vs %.2f!", left_, right_);
112 + SCHECK(top_ < bottom_, "Bounds out of whack! %.2f vs %.2f!", top_, bottom_);
113 + }
114 +
115 + BoundingBox(const Point2f& point1, const Point2f& point2)
116 + : left_(MIN(point1.x, point2.x)),
117 + top_(MIN(point1.y, point2.y)),
118 + right_(MAX(point1.x, point2.x)),
119 + bottom_(MAX(point1.y, point2.y)) {}
120 +
121 + inline void CopyToArray(float* const bounds_array) const {
122 + bounds_array[0] = left_;
123 + bounds_array[1] = top_;
124 + bounds_array[2] = right_;
125 + bounds_array[3] = bottom_;
126 + }
127 +
128 + inline float GetWidth() const {
129 + return right_ - left_;
130 + }
131 +
132 + inline float GetHeight() const {
133 + return bottom_ - top_;
134 + }
135 +
136 + inline float GetArea() const {
137 + const float width = GetWidth();
138 + const float height = GetHeight();
139 + if (width <= 0 || height <= 0) {
140 + return 0.0f;
141 + }
142 +
143 + return width * height;
144 + }
145 +
146 + inline Point2f GetCenter() const {
147 + return Point2f((left_ + right_) / 2.0f,
148 + (top_ + bottom_) / 2.0f);
149 + }
150 +
151 + inline bool ValidBox() const {
152 + return GetArea() > 0.0f;
153 + }
154 +
155 + // Returns a bounding box created from the overlapping area of these two.
156 + inline BoundingBox Intersect(const BoundingBox& that) const {
157 + const float new_left = MAX(this->left_, that.left_);
158 + const float new_right = MIN(this->right_, that.right_);
159 +
160 + if (new_left >= new_right) {
161 + return BoundingBox();
162 + }
163 +
164 + const float new_top = MAX(this->top_, that.top_);
165 + const float new_bottom = MIN(this->bottom_, that.bottom_);
166 +
167 + if (new_top >= new_bottom) {
168 + return BoundingBox();
169 + }
170 +
171 + return BoundingBox(new_left, new_top, new_right, new_bottom);
172 + }
173 +
174 + // Returns a bounding box that can contain both boxes.
175 + inline BoundingBox Union(const BoundingBox& that) const {
176 + return BoundingBox(MIN(this->left_, that.left_),
177 + MIN(this->top_, that.top_),
178 + MAX(this->right_, that.right_),
179 + MAX(this->bottom_, that.bottom_));
180 + }
181 +
182 + inline float PascalScore(const BoundingBox& that) const {
183 + SCHECK(GetArea() > 0.0f, "Empty bounding box!");
184 + SCHECK(that.GetArea() > 0.0f, "Empty bounding box!");
185 +
186 + const float intersect_area = this->Intersect(that).GetArea();
187 +
188 + if (intersect_area <= 0) {
189 + return 0;
190 + }
191 +
192 + const float score =
193 + intersect_area / (GetArea() + that.GetArea() - intersect_area);
194 + SCHECK(InRange(score, 0.0f, 1.0f), "Invalid score! %.2f", score);
195 + return score;
196 + }
197 +
198 + inline bool Intersects(const BoundingBox& that) const {
199 + return InRange(that.left_, left_, right_)
200 + || InRange(that.right_, left_, right_)
201 + || InRange(that.top_, top_, bottom_)
202 + || InRange(that.bottom_, top_, bottom_);
203 + }
204 +
205 + // Returns whether another bounding box is completely inside of this bounding
206 + // box. Sharing edges is ok.
207 + inline bool Contains(const BoundingBox& that) const {
208 + return that.left_ >= left_ &&
209 + that.right_ <= right_ &&
210 + that.top_ >= top_ &&
211 + that.bottom_ <= bottom_;
212 + }
213 +
214 + inline bool Contains(const Point2f& point) const {
215 + return InRange(point.x, left_, right_) && InRange(point.y, top_, bottom_);
216 + }
217 +
218 + inline void Shift(const Point2f shift_amount) {
219 + left_ += shift_amount.x;
220 + top_ += shift_amount.y;
221 + right_ += shift_amount.x;
222 + bottom_ += shift_amount.y;
223 + }
224 +
225 + inline void ScaleOrigin(const float scale_x, const float scale_y) {
226 + left_ *= scale_x;
227 + right_ *= scale_x;
228 + top_ *= scale_y;
229 + bottom_ *= scale_y;
230 + }
231 +
232 + inline void Scale(const float scale_x, const float scale_y) {
233 + const Point2f center = GetCenter();
234 + const float half_width = GetWidth() / 2.0f;
235 + const float half_height = GetHeight() / 2.0f;
236 +
237 + left_ = center.x - half_width * scale_x;
238 + right_ = center.x + half_width * scale_x;
239 +
240 + top_ = center.y - half_height * scale_y;
241 + bottom_ = center.y + half_height * scale_y;
242 + }
243 +
244 + float left_;
245 + float top_;
246 + float right_;
247 + float bottom_;
248 +};
249 +inline std::ostream& operator<<(std::ostream& stream, const BoundingBox& box) {
250 + stream << "[" << box.left_ << " - " << box.right_
251 + << ", " << box.top_ << " - " << box.bottom_
252 + << ", w:" << box.GetWidth() << " h:" << box.GetHeight() << "]";
253 + return stream;
254 +}
255 +
256 +
257 +class BoundingSquare {
258 + public:
259 + BoundingSquare(const float x, const float y, const float size)
260 + : x_(x), y_(y), size_(size) {}
261 +
262 + explicit BoundingSquare(const BoundingBox& box)
263 + : x_(box.left_), y_(box.top_), size_(box.GetWidth()) {
264 +#ifdef SANITY_CHECKS
265 + if (std::abs(box.GetWidth() - box.GetHeight()) > 0.1f) {
266 + LOG(WARNING) << "This is not a square: " << box << std::endl;
267 + }
268 +#endif
269 + }
270 +
271 + inline BoundingBox ToBoundingBox() const {
272 + return BoundingBox(x_, y_, x_ + size_, y_ + size_);
273 + }
274 +
275 + inline bool ValidBox() {
276 + return size_ > 0.0f;
277 + }
278 +
279 + inline void Shift(const Point2f shift_amount) {
280 + x_ += shift_amount.x;
281 + y_ += shift_amount.y;
282 + }
283 +
284 + inline void Scale(const float scale) {
285 + const float new_size = size_ * scale;
286 + const float position_diff = (new_size - size_) / 2.0f;
287 + x_ -= position_diff;
288 + y_ -= position_diff;
289 + size_ = new_size;
290 + }
291 +
292 + float x_;
293 + float y_;
294 + float size_;
295 +};
296 +inline std::ostream& operator<<(std::ostream& stream,
297 + const BoundingSquare& square) {
298 + stream << "[" << square.x_ << "," << square.y_ << " " << square.size_ << "]";
299 + return stream;
300 +}
301 +
302 +
303 +inline BoundingSquare GetCenteredSquare(const BoundingBox& original_box,
304 + const float size) {
305 + const float width_diff = (original_box.GetWidth() - size) / 2.0f;
306 + const float height_diff = (original_box.GetHeight() - size) / 2.0f;
307 + return BoundingSquare(original_box.left_ + width_diff,
308 + original_box.top_ + height_diff,
309 + size);
310 +}
311 +
312 +inline BoundingSquare GetCenteredSquare(const BoundingBox& original_box) {
313 + return GetCenteredSquare(
314 + original_box, MIN(original_box.GetWidth(), original_box.GetHeight()));
315 +}
316 +
317 +} // namespace tf_tracking
318 +
319 +#endif // TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_GEOM_H_
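PascalScore above is the standard intersection-over-union (Jaccard) measure used for Pascal VOC style box matching. A minimal sketch of what it yields, assuming the four-float (left, top, right, bottom) BoundingBox constructor defined earlier in this header:

// Illustrative sketch only, not part of this change.
#include "tensorflow/examples/android/jni/object_tracking/geom.h"

void IouExample() {
  // Two 10x10 boxes overlapping in a 5x10 strip: intersection = 50,
  // union = 100 + 100 - 50 = 150, so the score is 50 / 150 = 1/3.
  const tf_tracking::BoundingBox a(0.0f, 0.0f, 10.0f, 10.0f);
  const tf_tracking::BoundingBox b(5.0f, 0.0f, 15.0f, 10.0f);
  const float score = a.PascalScore(b);  // ~0.333f
  (void)score;
}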
1 +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +#ifndef TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_GL_UTILS_H_
17 +#define TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_GL_UTILS_H_
18 +
19 +#include <GLES/gl.h>
20 +#include <GLES/glext.h>
21 +
22 +#include "tensorflow/examples/android/jni/object_tracking/geom.h"
23 +
24 +namespace tf_tracking {
25 +
26 +// Draws a box at the given position.
27 +inline static void DrawBox(const BoundingBox& bounding_box) {
28 + const GLfloat line[] = {
29 + bounding_box.left_, bounding_box.bottom_,
30 + bounding_box.left_, bounding_box.top_,
31 + bounding_box.left_, bounding_box.top_,
32 + bounding_box.right_, bounding_box.top_,
33 + bounding_box.right_, bounding_box.top_,
34 + bounding_box.right_, bounding_box.bottom_,
35 + bounding_box.right_, bounding_box.bottom_,
36 + bounding_box.left_, bounding_box.bottom_
37 + };
38 +
39 + glVertexPointer(2, GL_FLOAT, 0, line);
40 + glEnableClientState(GL_VERTEX_ARRAY);
41 +
42 + glDrawArrays(GL_LINES, 0, 8);
43 +}
44 +
45 +
46 +// Changes the coordinate system such that drawing to an arbitrary square in
47 +// the world can thereafter be drawn to using coordinates 0 - 1.
48 +inline static void MapWorldSquareToUnitSquare(const BoundingSquare& square) {
49 + glScalef(square.size_, square.size_, 1.0f);
50 + glTranslatef(square.x_ / square.size_, square.y_ / square.size_, 0.0f);
51 +}
52 +
53 +} // namespace tf_tracking
54 +
55 +#endif // TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_GL_UTILS_H_
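A note on MapWorldSquareToUnitSquare: fixed-function GL right-multiplies the current matrix, so the glTranslatef is applied to vertices first and the glScalef second. The net map is v' = size * (v + (x/size, y/size)) = size * v + (x, y), which carries the unit square (0,0)-(1,1) exactly onto the world square. A CPU-side sketch of the same mapping, illustrative only:

// Maps a unit-square x coordinate to world space as the GL calls above do.
inline float MapUnitToWorldX(const tf_tracking::BoundingSquare& square,
                             const float unit_x) {
  return square.size_ * (unit_x + square.x_ / square.size_);
  // MapUnitToWorldX(square, 0.0f) == square.x_
  // MapUnitToWorldX(square, 1.0f) == square.x_ + square.size_
}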
1 +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +#ifndef TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_IMAGE_DATA_H_
17 +#define TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_IMAGE_DATA_H_
18 +
19 +#include <stdint.h>
20 +#include <memory>
21 +
22 +#include "tensorflow/examples/android/jni/object_tracking/image-inl.h"
23 +#include "tensorflow/examples/android/jni/object_tracking/image.h"
24 +#include "tensorflow/examples/android/jni/object_tracking/image_utils.h"
25 +#include "tensorflow/examples/android/jni/object_tracking/integral_image.h"
26 +#include "tensorflow/examples/android/jni/object_tracking/time_log.h"
27 +#include "tensorflow/examples/android/jni/object_tracking/utils.h"
28 +
29 +#include "tensorflow/examples/android/jni/object_tracking/config.h"
30 +
31 +namespace tf_tracking {
32 +
33 +// Class that encapsulates all bulky processed data for a frame.
34 +class ImageData {
35 + public:
36 + explicit ImageData(const int width, const int height)
37 + : uv_frame_width_(width << 1),
38 + uv_frame_height_(height << 1),
39 + timestamp_(0),
40 + image_(width, height) {
41 + InitPyramid(width, height);
42 + ResetComputationCache();
43 + }
44 +
45 + private:
46 + void ResetComputationCache() {
47 + uv_data_computed_ = false;
48 + integral_image_computed_ = false;
49 + for (int i = 0; i < kNumPyramidLevels; ++i) {
50 + spatial_x_computed_[i] = false;
51 + spatial_y_computed_[i] = false;
52 + pyramid_sqrt2_computed_[i * 2] = false;
53 + pyramid_sqrt2_computed_[i * 2 + 1] = false;
54 + }
55 + }
56 +
57 + void InitPyramid(const int width, const int height) {
58 + int level_width = width;
59 + int level_height = height;
60 +
61 + for (int i = 0; i < kNumPyramidLevels; ++i) {
62 + pyramid_sqrt2_[i * 2] = NULL;
63 + pyramid_sqrt2_[i * 2 + 1] = NULL;
64 + spatial_x_[i] = NULL;
65 + spatial_y_[i] = NULL;
66 +
67 + level_width /= 2;
68 + level_height /= 2;
69 + }
70 +
71 + // Alias the first pyramid level to image_.
72 + pyramid_sqrt2_[0] = &image_;
73 + }
74 +
75 + public:
76 + ~ImageData() {
77 + // The first pyramid level is actually an alias to image_,
78 + // so make sure it doesn't get deleted here.
79 + pyramid_sqrt2_[0] = NULL;
80 +
81 + for (int i = 0; i < kNumPyramidLevels; ++i) {
82 + SAFE_DELETE(pyramid_sqrt2_[i * 2]);
83 + SAFE_DELETE(pyramid_sqrt2_[i * 2 + 1]);
84 + SAFE_DELETE(spatial_x_[i]);
85 + SAFE_DELETE(spatial_y_[i]);
86 + }
87 + }
88 +
89 + void SetData(const uint8_t* const new_frame, const int stride,
90 + const int64_t timestamp, const int downsample_factor) {
91 + SetData(new_frame, NULL, stride, timestamp, downsample_factor);
92 + }
93 +
94 + void SetData(const uint8_t* const new_frame, const uint8_t* const uv_frame,
95 + const int stride, const int64_t timestamp,
96 + const int downsample_factor) {
97 + ResetComputationCache();
98 +
99 + timestamp_ = timestamp;
100 +
101 + TimeLog("SetData!");
102 +
103 + pyramid_sqrt2_[0]->FromArray(new_frame, stride, downsample_factor);
104 + pyramid_sqrt2_computed_[0] = true;
105 + TimeLog("Downsampled image");
106 +
107 + if (uv_frame != NULL) {
108 + if (u_data_.get() == NULL) {
109 + u_data_.reset(new Image<uint8_t>(uv_frame_width_, uv_frame_height_));
110 + v_data_.reset(new Image<uint8_t>(uv_frame_width_, uv_frame_height_));
111 + }
112 +
113 + GetUV(uv_frame, u_data_.get(), v_data_.get());
114 + uv_data_computed_ = true;
115 + TimeLog("Copied UV data");
116 + } else {
117 + LOGV("No uv data!");
118 + }
119 +
120 +#ifdef LOG_TIME
121 + // If profiling is enabled, precompute here to make it easier to distinguish
122 + // total costs.
123 + Precompute();
124 +#endif
125 + }
126 +
127 +  inline int64_t GetTimestamp() const { return timestamp_; }
128 +
129 + inline const Image<uint8_t>* GetImage() const {
130 + SCHECK(pyramid_sqrt2_computed_[0], "image not set!");
131 + return pyramid_sqrt2_[0];
132 + }
133 +
134 + const Image<uint8_t>* GetPyramidSqrt2Level(const int level) const {
135 + if (!pyramid_sqrt2_computed_[level]) {
136 + SCHECK(level != 0, "Level equals 0!");
137 + if (level == 1) {
138 + const Image<uint8_t>& upper_level = *GetPyramidSqrt2Level(0);
139 + if (pyramid_sqrt2_[level] == NULL) {
140 + const int new_width =
141 + (static_cast<int>(upper_level.GetWidth() / sqrtf(2)) + 1) / 2 * 2;
142 + const int new_height =
143 + (static_cast<int>(upper_level.GetHeight() / sqrtf(2)) + 1) / 2 *
144 + 2;
145 +
146 + pyramid_sqrt2_[level] = new Image<uint8_t>(new_width, new_height);
147 + }
148 + pyramid_sqrt2_[level]->DownsampleInterpolateLinear(upper_level);
149 + } else {
150 + const Image<uint8_t>& upper_level = *GetPyramidSqrt2Level(level - 2);
151 + if (pyramid_sqrt2_[level] == NULL) {
152 + pyramid_sqrt2_[level] = new Image<uint8_t>(
153 + upper_level.GetWidth() / 2, upper_level.GetHeight() / 2);
154 + }
155 + pyramid_sqrt2_[level]->DownsampleAveraged(
156 + upper_level.data(), upper_level.stride(), 2);
157 + }
158 + pyramid_sqrt2_computed_[level] = true;
159 + }
160 + return pyramid_sqrt2_[level];
161 + }
162 +
163 + inline const Image<int32_t>* GetSpatialX(const int level) const {
164 + if (!spatial_x_computed_[level]) {
165 + const Image<uint8_t>& src = *GetPyramidSqrt2Level(level * 2);
166 + if (spatial_x_[level] == NULL) {
167 + spatial_x_[level] = new Image<int32_t>(src.GetWidth(), src.GetHeight());
168 + }
169 + spatial_x_[level]->DerivativeX(src);
170 + spatial_x_computed_[level] = true;
171 + }
172 + return spatial_x_[level];
173 + }
174 +
175 + inline const Image<int32_t>* GetSpatialY(const int level) const {
176 + if (!spatial_y_computed_[level]) {
177 + const Image<uint8_t>& src = *GetPyramidSqrt2Level(level * 2);
178 + if (spatial_y_[level] == NULL) {
179 + spatial_y_[level] = new Image<int32_t>(src.GetWidth(), src.GetHeight());
180 + }
181 + spatial_y_[level]->DerivativeY(src);
182 + spatial_y_computed_[level] = true;
183 + }
184 + return spatial_y_[level];
185 + }
186 +
187 + // The integral image is currently only used for object detection, so lazily
188 + // initialize it on request.
189 + inline const IntegralImage* GetIntegralImage() const {
190 + if (integral_image_.get() == NULL) {
191 + integral_image_.reset(new IntegralImage(image_));
192 + } else if (!integral_image_computed_) {
193 + integral_image_->Recompute(image_);
194 + }
195 + integral_image_computed_ = true;
196 + return integral_image_.get();
197 + }
198 +
199 + inline const Image<uint8_t>* GetU() const {
200 + SCHECK(uv_data_computed_, "UV data not provided!");
201 + return u_data_.get();
202 + }
203 +
204 + inline const Image<uint8_t>* GetV() const {
205 + SCHECK(uv_data_computed_, "UV data not provided!");
206 + return v_data_.get();
207 + }
208 +
209 + private:
210 + void Precompute() {
211 + // Create the smoothed pyramids.
212 + for (int i = 0; i < kNumPyramidLevels * 2; i += 2) {
213 + (void) GetPyramidSqrt2Level(i);
214 + }
215 + TimeLog("Created smoothed pyramids");
216 +
217 +    // Create the sqrt(2)-interpolated pyramid levels.
218 + for (int i = 1; i < kNumPyramidLevels * 2; i += 2) {
219 + (void) GetPyramidSqrt2Level(i);
220 + }
221 + TimeLog("Created smoothed sqrt pyramids");
222 +
223 + // Create the spatial derivatives for frame 1.
224 + for (int i = 0; i < kNumPyramidLevels; ++i) {
225 + (void) GetSpatialX(i);
226 + (void) GetSpatialY(i);
227 + }
228 + TimeLog("Created spatial derivatives");
229 +
230 + (void) GetIntegralImage();
231 + TimeLog("Got integral image!");
232 + }
233 +
234 + const int uv_frame_width_;
235 + const int uv_frame_height_;
236 +
237 + int64_t timestamp_;
238 +
239 + Image<uint8_t> image_;
240 +
241 + bool uv_data_computed_;
242 + std::unique_ptr<Image<uint8_t> > u_data_;
243 + std::unique_ptr<Image<uint8_t> > v_data_;
244 +
245 + mutable bool spatial_x_computed_[kNumPyramidLevels];
246 + mutable Image<int32_t>* spatial_x_[kNumPyramidLevels];
247 +
248 + mutable bool spatial_y_computed_[kNumPyramidLevels];
249 + mutable Image<int32_t>* spatial_y_[kNumPyramidLevels];
250 +
251 + // Mutable so the lazy initialization can work when this class is const.
252 + // Whether or not the integral image has been computed for the current image.
253 + mutable bool integral_image_computed_;
254 + mutable std::unique_ptr<IntegralImage> integral_image_;
255 +
256 + mutable bool pyramid_sqrt2_computed_[kNumPyramidLevels * 2];
257 + mutable Image<uint8_t>* pyramid_sqrt2_[kNumPyramidLevels * 2];
258 +
259 + TF_DISALLOW_COPY_AND_ASSIGN(ImageData);
260 +};
261 +
262 +} // namespace tf_tracking
263 +
264 +#endif // TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_IMAGE_DATA_H_
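Everything ImageData derives (pyramid levels, spatial derivatives, the integral image) is computed lazily on first access and cached until the next SetData() resets the flags. A hypothetical usage sketch; the buffer and dimensions are illustrative:

// Sketch only: `luminance` is assumed to be a 320x240 grayscale plane.
void ProcessFrame(const uint8_t* const luminance) {
  tf_tracking::ImageData data(320, 240);
  data.SetData(luminance, /*stride=*/320, /*timestamp=*/0,
               /*downsample_factor=*/1);
  // First access computes and caches; repeat calls this frame are free.
  const tf_tracking::Image<uint8_t>* level = data.GetPyramidSqrt2Level(2);
  const tf_tracking::Image<int32_t>* dx = data.GetSpatialX(0);
  (void)level;
  (void)dx;
}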
1 +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +// NEON implementations of Image methods for compatible devices. Control
17 +// should never enter this compilation unit on incompatible devices.
18 +
19 +#ifdef __ARM_NEON
20 +
21 +#include <arm_neon.h>
22 +
23 +#include <stdint.h>
24 +
25 +#include "tensorflow/examples/android/jni/object_tracking/image-inl.h"
26 +#include "tensorflow/examples/android/jni/object_tracking/image.h"
27 +#include "tensorflow/examples/android/jni/object_tracking/image_utils.h"
28 +#include "tensorflow/examples/android/jni/object_tracking/utils.h"
29 +
30 +namespace tf_tracking {
31 +
32 +// This function does the bulk of the work.
33 +template <>
34 +void Image<uint8_t>::Downsample2x32ColumnsNeon(const uint8_t* const original,
35 + const int stride,
36 + const int orig_x) {
37 + // Divide input x offset by 2 to find output offset.
38 + const int new_x = orig_x >> 1;
39 +
40 + // Initial offset into top row.
41 + const uint8_t* offset = original + orig_x;
42 +
43 +  // This points to the leftmost pixel of our 16 horizontally arranged
44 + // pixels in the destination data.
45 + uint8_t* ptr_dst = (*this)[0] + new_x;
46 +
47 + // Sum along vertical columns.
48 + // Process 32x2 input pixels and 16x1 output pixels per iteration.
49 + for (int new_y = 0; new_y < height_; ++new_y) {
50 + uint16x8_t accum1 = vdupq_n_u16(0);
51 + uint16x8_t accum2 = vdupq_n_u16(0);
52 +
53 +    // Go top to bottom across the two rows of input pixels that make up
54 + // this output row.
55 + for (int row_num = 0; row_num < 2; ++row_num) {
56 + // First 16 bytes.
57 + {
58 + // Load 16 bytes of data from current offset.
59 + const uint8x16_t curr_data1 = vld1q_u8(offset);
60 +
61 + // Pairwise add and accumulate into accum vectors (16 bit to account
62 + // for values above 255).
63 + accum1 = vpadalq_u8(accum1, curr_data1);
64 + }
65 +
66 + // Second 16 bytes.
67 + {
68 + // Load 16 bytes of data from current offset.
69 + const uint8x16_t curr_data2 = vld1q_u8(offset + 16);
70 +
71 + // Pairwise add and accumulate into accum vectors (16 bit to account
72 + // for values above 255).
73 + accum2 = vpadalq_u8(accum2, curr_data2);
74 + }
75 +
76 + // Move offset down one row.
77 + offset += stride;
78 + }
79 +
80 + // Divide by 4 (number of input pixels per output
81 + // pixel) and narrow data from 16 bits per pixel to 8 bpp.
82 + const uint8x8_t tmp_pix1 = vqshrn_n_u16(accum1, 2);
83 + const uint8x8_t tmp_pix2 = vqshrn_n_u16(accum2, 2);
84 +
85 + // Concatenate 8x1 pixel strips into 16x1 pixel strip.
86 + const uint8x16_t allpixels = vcombine_u8(tmp_pix1, tmp_pix2);
87 +
88 + // Copy all pixels from composite 16x1 vector into output strip.
89 + vst1q_u8(ptr_dst, allpixels);
90 +
91 + ptr_dst += stride_;
92 + }
93 +}
94 +
95 +// As above, but downsamples by a factor of 4 instead of 2.
96 +template <>
97 +void Image<uint8_t>::Downsample4x32ColumnsNeon(const uint8_t* const original,
98 + const int stride,
99 + const int orig_x) {
100 + // Divide input x offset by 4 to find output offset.
101 + const int new_x = orig_x >> 2;
102 +
103 + // Initial offset into top row.
104 + const uint8_t* offset = original + orig_x;
105 +
106 + // This points to the leftmost pixel of our 8 horizontally arranged
107 + // pixels in the destination data.
108 + uint8_t* ptr_dst = (*this)[0] + new_x;
109 +
110 + // Sum along vertical columns.
111 + // Process 32x4 input pixels and 8x1 output pixels per iteration.
112 + for (int new_y = 0; new_y < height_; ++new_y) {
113 + uint16x8_t accum1 = vdupq_n_u16(0);
114 + uint16x8_t accum2 = vdupq_n_u16(0);
115 +
116 + // Go top to bottom across the four rows of input pixels that make up
117 + // this output row.
118 + for (int row_num = 0; row_num < 4; ++row_num) {
119 + // First 16 bytes.
120 + {
121 + // Load 16 bytes of data from current offset.
122 + const uint8x16_t curr_data1 = vld1q_u8(offset);
123 +
124 + // Pairwise add and accumulate into accum vectors (16 bit to account
125 + // for values above 255).
126 + accum1 = vpadalq_u8(accum1, curr_data1);
127 + }
128 +
129 + // Second 16 bytes.
130 + {
131 + // Load 16 bytes of data from current offset.
132 + const uint8x16_t curr_data2 = vld1q_u8(offset + 16);
133 +
134 + // Pairwise add and accumulate into accum vectors (16 bit to account
135 + // for values above 255).
136 + accum2 = vpadalq_u8(accum2, curr_data2);
137 + }
138 +
139 + // Move offset down one row.
140 + offset += stride;
141 + }
142 +
143 + // Add and widen, then divide by 16 (number of input pixels per output
144 + // pixel) and narrow data from 32 bits per pixel to 16 bpp.
145 + const uint16x4_t tmp_pix1 = vqshrn_n_u32(vpaddlq_u16(accum1), 4);
146 + const uint16x4_t tmp_pix2 = vqshrn_n_u32(vpaddlq_u16(accum2), 4);
147 +
148 + // Combine 4x1 pixel strips into 8x1 pixel strip and narrow from
149 + // 16 bits to 8 bits per pixel.
150 + const uint8x8_t allpixels = vmovn_u16(vcombine_u16(tmp_pix1, tmp_pix2));
151 +
152 + // Copy all pixels from composite 8x1 vector into output strip.
153 + vst1_u8(ptr_dst, allpixels);
154 +
155 + ptr_dst += stride_;
156 + }
157 +}
158 +
159 +
160 +// Hardware accelerated downsampling method for supported devices.
161 +// Requires that image size be a multiple of 16 pixels in each dimension,
162 +// and that downsampling be by a factor of 2 or 4.
163 +template <>
164 +void Image<uint8_t>::DownsampleAveragedNeon(const uint8_t* const original,
165 + const int stride,
166 + const int factor) {
167 + // TODO(andrewharp): stride is a bad approximation for the src image's width.
168 + // Better to pass that in directly.
169 + SCHECK(width_ * factor <= stride, "Uh oh!");
170 + const int last_starting_index = width_ * factor - 32;
171 +
172 + // We process 32 input pixels lengthwise at a time.
173 + // The output per pass of this loop is an 8 wide by downsampled height tall
174 + // pixel strip.
175 + int orig_x = 0;
176 + for (; orig_x <= last_starting_index; orig_x += 32) {
177 + if (factor == 2) {
178 + Downsample2x32ColumnsNeon(original, stride, orig_x);
179 + } else {
180 + Downsample4x32ColumnsNeon(original, stride, orig_x);
181 + }
182 + }
183 +
184 + // If a last pass is required, push it to the left enough so that it never
185 + // goes out of bounds. This will result in some extra computation on devices
186 + // whose frame widths are multiples of 16 and not 32.
187 + if (orig_x < last_starting_index + 32) {
188 + if (factor == 2) {
189 + Downsample2x32ColumnsNeon(original, stride, last_starting_index);
190 + } else {
191 + Downsample4x32ColumnsNeon(original, stride, last_starting_index);
192 + }
193 + }
194 +}
195 +
196 +
197 +// Puts the image gradient matrix about a pixel into the 2x2 float array G.
198 +// vals_x should be an array of the window x gradient values, whose indices
199 +// can be in any order but are parallel to the vals_y entries.
200 +// See http://robots.stanford.edu/cs223b04/algo_tracking.pdf for more details.
201 +void CalculateGNeon(const float* const vals_x, const float* const vals_y,
202 + const int num_vals, float* const G) {
203 + const float32_t* const arm_vals_x = (const float32_t*) vals_x;
204 + const float32_t* const arm_vals_y = (const float32_t*) vals_y;
205 +
206 + // Running sums.
207 + float32x4_t xx = vdupq_n_f32(0.0f);
208 + float32x4_t xy = vdupq_n_f32(0.0f);
209 + float32x4_t yy = vdupq_n_f32(0.0f);
210 +
211 + // Maximum index we can load 4 consecutive values from.
212 + // e.g. if there are 81 values, our last full pass can be from index 77:
213 + // 81-4=>77 (77, 78, 79, 80)
214 + const int max_i = num_vals - 4;
215 +
216 + // Defined here because we want to keep track of how many values were
217 + // processed by NEON, so that we can finish off the remainder the normal
218 + // way.
219 + int i = 0;
220 +
221 + // Process values 4 at a time, accumulating the sums of
222 + // the pixel-wise x*x, x*y, and y*y values.
223 + for (; i <= max_i; i += 4) {
224 + // Load xs
225 + float32x4_t x = vld1q_f32(arm_vals_x + i);
226 +
227 + // Multiply x*x and accumulate.
228 + xx = vmlaq_f32(xx, x, x);
229 +
230 + // Load ys
231 + float32x4_t y = vld1q_f32(arm_vals_y + i);
232 +
233 + // Multiply x*y and accumulate.
234 + xy = vmlaq_f32(xy, x, y);
235 +
236 + // Multiply y*y and accumulate.
237 + yy = vmlaq_f32(yy, y, y);
238 + }
239 +
240 +  float32_t xx_vals[4];  // Non-static scratch keeps this thread-safe.
241 +  float32_t xy_vals[4];
242 +  float32_t yy_vals[4];
243 +
244 + vst1q_f32(xx_vals, xx);
245 + vst1q_f32(xy_vals, xy);
246 + vst1q_f32(yy_vals, yy);
247 +
248 +  // Accumulated values are stored in sets of 4; we have to manually add
249 +  // the four lanes of each vector together at the end.
250 + for (int j = 0; j < 4; ++j) {
251 + G[0] += xx_vals[j];
252 + G[1] += xy_vals[j];
253 + G[3] += yy_vals[j];
254 + }
255 +
256 + // Finishes off last few values (< 4) from above.
257 + for (; i < num_vals; ++i) {
258 + G[0] += Square(vals_x[i]);
259 + G[1] += vals_x[i] * vals_y[i];
260 + G[3] += Square(vals_y[i]);
261 + }
262 +
263 + // The matrix is symmetric, so this is a given.
264 + G[2] = G[1];
265 +}
266 +
267 +} // namespace tf_tracking
268 +
269 +#endif
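For reference, a scalar equivalent of CalculateGNeon: the NEON path accumulates the same three windowed sums four lanes at a time and folds the lanes afterwards. A sketch, not part of this change:

// Accumulates the 2x2 gradient matrix G = [sum(xx), sum(xy); sum(xy), sum(yy)]
// over the window, exactly what the vectorized version computes.
void CalculateGScalar(const float* const vals_x, const float* const vals_y,
                      const int num_vals, float* const G) {
  for (int i = 0; i < num_vals; ++i) {
    G[0] += vals_x[i] * vals_x[i];
    G[1] += vals_x[i] * vals_y[i];
    G[3] += vals_y[i] * vals_y[i];
  }
  G[2] = G[1];  // Symmetric matrix.
}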
1 +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +#ifndef TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_INTEGRAL_IMAGE_H_
17 +#define TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_INTEGRAL_IMAGE_H_
18 +
19 +#include "tensorflow/examples/android/jni/object_tracking/geom.h"
20 +#include "tensorflow/examples/android/jni/object_tracking/image-inl.h"
21 +#include "tensorflow/examples/android/jni/object_tracking/image.h"
22 +#include "tensorflow/examples/android/jni/object_tracking/utils.h"
23 +
24 +namespace tf_tracking {
25 +
26 +typedef uint8_t Code;
27 +
28 +class IntegralImage : public Image<uint32_t> {
29 + public:
30 + explicit IntegralImage(const Image<uint8_t>& image_base)
31 + : Image<uint32_t>(image_base.GetWidth(), image_base.GetHeight()) {
32 + Recompute(image_base);
33 + }
34 +
35 + IntegralImage(const int width, const int height)
36 + : Image<uint32_t>(width, height) {}
37 +
38 + void Recompute(const Image<uint8_t>& image_base) {
39 + SCHECK(image_base.GetWidth() == GetWidth() &&
40 + image_base.GetHeight() == GetHeight(), "Dimensions don't match!");
41 +
42 + // Sum along first row.
43 + {
44 + int x_sum = 0;
45 + for (int x = 0; x < image_base.GetWidth(); ++x) {
46 + x_sum += image_base[0][x];
47 + (*this)[0][x] = x_sum;
48 + }
49 + }
50 +
51 + // Sum everything else.
52 + for (int y = 1; y < image_base.GetHeight(); ++y) {
53 + uint32_t* curr_sum = (*this)[y];
54 +
55 + // Previously summed pointers.
56 + const uint32_t* up_one = (*this)[y - 1];
57 +
58 + // Current value pointer.
59 + const uint8_t* curr_delta = image_base[y];
60 +
61 + uint32_t row_till_now = 0;
62 +
63 + for (int x = 0; x < GetWidth(); ++x) {
64 + // Add the one above and the one to the left.
65 + row_till_now += *curr_delta;
66 + *curr_sum = *up_one + row_till_now;
67 +
68 + // Scoot everything along.
69 + ++curr_sum;
70 + ++up_one;
71 + ++curr_delta;
72 + }
73 + }
74 +
75 + SCHECK(VerifyData(image_base), "Images did not match!");
76 + }
77 +
78 + bool VerifyData(const Image<uint8_t>& image_base) {
79 + for (int y = 0; y < GetHeight(); ++y) {
80 + for (int x = 0; x < GetWidth(); ++x) {
81 + uint32_t curr_val = (*this)[y][x];
82 +
83 + if (x > 0) {
84 + curr_val -= (*this)[y][x - 1];
85 + }
86 +
87 + if (y > 0) {
88 + curr_val -= (*this)[y - 1][x];
89 + }
90 +
91 + if (x > 0 && y > 0) {
92 + curr_val += (*this)[y - 1][x - 1];
93 + }
94 +
95 + if (curr_val != image_base[y][x]) {
96 + LOGE("Mismatch! %d vs %d", curr_val, image_base[y][x]);
97 + return false;
98 + }
99 +
100 + if (GetRegionSum(x, y, x, y) != curr_val) {
101 + LOGE("Mismatch!");
102 + }
103 + }
104 + }
105 +
106 + return true;
107 + }
108 +
109 + // Returns the sum of all pixels in the specified region.
110 + inline uint32_t GetRegionSum(const int x1, const int y1, const int x2,
111 + const int y2) const {
112 + SCHECK(x1 >= 0 && y1 >= 0 &&
113 + x2 >= x1 && y2 >= y1 && x2 < GetWidth() && y2 < GetHeight(),
114 + "indices out of bounds! %d-%d / %d, %d-%d / %d, ",
115 + x1, x2, GetWidth(), y1, y2, GetHeight());
116 +
117 + const uint32_t everything = (*this)[y2][x2];
118 +
119 + uint32_t sum = everything;
120 + if (x1 > 0 && y1 > 0) {
121 + // Most common case.
122 + const uint32_t left = (*this)[y2][x1 - 1];
123 + const uint32_t top = (*this)[y1 - 1][x2];
124 + const uint32_t top_left = (*this)[y1 - 1][x1 - 1];
125 +
126 + sum = everything - left - top + top_left;
127 + SCHECK(sum >= 0, "Both: %d - %d - %d + %d => %d! indices: %d %d %d %d",
128 + everything, left, top, top_left, sum, x1, y1, x2, y2);
129 + } else if (x1 > 0) {
130 + // Flush against top of image.
131 + // Subtract out the region to the left only.
132 + const uint32_t top = (*this)[y2][x1 - 1];
133 + sum = everything - top;
134 + SCHECK(sum >= 0, "Top: %d - %d => %d!", everything, top, sum);
135 + } else if (y1 > 0) {
136 + // Flush against left side of image.
137 + // Subtract out the region above only.
138 + const uint32_t left = (*this)[y1 - 1][x2];
139 + sum = everything - left;
140 + SCHECK(sum >= 0, "Left: %d - %d => %d!", everything, left, sum);
141 + }
142 +
143 + SCHECK(sum >= 0, "Negative sum!");
144 +
145 + return sum;
146 + }
147 +
148 + // Returns the 2bit code associated with this region, which represents
149 + // the overall gradient.
150 + inline Code GetCode(const BoundingBox& bounding_box) const {
151 + return GetCode(bounding_box.left_, bounding_box.top_,
152 + bounding_box.right_, bounding_box.bottom_);
153 + }
154 +
155 + inline Code GetCode(const int x1, const int y1,
156 + const int x2, const int y2) const {
157 + SCHECK(x1 < x2 && y1 < y2, "Bounds out of order!! TL:%d,%d BR:%d,%d",
158 + x1, y1, x2, y2);
159 +
160 + // Gradient computed vertically.
161 + const int box_height = (y2 - y1) / 2;
162 + const int top_sum = GetRegionSum(x1, y1, x2, y1 + box_height);
163 + const int bottom_sum = GetRegionSum(x1, y2 - box_height, x2, y2);
164 + const bool vertical_code = top_sum > bottom_sum;
165 +
166 + // Gradient computed horizontally.
167 + const int box_width = (x2 - x1) / 2;
168 + const int left_sum = GetRegionSum(x1, y1, x1 + box_width, y2);
169 + const int right_sum = GetRegionSum(x2 - box_width, y1, x2, y2);
170 + const bool horizontal_code = left_sum > right_sum;
171 +
172 + const Code final_code = (vertical_code << 1) | horizontal_code;
173 +
174 + SCHECK(InRange(final_code, static_cast<Code>(0), static_cast<Code>(3)),
175 + "Invalid code! %d", final_code);
176 +
177 + // Returns a value 0-3.
178 + return final_code;
179 + }
180 +
181 + private:
182 + TF_DISALLOW_COPY_AND_ASSIGN(IntegralImage);
183 +};
184 +
185 +} // namespace tf_tracking
186 +
187 +#endif // TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_INTEGRAL_IMAGE_H_
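GetRegionSum is four lookups plus inclusion-exclusion: the left strip and the top strip each contain the top-left block, so it is added back once. A worked sketch, assuming Image rows are writable through operator[] as in Recompute above:

void RegionSumExample() {
  tf_tracking::Image<uint8_t> image(4, 4);
  for (int y = 0; y < 4; ++y) {
    for (int x = 0; x < 4; ++x) {
      image[y][x] = 1;  // All ones: every region sum equals its area.
    }
  }
  tf_tracking::IntegralImage integral(image);
  // Expands to I[2][2] - I[2][0] - I[0][2] + I[0][0] = 9 - 3 - 3 + 1 = 4.
  const uint32_t sum = integral.GetRegionSum(1, 1, 2, 2);  // 2x2 block => 4
  (void)sum;
}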
1 +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +#ifndef TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_JNI_UTILS_H_
17 +#define TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_JNI_UTILS_H_
18 +
19 +#include <jni.h>
20 +#include <stdint.h>
21 +
22 +#include "tensorflow/examples/android/jni/object_tracking/utils.h"
23 +
24 +// The JniLongField class is used to access Java fields from native code. This
25 +// technique of hiding pointers to native objects in opaque Java fields is how
26 +// the Android hardware libraries work. This reduces the number of static
27 +// native methods and makes it easier to manage the lifetime of native objects.
28 +class JniLongField {
29 + public:
30 + JniLongField(const char* field_name)
31 + : field_name_(field_name), field_ID_(0) {}
32 +
33 + int64_t get(JNIEnv* env, jobject thiz) {
34 + if (field_ID_ == 0) {
35 + jclass cls = env->GetObjectClass(thiz);
36 + CHECK_ALWAYS(cls != 0, "Unable to find class");
37 + field_ID_ = env->GetFieldID(cls, field_name_, "J");
38 + CHECK_ALWAYS(field_ID_ != 0,
39 + "Unable to find field %s. (Check proguard cfg)", field_name_);
40 + }
41 +
42 + return env->GetLongField(thiz, field_ID_);
43 + }
44 +
45 + void set(JNIEnv* env, jobject thiz, int64_t value) {
46 + if (field_ID_ == 0) {
47 + jclass cls = env->GetObjectClass(thiz);
48 + CHECK_ALWAYS(cls != 0, "Unable to find class");
49 + field_ID_ = env->GetFieldID(cls, field_name_, "J");
50 + CHECK_ALWAYS(field_ID_ != 0,
51 + "Unable to find field %s (Check proguard cfg)", field_name_);
52 + }
53 +
54 + env->SetLongField(thiz, field_ID_, value);
55 + }
56 +
57 + private:
58 + const char* const field_name_;
59 +
60 + // This is just a cache
61 + jfieldID field_ID_;
62 +};
63 +
64 +#endif // TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_JNI_UTILS_H_
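A hypothetical usage sketch of the pattern described above; the struct, field name, and JNI entry point are illustrative, not from this change:

struct NativeState { int frame_count = 0; };  // Stand-in native object.

static JniLongField state_field("nativeState");

extern "C" JNIEXPORT void JNICALL
Java_com_example_Tracker_onFrame(JNIEnv* env, jobject thiz) {
  NativeState* state =
      reinterpret_cast<NativeState*>(state_field.get(env, thiz));
  if (state == nullptr) {
    // First call: create the native object and stash its address in the
    // Java object's long field for subsequent calls.
    state = new NativeState();
    state_field.set(env, thiz, reinterpret_cast<int64_t>(state));
  }
  ++state->frame_count;
}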
1 +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +#ifndef TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_KEYPOINT_H_
17 +#define TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_KEYPOINT_H_
18 +
19 +#include "tensorflow/examples/android/jni/object_tracking/geom.h"
20 +#include "tensorflow/examples/android/jni/object_tracking/image-inl.h"
21 +#include "tensorflow/examples/android/jni/object_tracking/image.h"
22 +#include "tensorflow/examples/android/jni/object_tracking/logging.h"
23 +#include "tensorflow/examples/android/jni/object_tracking/time_log.h"
24 +#include "tensorflow/examples/android/jni/object_tracking/utils.h"
25 +
26 +#include "tensorflow/examples/android/jni/object_tracking/config.h"
27 +
28 +namespace tf_tracking {
29 +
30 +// For keeping track of keypoints.
31 +struct Keypoint {
32 + Keypoint() : pos_(0.0f, 0.0f), score_(0.0f), type_(0) {}
33 + Keypoint(const float x, const float y)
34 + : pos_(x, y), score_(0.0f), type_(0) {}
35 +
36 + Point2f pos_;
37 + float score_;
38 + uint8_t type_;
39 +};
40 +
41 +inline std::ostream& operator<<(std::ostream& stream, const Keypoint& keypoint) {
42 +  return stream << "[" << keypoint.pos_ << ", " << keypoint.score_
43 +                << ", " << static_cast<int>(keypoint.type_) << "]";
44 +}
45 +
46 +} // namespace tf_tracking
47 +
48 +#endif // TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_KEYPOINT_H_
1 +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +#ifndef TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_KEYPOINT_DETECTOR_H_
17 +#define TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_KEYPOINT_DETECTOR_H_
18 +
19 +#include <stdint.h>
20 +#include <vector>
21 +
22 +#include "tensorflow/examples/android/jni/object_tracking/image-inl.h"
23 +#include "tensorflow/examples/android/jni/object_tracking/image.h"
24 +#include "tensorflow/examples/android/jni/object_tracking/image_data.h"
25 +#include "tensorflow/examples/android/jni/object_tracking/optical_flow.h"
26 +
27 +namespace tf_tracking {
28 +
29 +struct Keypoint;
30 +
31 +class KeypointDetector {
32 + public:
33 + explicit KeypointDetector(const KeypointDetectorConfig* const config)
34 + : config_(config),
35 + keypoint_scratch_(new Image<uint8_t>(config_->image_size)),
36 + interest_map_(new Image<bool>(config_->image_size)),
37 + fast_quadrant_(0) {
38 + interest_map_->Clear(false);
39 + }
40 +
41 + ~KeypointDetector() {}
42 +
43 + // Finds a new set of keypoints for the current frame, picked from the current
44 + // set of keypoints and also from a set discovered via a keypoint detector.
45 + // Special attention is applied to make sure that keypoints are distributed
46 + // within the supplied ROIs.
47 + void FindKeypoints(const ImageData& image_data,
48 + const std::vector<BoundingBox>& rois,
49 + const FramePair& prev_change,
50 + FramePair* const curr_change);
51 +
52 + private:
53 + // Compute the corneriness of a point in the image.
54 + float HarrisFilter(const Image<int32_t>& I_x, const Image<int32_t>& I_y,
55 + const float x, const float y) const;
56 +
57 +  // Adds a grid of candidate keypoints to each given box, up to
58 + // max_num_keypoints or kNumToAddAsCandidates^2, whichever is lower.
59 + int AddExtraCandidatesForBoxes(
60 + const std::vector<BoundingBox>& boxes,
61 + const int max_num_keypoints,
62 + Keypoint* const keypoints) const;
63 +
64 + // Scan the frame for potential keypoints using the FAST keypoint detector.
65 + // Quadrant is an argument 0-3 which refers to the quadrant of the image in
66 + // which to detect keypoints.
67 + int FindFastKeypoints(const Image<uint8_t>& frame, const int quadrant,
68 + const int downsample_factor,
69 + const int max_num_keypoints, Keypoint* const keypoints);
70 +
71 + int FindFastKeypoints(const ImageData& image_data,
72 + const int max_num_keypoints,
73 + Keypoint* const keypoints);
74 +
75 + // Score a bunch of candidate keypoints. Assigns the scores to the input
76 + // candidate_keypoints array entries.
77 + void ScoreKeypoints(const ImageData& image_data,
78 + const int num_candidates,
79 + Keypoint* const candidate_keypoints);
80 +
81 + void SortKeypoints(const int num_candidates,
82 + Keypoint* const candidate_keypoints) const;
83 +
84 + // Selects a set of keypoints falling within the supplied box such that the
85 + // most highly rated keypoints are picked first, and so that none of them are
86 + // too close together.
87 + int SelectKeypointsInBox(
88 + const BoundingBox& box,
89 + const Keypoint* const candidate_keypoints,
90 + const int num_candidates,
91 + const int max_keypoints,
92 + const int num_existing_keypoints,
93 + const Keypoint* const existing_keypoints,
94 + Keypoint* const final_keypoints) const;
95 +
96 + // Selects from the supplied sorted keypoint pool a set of keypoints that will
97 + // best cover the given set of boxes, such that each box is covered at a
98 + // resolution proportional to its size.
99 + void SelectKeypoints(
100 + const std::vector<BoundingBox>& boxes,
101 + const Keypoint* const candidate_keypoints,
102 + const int num_candidates,
103 + FramePair* const frame_change) const;
104 +
105 + // Copies and compacts the found keypoints in the second frame of prev_change
106 + // into the array at new_keypoints.
107 + static int CopyKeypoints(const FramePair& prev_change,
108 + Keypoint* const new_keypoints);
109 +
110 + const KeypointDetectorConfig* const config_;
111 +
112 + // Scratch memory for keypoint candidacy detection and non-max suppression.
113 + std::unique_ptr<Image<uint8_t> > keypoint_scratch_;
114 +
115 + // Regions of the image to pay special attention to.
116 + std::unique_ptr<Image<bool> > interest_map_;
117 +
118 + // The current quadrant of the image to detect FAST keypoints in.
119 + // Keypoint detection is staggered for performance reasons. Every four frames
120 + // a full scan of the frame will have been performed.
121 + int fast_quadrant_;
122 +
123 + Keypoint tmp_keypoints_[kMaxTempKeypoints];
124 +};
125 +
126 +} // namespace tf_tracking
127 +
128 +#endif // TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_KEYPOINT_DETECTOR_H_
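HarrisFilter's body lives in keypoint_detector.cc, which is collapsed in this diff. For orientation, the conventional Harris response over the 2x2 windowed gradient matrix G (the layout produced by CalculateGNeon in image_neon.cc) looks like the following; k = 0.04 is the usual constant, and this is a sketch rather than the actual implementation:

float HarrisResponse(const float* const G) {
  const float det = G[0] * G[3] - G[1] * G[2];
  const float trace = G[0] + G[3];
  return det - 0.04f * trace * trace;  // Large positive values => corner.
}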
1 +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +#include "tensorflow/examples/android/jni/object_tracking/logging.h"
17 +
18 +#ifdef STANDALONE_DEMO_LIB
19 +
20 +#include <android/log.h>
21 +#include <stdlib.h>
22 +#include <time.h>
23 +#include <iostream>
24 +#include <sstream>
25 +
26 +LogMessage::LogMessage(const char* fname, int line, int severity)
27 + : fname_(fname), line_(line), severity_(severity) {}
28 +
29 +void LogMessage::GenerateLogMessage() {
30 + int android_log_level;
31 + switch (severity_) {
32 + case INFO:
33 + android_log_level = ANDROID_LOG_INFO;
34 + break;
35 + case WARNING:
36 + android_log_level = ANDROID_LOG_WARN;
37 + break;
38 + case ERROR:
39 + android_log_level = ANDROID_LOG_ERROR;
40 + break;
41 + case FATAL:
42 + android_log_level = ANDROID_LOG_FATAL;
43 + break;
44 + default:
45 + if (severity_ < INFO) {
46 + android_log_level = ANDROID_LOG_VERBOSE;
47 + } else {
48 + android_log_level = ANDROID_LOG_ERROR;
49 + }
50 + break;
51 + }
52 +
53 + std::stringstream ss;
54 + const char* const partial_name = strrchr(fname_, '/');
55 + ss << (partial_name != nullptr ? partial_name + 1 : fname_) << ":" << line_
56 + << " " << str();
57 + __android_log_write(android_log_level, "native", ss.str().c_str());
58 +
59 + // Also log to stderr (for standalone Android apps).
60 + std::cerr << "native : " << ss.str() << std::endl;
61 +
62 + // Android logging at level FATAL does not terminate execution, so abort()
63 + // is still required to stop the program.
64 + if (severity_ == FATAL) {
65 + abort();
66 + }
67 +}
68 +
69 +namespace {
70 +
71 +// Parse log level (int64) from environment variable (char*)
72 +int64_t LogLevelStrToInt(const char* tf_env_var_val) {
73 + if (tf_env_var_val == nullptr) {
74 + return 0;
75 + }
76 +
77 + // Ideally we would use env_var / safe_strto64, but it is
78 + // hard to use here without pulling in a lot of dependencies,
79 +  // so we use std::istringstream instead.
80 + std::string min_log_level(tf_env_var_val);
81 + std::istringstream ss(min_log_level);
82 + int64_t level;
83 + if (!(ss >> level)) {
84 + // Invalid vlog level setting, set level to default (0)
85 + level = 0;
86 + }
87 +
88 + return level;
89 +}
90 +
91 +int64_t MinLogLevelFromEnv() {
92 + const char* tf_env_var_val = getenv("TF_CPP_MIN_LOG_LEVEL");
93 + return LogLevelStrToInt(tf_env_var_val);
94 +}
95 +
96 +int64_t MinVLogLevelFromEnv() {
97 + const char* tf_env_var_val = getenv("TF_CPP_MIN_VLOG_LEVEL");
98 + return LogLevelStrToInt(tf_env_var_val);
99 +}
100 +
101 +} // namespace
102 +
103 +LogMessage::~LogMessage() {
104 + // Read the min log level once during the first call to logging.
105 + static int64_t min_log_level = MinLogLevelFromEnv();
106 + if (TF_PREDICT_TRUE(severity_ >= min_log_level)) GenerateLogMessage();
107 +}
108 +
109 +int64_t LogMessage::MinVLogLevel() {
110 + static const int64_t min_vlog_level = MinVLogLevelFromEnv();
111 + return min_vlog_level;
112 +}
113 +
114 +LogMessageFatal::LogMessageFatal(const char* file, int line)
115 +    : LogMessage(file, line, FATAL) {}
116 +LogMessageFatal::~LogMessageFatal() {
117 + // abort() ensures we don't return (we promised we would not via
118 + // ATTRIBUTE_NORETURN).
119 + GenerateLogMessage();
120 + abort();
121 +}
122 +
123 +void LogString(const char* fname, int line, int severity,
124 + const std::string& message) {
125 + LogMessage(fname, line, severity) << message;
126 +}
127 +
128 +void LogPrintF(const int severity, const char* format, ...) {
129 + char message[1024];
130 + va_list argptr;
131 + va_start(argptr, format);
132 + vsnprintf(message, 1024, format, argptr);
133 + va_end(argptr);
134 + __android_log_write(severity, "native", message);
135 +
136 + // Also log to stderr (for standalone Android apps).
137 + std::cerr << "native : " << message << std::endl;
138 +}
139 +
140 +#endif
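The minimum log level is read from TF_CPP_MIN_LOG_LEVEL exactly once, through a function-local static in ~LogMessage, so it must be set before the first message is logged. An illustrative sketch:

#include <cstdlib>

void SilenceInfoLogs() {
  // 2 == ERROR under the severity constants in logging.h, so INFO and
  // WARNING messages are dropped. No effect once the first LOG() has run.
  setenv("TF_CPP_MIN_LOG_LEVEL", "2", /*overwrite=*/1);
}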
1 +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +#ifndef TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_LOGGING_H_
17 +#define TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_LOGGING_H_
18 +
19 +#include <android/log.h>
20 +#include <string.h>
21 +#include <ostream>
22 +#include <sstream>
23 +#include <string>
24 +
25 +// Allow this library to be built without depending on TensorFlow by
26 +// defining STANDALONE_DEMO_LIB. Otherwise TensorFlow headers will be
27 +// used.
28 +#ifdef STANDALONE_DEMO_LIB
29 +
30 +// A macro to disallow the copy constructor and operator= functions
31 +// This is usually placed in the private: declarations for a class.
32 +#define TF_DISALLOW_COPY_AND_ASSIGN(TypeName) \
33 + TypeName(const TypeName&) = delete; \
34 + void operator=(const TypeName&) = delete
35 +
36 +#if defined(COMPILER_GCC3)
37 +#define TF_PREDICT_FALSE(x) (__builtin_expect(x, 0))
38 +#define TF_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
39 +#else
40 +#define TF_PREDICT_FALSE(x) (x)
41 +#define TF_PREDICT_TRUE(x) (x)
42 +#endif
43 +
44 +// Log levels equivalent to those defined by
45 +// third_party/tensorflow/core/platform/logging.h
46 +const int INFO = 0; // base_logging::INFO;
47 +const int WARNING = 1; // base_logging::WARNING;
48 +const int ERROR = 2; // base_logging::ERROR;
49 +const int FATAL = 3; // base_logging::FATAL;
50 +const int NUM_SEVERITIES = 4; // base_logging::NUM_SEVERITIES;
51 +
52 +class LogMessage : public std::basic_ostringstream<char> {
53 + public:
54 + LogMessage(const char* fname, int line, int severity);
55 + ~LogMessage();
56 +
57 + // Returns the minimum log level for VLOG statements.
58 + // E.g., if MinVLogLevel() is 2, then VLOG(2) statements will produce output,
59 + // but VLOG(3) will not. Defaults to 0.
60 + static int64_t MinVLogLevel();
61 +
62 + protected:
63 + void GenerateLogMessage();
64 +
65 + private:
66 + const char* fname_;
67 + int line_;
68 + int severity_;
69 +};
70 +
71 +// LogMessageFatal ensures the process will exit in failure after
72 +// logging this message.
73 +class LogMessageFatal : public LogMessage {
74 + public:
75 + LogMessageFatal(const char* file, int line);
76 + ~LogMessageFatal();
77 +};
78 +
79 +#define _TF_LOG_INFO \
80 +  LogMessage(__FILE__, __LINE__, INFO)
81 +#define _TF_LOG_WARNING \
82 +  LogMessage(__FILE__, __LINE__, WARNING)
83 +#define _TF_LOG_ERROR \
84 +  LogMessage(__FILE__, __LINE__, ERROR)
85 +#define _TF_LOG_FATAL \
86 +  LogMessageFatal(__FILE__, __LINE__)
87 +
88 +#define _TF_LOG_QFATAL _TF_LOG_FATAL
89 +
90 +#define LOG(severity) _TF_LOG_##severity
91 +
92 +#define VLOG_IS_ON(lvl) ((lvl) <= LogMessage::MinVLogLevel())
93 +
94 +#define VLOG(lvl) \
95 + if (TF_PREDICT_FALSE(VLOG_IS_ON(lvl))) \
96 +  LogMessage(__FILE__, __LINE__, INFO)
97 +
98 +void LogPrintF(const int severity, const char* format, ...);
99 +
100 +// Support for printf style logging.
101 +#define LOGV(...)
102 +#define LOGD(...)
103 +#define LOGI(...) LogPrintF(ANDROID_LOG_INFO, __VA_ARGS__);
104 +#define LOGW(...) LogPrintF(ANDROID_LOG_WARN, __VA_ARGS__);
105 +#define LOGE(...) LogPrintF(ANDROID_LOG_ERROR, __VA_ARGS__);
106 +
107 +#else
108 +
109 +#include "tensorflow/core/lib/strings/stringprintf.h"
110 +#include "tensorflow/core/platform/logging.h"
111 +
112 +// Support for printf style logging.
113 +#define LOGV(...)
114 +#define LOGD(...)
115 +#define LOGI(...) LOG(INFO) << tensorflow::strings::Printf(__VA_ARGS__);
116 +#define LOGW(...) LOG(INFO) << tensorflow::strings::Printf(__VA_ARGS__);
117 +#define LOGE(...) LOG(INFO) << tensorflow::strings::Printf(__VA_ARGS__);
118 +
119 +#endif
120 +
121 +#endif // TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_LOGGING_H_
1 +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +// NOTE: no native object detectors are currently provided or used by the code
17 +// in this directory. This class remains mainly for historical reasons.
18 +// Detection in the TF demo is done through TensorFlowMultiBoxDetector.java.
19 +
20 +#include "tensorflow/examples/android/jni/object_tracking/object_detector.h"
21 +
22 +namespace tf_tracking {
23 +
24 +// This is here so that the vtable gets created properly.
25 +ObjectDetectorBase::~ObjectDetectorBase() {}
26 +
27 +} // namespace tf_tracking
1 +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +// NOTE: no native object detectors are currently provided or used by the code
17 +// in this directory. This class remains mainly for historical reasons.
18 +// Detection in the TF demo is done through TensorFlowMultiBoxDetector.java.
19 +
20 +// Defines the ObjectDetector class that is the main interface for detecting
21 +// ObjectModelBases in frames.
22 +
23 +#ifndef TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_OBJECT_DETECTOR_H_
24 +#define TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_OBJECT_DETECTOR_H_
25 +
26 +#include <float.h>
27 +#include <map>
28 +#include <memory>
29 +#include <sstream>
30 +#include <string>
31 +#include <vector>
32 +
33 +#include "tensorflow/examples/android/jni/object_tracking/geom.h"
34 +#include "tensorflow/examples/android/jni/object_tracking/image-inl.h"
35 +#include "tensorflow/examples/android/jni/object_tracking/image.h"
36 +#include "tensorflow/examples/android/jni/object_tracking/integral_image.h"
37 +#ifdef __RENDER_OPENGL__
38 +#include "tensorflow/examples/android/jni/object_tracking/sprite.h"
39 +#endif
40 +#include "tensorflow/examples/android/jni/object_tracking/utils.h"
41 +
42 +#include "tensorflow/examples/android/jni/object_tracking/config.h"
43 +#include "tensorflow/examples/android/jni/object_tracking/image_data.h"
44 +#include "tensorflow/examples/android/jni/object_tracking/object_model.h"
45 +
46 +namespace tf_tracking {
47 +
48 +// Adds BoundingSquares to a vector such that the first square added is
49 +// centered at the given position with starting_square_size, and the
50 +// remaining squares are added concentrically, scaling down by scale_factor
51 +// until the minimum threshold size is passed.
52 +// Squares that do not fall completely within image_bounds will not be added.
53 +static inline void FillWithSquares(
54 + const BoundingBox& image_bounds,
55 + const BoundingBox& position,
56 + const float starting_square_size,
57 + const float smallest_square_size,
58 + const float scale_factor,
59 + std::vector<BoundingSquare>* const squares) {
60 + BoundingSquare descriptor_area =
61 + GetCenteredSquare(position, starting_square_size);
62 +
63 + SCHECK(scale_factor < 1.0f, "Scale factor too large at %.2f!", scale_factor);
64 +
65 + // Use a do/while loop to ensure that at least one descriptor is created.
66 + do {
67 + if (image_bounds.Contains(descriptor_area.ToBoundingBox())) {
68 + squares->push_back(descriptor_area);
69 + }
70 + descriptor_area.Scale(scale_factor);
71 + } while (descriptor_area.size_ >= smallest_square_size - EPSILON);
72 + LOGV("Created %zu squares starting from size %.2f to min size %.2f "
73 + "using scale factor: %.2f",
74 + squares->size(), starting_square_size, smallest_square_size,
75 + scale_factor);
76 +}
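A usage sketch for FillWithSquares; the inputs are assumed. With a starting size of 100, a minimum of 25, and a scale factor of 0.75, the do/while above pushes squares of size 100, 75, 56.25, ~42.2, and ~31.6 (those fully inside image_bounds), stopping once the next size would fall below the minimum:

void FillWithSquaresExample(const tf_tracking::BoundingBox& image_bounds,
                            const tf_tracking::BoundingBox& position) {
  std::vector<tf_tracking::BoundingSquare> squares;
  tf_tracking::FillWithSquares(image_bounds, position,
                               /*starting_square_size=*/100.0f,
                               /*smallest_square_size=*/25.0f,
                               /*scale_factor=*/0.75f, &squares);
}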
77 +
78 +
79 +// Represents a potential detection of a specific ObjectExemplar and Descriptor
80 +// at a specific position in the image.
81 +class Detection {
82 + public:
83 + explicit Detection(const ObjectModelBase* const object_model,
84 + const MatchScore match_score,
85 + const BoundingBox& bounding_box)
86 + : object_model_(object_model),
87 + match_score_(match_score),
88 + bounding_box_(bounding_box) {}
89 +
90 + Detection(const Detection& other)
91 + : object_model_(other.object_model_),
92 + match_score_(other.match_score_),
93 + bounding_box_(other.bounding_box_) {}
94 +
95 + virtual ~Detection() {}
96 +
97 + inline BoundingBox GetObjectBoundingBox() const {
98 + return bounding_box_;
99 + }
100 +
101 + inline MatchScore GetMatchScore() const {
102 + return match_score_;
103 + }
104 +
105 + inline const ObjectModelBase* GetObjectModel() const {
106 + return object_model_;
107 + }
108 +
109 + inline bool Intersects(const Detection& other) {
110 +    // Checks whether the two detections' bounding boxes overlap.
111 + return bounding_box_.Intersects(other.bounding_box_);
112 + }
113 +
114 + struct Comp {
115 + inline bool operator()(const Detection& a, const Detection& b) const {
116 + return a.match_score_ > b.match_score_;
117 + }
118 + };
119 +
120 + // TODO(andrewharp): add accessors to update these instead.
121 + const ObjectModelBase* object_model_;
122 + MatchScore match_score_;
123 + BoundingBox bounding_box_;
124 +};
125 +
126 +inline std::ostream& operator<<(std::ostream& stream,
127 + const Detection& detection) {
128 + const BoundingBox actual_area = detection.GetObjectBoundingBox();
129 + stream << actual_area;
130 + return stream;
131 +}
132 +
133 +class ObjectDetectorBase {
134 + public:
135 + explicit ObjectDetectorBase(const ObjectDetectorConfig* const config)
136 + : config_(config),
137 + image_data_(NULL) {}
138 +
139 + virtual ~ObjectDetectorBase();
140 +
141 + // Sets the current image data. All calls to ObjectDetector other than
142 + // FillDescriptors use the image data last set.
143 + inline void SetImageData(const ImageData* const image_data) {
144 + image_data_ = image_data;
145 + }
146 +
147 + // Main entry point into the detection algorithm.
148 + // Scans the frame for candidates, tweaks them, and fills in the
149 + // given std::vector of Detection objects with acceptable matches.
150 + virtual void Detect(const std::vector<BoundingSquare>& positions,
151 + std::vector<Detection>* const detections) const = 0;
152 +
153 + virtual ObjectModelBase* CreateObjectModel(const std::string& name) = 0;
154 +
155 + virtual void DeleteObjectModel(const std::string& name) = 0;
156 +
157 + virtual void GetObjectModels(
158 + std::vector<const ObjectModelBase*>* models) const = 0;
159 +
160 +  // Updates the given model with appearance information taken from the
161 +  // given bounding box in the supplied base and integral images.
162 +  // No descriptor will be created in the case that there's no room for one
163 +  // in the example area, or the example area is not completely contained
164 +  // within the frame.
165 + virtual void UpdateModel(const Image<uint8_t>& base_image,
166 + const IntegralImage& integral_image,
167 + const BoundingBox& bounding_box, const bool locked,
168 + ObjectModelBase* model) const = 0;
169 +
170 + virtual void Draw() const = 0;
171 +
172 + virtual bool AllowSpontaneousDetections() = 0;
173 +
174 + protected:
175 + const std::unique_ptr<const ObjectDetectorConfig> config_;
176 +
177 + // The latest frame data, upon which all detections will be performed.
178 + // Not owned by this object, just provided for reference by ObjectTracker
179 + // via SetImageData().
180 + const ImageData* image_data_;
181 +
182 + private:
183 + TF_DISALLOW_COPY_AND_ASSIGN(ObjectDetectorBase);
184 +};
185 +
186 +template <typename ModelType>
187 +class ObjectDetector : public ObjectDetectorBase {
188 + public:
189 + explicit ObjectDetector(const ObjectDetectorConfig* const config)
190 + : ObjectDetectorBase(config) {}
191 +
192 + virtual ~ObjectDetector() {
193 + typename std::map<std::string, ModelType*>::const_iterator it =
194 + object_models_.begin();
195 + for (; it != object_models_.end(); ++it) {
196 + ModelType* model = it->second;
197 + delete model;
198 + }
199 + }
200 +
201 + virtual void DeleteObjectModel(const std::string& name) {
202 + ModelType* model = object_models_[name];
203 + CHECK_ALWAYS(model != NULL, "Model was null!");
204 + object_models_.erase(name);
205 + SAFE_DELETE(model);
206 + }
207 +
208 + virtual void GetObjectModels(
209 + std::vector<const ObjectModelBase*>* models) const {
210 + typename std::map<std::string, ModelType*>::const_iterator it =
211 + object_models_.begin();
212 + for (; it != object_models_.end(); ++it) {
213 + models->push_back(it->second);
214 + }
215 + }
216 +
217 + virtual bool AllowSpontaneousDetections() {
218 + return false;
219 + }
220 +
221 + protected:
222 + std::map<std::string, ModelType*> object_models_;
223 +
224 + private:
225 + TF_DISALLOW_COPY_AND_ASSIGN(ObjectDetector);
226 +};
227 +
228 +} // namespace tf_tracking
229 +
230 +#endif // TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_OBJECT_DETECTOR_H_
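ObjectDetector owns its ModelType instances through raw pointers in a std::map, hence the hand-rolled destructor and the SAFE_DELETE in DeleteObjectModel. For comparison, a sketch of the same name-to-model registry built on std::unique_ptr, which collapses both of those into plain map operations (Model here is a hypothetical stand-in for ModelType):

```cpp
#include <map>
#include <memory>
#include <string>

// Hypothetical stand-in for ModelType.
struct Model {
  explicit Model(std::string n) : name(std::move(n)) {}
  std::string name;
};

class ModelRegistry {
 public:
  // Returns the existing model of this name, or creates one.
  Model* Create(const std::string& name) {
    auto& slot = models_[name];
    if (!slot) slot = std::make_unique<Model>(name);
    return slot.get();
  }

  // Erasing the map entry frees the model; no SAFE_DELETE or hand-written
  // destructor loop is needed.
  void Delete(const std::string& name) { models_.erase(name); }

 private:
  std::map<std::string, std::unique_ptr<Model>> models_;
};
```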
1 +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +// NOTE: no native object detectors are currently provided or used by the code
17 +// in this directory. This class remains mainly for historical reasons.
18 +// Detection in the TF demo is done through TensorFlowMultiBoxDetector.java.
19 +
20 +// Contains ObjectModelBase declaration.
21 +
22 +#ifndef TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_OBJECT_MODEL_H_
23 +#define TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_OBJECT_MODEL_H_
24 +
25 +#ifdef __RENDER_OPENGL__
26 +#include <GLES/gl.h>
27 +#include <GLES/glext.h>
28 +#endif
29 +
30 +#include <vector>
31 +
32 +#include "tensorflow/examples/android/jni/object_tracking/geom.h"
33 +#include "tensorflow/examples/android/jni/object_tracking/image-inl.h"
34 +#include "tensorflow/examples/android/jni/object_tracking/image.h"
35 +#include "tensorflow/examples/android/jni/object_tracking/integral_image.h"
36 +#ifdef __RENDER_OPENGL__
37 +#include "tensorflow/examples/android/jni/object_tracking/sprite.h"
38 +#endif
39 +#include "tensorflow/examples/android/jni/object_tracking/utils.h"
40 +
41 +#include "tensorflow/examples/android/jni/object_tracking/config.h"
42 +#include "tensorflow/examples/android/jni/object_tracking/image_data.h"
43 +#include "tensorflow/examples/android/jni/object_tracking/keypoint.h"
44 +
45 +namespace tf_tracking {
46 +
47 +// The ObjectModelBase class represents all the known appearance information for
48 +// an object. It is not a specific instance of the object in the world,
49 +// but just the general appearance information that enables detection. An
50 +// ObjectModelBase can be reused across multiple instances of TrackedObject.
51 +class ObjectModelBase {
52 + public:
53 + ObjectModelBase(const std::string& name) : name_(name) {}
54 +
55 + virtual ~ObjectModelBase() {}
56 +
57 + // Called when the next step in an ongoing track occurs.
58 + virtual void TrackStep(const BoundingBox& position,
59 + const Image<uint8_t>& image,
60 + const IntegralImage& integral_image,
61 + const bool authoritative) {}
62 +
63 + // Called when an object track is lost.
64 + virtual void TrackLost() {}
65 +
66 + // Called when an object track is confirmed as legitimate.
67 + virtual void TrackConfirmed() {}
68 +
69 + virtual float GetMaxCorrelation(const Image<float>& patch_image) const = 0;
70 +
71 + virtual MatchScore GetMatchScore(
72 + const BoundingBox& position, const ImageData& image_data) const = 0;
73 +
74 + virtual void Draw(float* const depth) const = 0;
75 +
76 + inline const std::string& GetName() const {
77 + return name_;
78 + }
79 +
80 + protected:
81 + const std::string name_;
82 +
83 + private:
84 + TF_DISALLOW_COPY_AND_ASSIGN(ObjectModelBase);
85 +};
86 +
87 +template <typename DetectorType>
88 +class ObjectModel : public ObjectModelBase {
89 + public:
90 +  ObjectModel(const DetectorType* const detector,
91 +              const std::string& name)
92 + : ObjectModelBase(name), detector_(detector) {}
93 +
94 + protected:
95 + const DetectorType* const detector_;
96 +
97 + TF_DISALLOW_COPY_AND_ASSIGN(ObjectModel<DetectorType>);
98 +};
99 +
100 +} // namespace tf_tracking
101 +
102 +#endif // TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_OBJECT_MODEL_H_
1 +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +#ifndef TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_OPTICAL_FLOW_H_
17 +#define TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_OPTICAL_FLOW_H_
18 +
19 +#include "tensorflow/examples/android/jni/object_tracking/geom.h"
20 +#include "tensorflow/examples/android/jni/object_tracking/image-inl.h"
21 +#include "tensorflow/examples/android/jni/object_tracking/image.h"
22 +#include "tensorflow/examples/android/jni/object_tracking/utils.h"
23 +
24 +#include "tensorflow/examples/android/jni/object_tracking/config.h"
25 +#include "tensorflow/examples/android/jni/object_tracking/frame_pair.h"
26 +#include "tensorflow/examples/android/jni/object_tracking/image_data.h"
27 +#include "tensorflow/examples/android/jni/object_tracking/keypoint.h"
28 +
29 +namespace tf_tracking {
30 +
31 +class FlowCache;
32 +
33 +// Class encapsulating all the data and logic necessary for performing optical
34 +// flow.
35 +class OpticalFlow {
36 + public:
37 + explicit OpticalFlow(const OpticalFlowConfig* const config);
38 +
39 + // Add a new frame to the optical flow. Will update all the non-keypoint
40 + // related member variables.
41 + //
42 +  // image_data should wrap a frame of grayscale values, one byte per pixel,
43 +  // at the original frame_width and frame_height used to initialize the
44 +  // OpticalFlow object. Downsampling will be handled internally.
45 +  //
46 +  // The two most recent frames are retained so that flow can be computed
47 +  // between them.
48 + void NextFrame(const ImageData* const image_data);
49 +
50 + // An implementation of the Lucas-Kanade Optical Flow algorithm.
51 + static bool FindFlowAtPoint_LK(const Image<uint8_t>& img_I,
52 + const Image<uint8_t>& img_J,
53 + const Image<int32_t>& I_x,
54 + const Image<int32_t>& I_y, const float p_x,
55 + const float p_y, float* out_g_x,
56 + float* out_g_y);
57 +
58 + // Pointwise flow using translational 2dof ESM.
59 + static bool FindFlowAtPoint_ESM(
60 + const Image<uint8_t>& img_I, const Image<uint8_t>& img_J,
61 + const Image<int32_t>& I_x, const Image<int32_t>& I_y,
62 + const Image<int32_t>& J_x, const Image<int32_t>& J_y, const float p_x,
63 + const float p_y, float* out_g_x, float* out_g_y);
64 +
65 + // Finds the flow using a specific level, in either direction.
66 + // If reversed, the coordinates are in the context of the latest
67 + // frame, not the frame before it.
68 + // All coordinates used in parameters are global, not scaled.
69 + bool FindFlowAtPointReversible(
70 + const int level, const float u_x, const float u_y,
71 + const bool reverse_flow,
72 + float* final_x, float* final_y) const;
73 +
74 + // Finds the flow using a specific level, filterable by forward-backward
75 + // error. All coordinates used in parameters are global, not scaled.
76 + bool FindFlowAtPointSingleLevel(const int level,
77 + const float u_x, const float u_y,
78 + const bool filter_by_fb_error,
79 + float* flow_x, float* flow_y) const;
80 +
81 + // Pyramidal optical-flow using all levels.
82 + bool FindFlowAtPointPyramidal(const float u_x, const float u_y,
83 + const bool filter_by_fb_error,
84 + float* flow_x, float* flow_y) const;
85 +
86 + private:
87 + const OpticalFlowConfig* const config_;
88 +
89 + const ImageData* frame1_;
90 + const ImageData* frame2_;
91 +
92 + // Size of the internally allocated images (after original is downsampled).
93 + const Size working_size_;
94 +
95 + TF_DISALLOW_COPY_AND_ASSIGN(OpticalFlow);
96 +};
97 +
98 +} // namespace tf_tracking
99 +
100 +#endif // TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_OPTICAL_FLOW_H_
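The filter_by_fb_error flag above refers to the standard forward-backward consistency test: track a point forward, track the result backward, and accept the flow only if you land near where you started. A sketch of that test under assumed names (FlowFn and the threshold are illustrative, not the real API):

```cpp
#include <cmath>
#include <functional>

// Tracks (x, y) in one direction; returns false if tracking fails.
using FlowFn = std::function<bool(float x, float y, float* out_x, float* out_y)>;

bool FlowWithFbCheck(const FlowFn& forward, const FlowFn& backward,
                     float x, float y, float max_fb_error,
                     float* flow_x, float* flow_y) {
  float fx, fy;  // position after forward tracking
  if (!forward(x, y, &fx, &fy)) return false;

  float bx, by;  // tracked back toward the start
  if (!backward(fx, fy, &bx, &by)) return false;

  // Accept only if the round trip lands close to the original point.
  const float dx = bx - x, dy = by - y;
  if (std::sqrt(dx * dx + dy * dy) > max_fb_error) return false;

  *flow_x = fx - x;
  *flow_y = fy - y;
  return true;
}
```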
1 +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +#ifndef TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_SPRITE_H_
17 +#define TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_SPRITE_H_
18 +
19 +#ifdef __RENDER_OPENGL__
20 +
21 +#include <GLES/gl.h>
22 +#include <GLES/glext.h>
23 +
24 +#include "tensorflow/examples/android/jni/object_tracking/image-inl.h"
25 +#include "tensorflow/examples/android/jni/object_tracking/image.h"
26 +
27 +namespace tf_tracking {
28 +
29 +// This class encapsulates the logic necessary to load and render image data
30 +// at the same aspect ratio as the original source.
31 +class Sprite {
32 + public:
33 + // Only create Sprites when you have an OpenGl context.
34 + explicit Sprite(const Image<uint8_t>& image) { LoadTexture(image, NULL); }
35 +
36 + Sprite(const Image<uint8_t>& image, const BoundingBox* const area) {
37 + LoadTexture(image, area);
38 + }
39 +
40 + // Also, try to only delete a Sprite when holding an OpenGl context.
41 + ~Sprite() {
42 + glDeleteTextures(1, &texture_);
43 + }
44 +
45 + inline int GetWidth() const {
46 + return actual_width_;
47 + }
48 +
49 + inline int GetHeight() const {
50 + return actual_height_;
51 + }
52 +
53 + // Draw the sprite at 0,0 - original width/height in the current reference
54 + // frame. Any transformations desired must be applied before calling this
55 + // function.
56 + void Draw() const {
57 + const float float_width = static_cast<float>(actual_width_);
58 + const float float_height = static_cast<float>(actual_height_);
59 +
60 + // Where it gets rendered to.
61 + const float vertices[] = { 0.0f, 0.0f, 0.0f,
62 + 0.0f, float_height, 0.0f,
63 + float_width, 0.0f, 0.0f,
64 + float_width, float_height, 0.0f,
65 + };
66 +
67 + // The coordinates the texture gets drawn from.
68 + const float max_x = float_width / texture_width_;
69 + const float max_y = float_height / texture_height_;
70 + const float textureVertices[] = {
71 + 0, 0,
72 + 0, max_y,
73 + max_x, 0,
74 + max_x, max_y,
75 + };
76 +
77 + glEnable(GL_TEXTURE_2D);
78 + glBindTexture(GL_TEXTURE_2D, texture_);
79 +
80 + glEnableClientState(GL_VERTEX_ARRAY);
81 + glEnableClientState(GL_TEXTURE_COORD_ARRAY);
82 +
83 + glVertexPointer(3, GL_FLOAT, 0, vertices);
84 + glTexCoordPointer(2, GL_FLOAT, 0, textureVertices);
85 +
86 + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
87 +
88 + glDisableClientState(GL_VERTEX_ARRAY);
89 + glDisableClientState(GL_TEXTURE_COORD_ARRAY);
90 + }
91 +
92 + private:
93 + inline int GetNextPowerOfTwo(const int number) const {
94 + int power_of_two = 1;
95 + while (power_of_two < number) {
96 + power_of_two *= 2;
97 + }
98 + return power_of_two;
99 + }
100 +
101 + // TODO(andrewharp): Allow sprites to have their textures reloaded.
102 + void LoadTexture(const Image<uint8_t>& texture_source,
103 + const BoundingBox* const area) {
104 + glEnable(GL_TEXTURE_2D);
105 +
106 + glGenTextures(1, &texture_);
107 +
108 + glBindTexture(GL_TEXTURE_2D, texture_);
109 +
110 + int left = 0;
111 + int top = 0;
112 +
113 + if (area != NULL) {
114 + // If a sub-region was provided to pull the texture from, use that.
115 + left = area->left_;
116 + top = area->top_;
117 + actual_width_ = area->GetWidth();
118 + actual_height_ = area->GetHeight();
119 + } else {
120 + actual_width_ = texture_source.GetWidth();
121 + actual_height_ = texture_source.GetHeight();
122 + }
123 +
124 + // The textures must be a power of two, so find the sizes that are large
125 + // enough to contain the image data.
126 + texture_width_ = GetNextPowerOfTwo(actual_width_);
127 + texture_height_ = GetNextPowerOfTwo(actual_height_);
128 +
129 + bool allocated_data = false;
130 + uint8_t* texture_data;
131 +
132 + // Except in the lucky case where we're not using a sub-region of the
133 + // original image AND the source data has dimensions that are power of two,
134 + // care must be taken to copy data at the appropriate source and destination
135 + // strides so that the final block can be copied directly into texture
136 + // memory.
137 + // TODO(andrewharp): Figure out if data can be pulled directly from the
138 + // source image with some alignment modifications.
139 + if (left != 0 || top != 0 ||
140 + actual_width_ != texture_source.GetWidth() ||
141 + actual_height_ != texture_source.GetHeight()) {
142 + texture_data = new uint8_t[actual_width_ * actual_height_];
143 +
144 + for (int y = 0; y < actual_height_; ++y) {
145 + memcpy(texture_data + actual_width_ * y, texture_source[top + y] + left,
146 + actual_width_ * sizeof(uint8_t));
147 + }
148 + allocated_data = true;
149 + } else {
150 + // Cast away const-ness because for some reason glTexSubImage2D wants
151 + // a non-const data pointer.
152 + texture_data = const_cast<uint8_t*>(texture_source.data());
153 + }
154 +
155 + glTexImage2D(GL_TEXTURE_2D,
156 + 0,
157 + GL_LUMINANCE,
158 + texture_width_,
159 + texture_height_,
160 + 0,
161 + GL_LUMINANCE,
162 + GL_UNSIGNED_BYTE,
163 + NULL);
164 +
165 + glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
166 + glTexSubImage2D(GL_TEXTURE_2D,
167 + 0,
168 + 0,
169 + 0,
170 + actual_width_,
171 + actual_height_,
172 + GL_LUMINANCE,
173 + GL_UNSIGNED_BYTE,
174 + texture_data);
175 +
176 + if (allocated_data) {
177 +      delete[] texture_data;  // Must match the new uint8_t[] above.
178 + }
179 +
180 + glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
181 + }
182 +
183 + // The id for the texture on the GPU.
184 + GLuint texture_;
185 +
186 + // The width and height to be used for display purposes, referring to the
187 + // dimensions of the original texture.
188 + int actual_width_;
189 + int actual_height_;
190 +
191 + // The allocated dimensions of the texture data, which must be powers of 2.
192 + int texture_width_;
193 + int texture_height_;
194 +
195 + TF_DISALLOW_COPY_AND_ASSIGN(Sprite);
196 +};
197 +
198 +} // namespace tf_tracking
199 +
200 +#endif // __RENDER_OPENGL__
201 +
202 +#endif // TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_SPRITE_H_
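GetNextPowerOfTwo above uses a simple doubling loop, which is perfectly adequate for texture dimensions. For reference, the same result via bit twiddling (a sketch, assuming 32-bit inputs):

```cpp
#include <cassert>
#include <cstdint>

// Rounds n up to the next power of two by smearing the highest set bit
// into all lower positions, then adding one.
uint32_t NextPowerOfTwo(uint32_t n) {
  if (n <= 1) return 1;
  --n;
  n |= n >> 1;
  n |= n >> 2;
  n |= n >> 4;
  n |= n >> 8;
  n |= n >> 16;
  return n + 1;
}

int main() {
  assert(NextPowerOfTwo(1) == 1);
  assert(NextPowerOfTwo(320) == 512);  // e.g. a 320-wide preview frame
  assert(NextPowerOfTwo(512) == 512);  // exact powers of two are unchanged
}
```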
1 +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +#include "tensorflow/examples/android/jni/object_tracking/time_log.h"
17 +
18 +#ifdef LOG_TIME
19 +// Storage for logging functionality.
20 +int num_time_logs = 0;
21 +LogEntry time_logs[NUM_LOGS];
22 +
23 +int num_avg_entries = 0;
24 +AverageEntry avg_entries[NUM_LOGS];
25 +#endif
1 +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +// Utility functions for performance profiling.
17 +
18 +#ifndef TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_TIME_LOG_H_
19 +#define TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_TIME_LOG_H_
20 +
21 +#include <stdint.h>
22 +
23 +#include "tensorflow/examples/android/jni/object_tracking/logging.h"
24 +#include "tensorflow/examples/android/jni/object_tracking/utils.h"
25 +
26 +#ifdef LOG_TIME
27 +
28 +// Blend constant for running average.
29 +#define ALPHA 0.98f
30 +#define NUM_LOGS 100
31 +
32 +struct LogEntry {
33 + const char* id;
34 + int64_t time_stamp;
35 +};
36 +
37 +struct AverageEntry {
38 + const char* id;
39 + float average_duration;
40 +};
41 +
42 +// Storage for keeping track of this frame's values.
43 +extern int num_time_logs;
44 +extern LogEntry time_logs[NUM_LOGS];
45 +
46 +// Storage for keeping track of average values (each entry may not be printed
47 +// out each frame).
48 +extern AverageEntry avg_entries[NUM_LOGS];
49 +extern int num_avg_entries;
50 +
51 +// Call this at the start of a logging phase.
52 +inline static void ResetTimeLog() {
53 + num_time_logs = 0;
54 +}
55 +
56 +
57 +// Log a message to be printed out when PrintTimeLog is called, along with the
58 +// amount of time in ms that has passed since the last call to this function.
59 +inline static void TimeLog(const char* const str) {
60 + LOGV("%s", str);
61 + if (num_time_logs >= NUM_LOGS) {
62 + LOGE("Out of log entries!");
63 + return;
64 + }
65 +
66 + time_logs[num_time_logs].id = str;
67 + time_logs[num_time_logs].time_stamp = CurrentThreadTimeNanos();
68 + ++num_time_logs;
69 +}
70 +
71 +
72 +inline static float Blend(float old_val, float new_val) {
73 + return ALPHA * old_val + (1.0f - ALPHA) * new_val;
74 +}
75 +
76 +
77 +inline static float UpdateAverage(const char* str, const float new_val) {
78 + for (int entry_num = 0; entry_num < num_avg_entries; ++entry_num) {
79 + AverageEntry* const entry = avg_entries + entry_num;
80 + if (str == entry->id) {
81 + entry->average_duration = Blend(entry->average_duration, new_val);
82 + return entry->average_duration;
83 + }
84 + }
85 +
86 +  if (num_avg_entries >= NUM_LOGS) {
87 +    LOGE("Too many log entries!");
88 +    return new_val;  // Bail out rather than write past the end of avg_entries.
89 +  }
89 +
90 + // If it wasn't there already, add it.
91 + avg_entries[num_avg_entries].id = str;
92 + avg_entries[num_avg_entries].average_duration = new_val;
93 + ++num_avg_entries;
94 +
95 + return new_val;
96 +}
97 +
98 +
99 +// Prints out all the timeLog statements in chronological order with the
100 +// interval that passed between subsequent statements. The total time between
101 +// the first and last statements is printed last.
102 +inline static void PrintTimeLog() {
103 + LogEntry* last_time = time_logs;
104 +
105 + float average_running_total = 0.0f;
106 +
107 + for (int i = 0; i < num_time_logs; ++i) {
108 + LogEntry* const this_time = time_logs + i;
109 +
110 + const float curr_time =
111 + (this_time->time_stamp - last_time->time_stamp) / 1000000.0f;
112 +
113 + const float avg_time = UpdateAverage(this_time->id, curr_time);
114 + average_running_total += avg_time;
115 +
116 + LOGD("%32s: %6.3fms %6.4fms", this_time->id, curr_time, avg_time);
117 + last_time = this_time;
118 + }
119 +
120 + const float total_time =
121 + (last_time->time_stamp - time_logs->time_stamp) / 1000000.0f;
122 +
123 + LOGD("TOTAL TIME: %6.3fms %6.4fms\n",
124 + total_time, average_running_total);
125 + LOGD(" ");
126 +}
127 +#else
128 +inline static void ResetTimeLog() {}
129 +
130 +inline static void TimeLog(const char* const str) {
131 + LOGV("%s", str);
132 +}
133 +
134 +inline static void PrintTimeLog() {}
135 +#endif
136 +
137 +#endif // TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_TIME_LOG_H_
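Blend() maintains an exponential moving average: with ALPHA = 0.98 each new sample contributes only 2%, so the printed averages smooth out per-frame jitter but take on the order of 1 / (1 - ALPHA) = 50 frames to follow a step change. A small sketch showing that lag with made-up frame times:

```cpp
#include <cstdio>

int main() {
  const float kAlpha = 0.98f;  // same blend constant as ALPHA above
  float avg = 10.0f;           // steady state at 10 ms/frame

  for (int frame = 0; frame < 200; ++frame) {
    const float sample = 20.0f;  // workload suddenly doubles
    avg = kAlpha * avg + (1.0f - kAlpha) * sample;  // Blend(avg, sample)
    if (frame % 50 == 49) {
      // Chases 20 ms slowly: ~16.4 after 50 frames, ~18.7 after 100, etc.
      std::printf("frame %3d: avg %.2f ms\n", frame, avg);
    }
  }
}
```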
1 +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +#include "tensorflow/examples/android/jni/object_tracking/tracked_object.h"
17 +
18 +namespace tf_tracking {
19 +
20 +static const float kInitialDistance = 20.0f;
21 +
22 +static void InitNormalized(const Image<uint8_t>& src_image,
23 + const BoundingBox& position,
24 + Image<float>* const dst_image) {
25 + BoundingBox scaled_box(position);
26 + CopyArea(src_image, scaled_box, dst_image);
27 + NormalizeImage(dst_image);
28 +}
29 +
30 +TrackedObject::TrackedObject(const std::string& id, const Image<uint8_t>& image,
31 + const BoundingBox& bounding_box,
32 + ObjectModelBase* const model)
33 + : id_(id),
34 + last_known_position_(bounding_box),
35 + last_detection_position_(bounding_box),
36 + position_last_computed_time_(-1),
37 + object_model_(model),
38 + last_detection_thumbnail_(kNormalizedThumbnailSize,
39 + kNormalizedThumbnailSize),
40 + last_frame_thumbnail_(kNormalizedThumbnailSize, kNormalizedThumbnailSize),
41 + tracked_correlation_(0.0f),
42 + tracked_match_score_(0.0),
43 + num_consecutive_frames_below_threshold_(0),
44 + allowable_detection_distance_(Square(kInitialDistance)) {
45 + InitNormalized(image, bounding_box, &last_detection_thumbnail_);
46 +}
47 +
48 +TrackedObject::~TrackedObject() {}
49 +
50 +void TrackedObject::UpdatePosition(const BoundingBox& new_position,
51 + const int64_t timestamp,
52 + const ImageData& image_data,
53 + const bool authoritative) {
54 + last_known_position_ = new_position;
55 + position_last_computed_time_ = timestamp;
56 +
57 + InitNormalized(*image_data.GetImage(), new_position, &last_frame_thumbnail_);
58 +
59 + const float last_localization_correlation = ComputeCrossCorrelation(
60 + last_detection_thumbnail_.data(),
61 + last_frame_thumbnail_.data(),
62 + last_frame_thumbnail_.data_size_);
63 + LOGV("Tracked correlation to last localization: %.6f",
64 + last_localization_correlation);
65 +
66 + // Correlation to object model, if it exists.
67 + if (object_model_ != NULL) {
68 + tracked_correlation_ =
69 + object_model_->GetMaxCorrelation(last_frame_thumbnail_);
70 + LOGV("Tracked correlation to model: %.6f",
71 + tracked_correlation_);
72 +
73 + tracked_match_score_ =
74 + object_model_->GetMatchScore(new_position, image_data);
75 + LOGV("Tracked match score with model: %.6f",
76 + tracked_match_score_.value);
77 + } else {
78 + // If there's no model to check against, set the tracked correlation to
79 + // simply be the correlation to the last set position.
80 + tracked_correlation_ = last_localization_correlation;
81 + tracked_match_score_ = MatchScore(0.0f);
82 + }
83 +
84 + // Determine if it's still being tracked.
85 + if (tracked_correlation_ >= kMinimumCorrelationForTracking &&
86 + tracked_match_score_ >= kMinimumMatchScore) {
87 + num_consecutive_frames_below_threshold_ = 0;
88 +
89 + if (object_model_ != NULL) {
90 + object_model_->TrackStep(last_known_position_, *image_data.GetImage(),
91 + *image_data.GetIntegralImage(), authoritative);
92 + }
93 + } else if (tracked_match_score_ < kMatchScoreForImmediateTermination) {
94 + if (num_consecutive_frames_below_threshold_ < 1000) {
95 + LOGD("Tracked match score is way too low (%.6f), aborting track.",
96 + tracked_match_score_.value);
97 + }
98 +
99 + // Add an absurd amount of missed frames so that all heuristics will
100 + // consider it a lost track.
101 + num_consecutive_frames_below_threshold_ += 1000;
102 +
103 + if (object_model_ != NULL) {
104 + object_model_->TrackLost();
105 + }
106 + } else {
107 + ++num_consecutive_frames_below_threshold_;
108 + allowable_detection_distance_ *= 1.1f;
109 + }
110 +}
111 +
112 +void TrackedObject::OnDetection(ObjectModelBase* const model,
113 + const BoundingBox& detection_position,
114 + const MatchScore match_score,
115 + const int64_t timestamp,
116 + const ImageData& image_data) {
117 + const float overlap = detection_position.PascalScore(last_known_position_);
118 + if (overlap > kPositionOverlapThreshold) {
119 + // If the position agreement with the current tracked position is good
120 + // enough, lock all the current unlocked examples.
121 + object_model_->TrackConfirmed();
122 + num_consecutive_frames_below_threshold_ = 0;
123 + }
124 +
125 + // Before relocalizing, make sure the new proposed position is better than
126 + // the existing position by a small amount to prevent thrashing.
127 + if (match_score <= tracked_match_score_ + kMatchScoreBuffer) {
128 + LOGI("Not relocalizing since new match is worse: %.6f < %.6f + %.6f",
129 + match_score.value, tracked_match_score_.value,
130 + kMatchScoreBuffer.value);
131 + return;
132 + }
133 +
134 + LOGI("Relocalizing! From (%.1f, %.1f)[%.1fx%.1f] to "
135 + "(%.1f, %.1f)[%.1fx%.1f]: %.6f > %.6f",
136 + last_known_position_.left_, last_known_position_.top_,
137 + last_known_position_.GetWidth(), last_known_position_.GetHeight(),
138 + detection_position.left_, detection_position.top_,
139 + detection_position.GetWidth(), detection_position.GetHeight(),
140 + match_score.value, tracked_match_score_.value);
141 +
142 + if (overlap < kPositionOverlapThreshold) {
143 + // The path might be good, it might be bad, but it's no longer a path
144 + // since we're moving the box to a new position, so just nuke it from
145 + // orbit to be safe.
146 + object_model_->TrackLost();
147 + }
148 +
149 + object_model_ = model;
150 +
151 + // Reset the last detected appearance.
152 + InitNormalized(
153 + *image_data.GetImage(), detection_position, &last_detection_thumbnail_);
154 +
155 + num_consecutive_frames_below_threshold_ = 0;
156 + last_detection_position_ = detection_position;
157 +
158 + UpdatePosition(detection_position, timestamp, image_data, false);
159 + allowable_detection_distance_ = Square(kInitialDistance);
160 +}
161 +
162 +} // namespace tf_tracking
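UpdatePosition's tail implements a small track-health state machine: a healthy frame resets the failure counter, a catastrophically low match score adds 1000 so every heuristic treats the track as lost, and anything in between increments the counter while widening the allowable detection distance by 10%. A condensed sketch of just that logic (the threshold values are hypothetical stand-ins for the kMinimum*/kMatchScore* constants used above):

```cpp
// Condensed, standalone version of the per-frame track-health update.
struct TrackHealth {
  int frames_below_threshold = 0;
  float allowable_distance_sq = 20.0f * 20.0f;  // Square(kInitialDistance)

  void Update(float correlation, float match_score) {
    const float kMinCorrelation = 0.2f;         // assumed value
    const float kMinMatchScore = 0.5f;          // assumed value
    const float kImmediateTermination = 0.0f;   // assumed value

    if (correlation >= kMinCorrelation && match_score >= kMinMatchScore) {
      frames_below_threshold = 0;        // healthy: reset the counter
    } else if (match_score < kImmediateTermination) {
      frames_below_threshold += 1000;    // force every heuristic to give up
    } else {
      ++frames_below_threshold;          // degrade gradually...
      allowable_distance_sq *= 1.1f;     // ...while widening the search area
    }
  }
};
```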
1 +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +#ifndef TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_TRACKED_OBJECT_H_
17 +#define TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_TRACKED_OBJECT_H_
18 +
19 +#ifdef __RENDER_OPENGL__
20 +#include "tensorflow/examples/android/jni/object_tracking/gl_utils.h"
21 +#endif
22 +#include "tensorflow/examples/android/jni/object_tracking/object_detector.h"
23 +
24 +namespace tf_tracking {
25 +
26 +// A TrackedObject is a specific instance of an ObjectModel, with a known
27 +// position in the world.
28 +// It provides the last known position and number of recent detection failures,
29 +// in addition to the more general appearance data associated with the object
30 +// class (which is in ObjectModel).
31 +// TODO(andrewharp): Make getters/setters follow styleguide.
32 +class TrackedObject {
33 + public:
34 + TrackedObject(const std::string& id, const Image<uint8_t>& image,
35 + const BoundingBox& bounding_box, ObjectModelBase* const model);
36 +
37 + ~TrackedObject();
38 +
39 + void UpdatePosition(const BoundingBox& new_position, const int64_t timestamp,
40 + const ImageData& image_data, const bool authoritative);
41 +
42 + // This method is called when the tracked object is detected at a
43 + // given position, and allows the associated Model to grow and/or prune
44 + // itself based on where the detection occurred.
45 + void OnDetection(ObjectModelBase* const model,
46 + const BoundingBox& detection_position,
47 + const MatchScore match_score, const int64_t timestamp,
48 + const ImageData& image_data);
49 +
50 + // Called when there's no detection of the tracked object. This will cause
51 + // a tracking failure after enough consecutive failures if the area under
52 + // the current bounding box also doesn't meet a minimum correlation threshold
53 + // with the model.
54 + void OnDetectionFailure() {}
55 +
56 + inline bool IsVisible() const {
57 + return tracked_correlation_ >= kMinimumCorrelationForTracking ||
58 + num_consecutive_frames_below_threshold_ < kMaxNumDetectionFailures;
59 + }
60 +
61 + inline float GetCorrelation() {
62 + return tracked_correlation_;
63 + }
64 +
65 + inline MatchScore GetMatchScore() {
66 + return tracked_match_score_;
67 + }
68 +
69 + inline BoundingBox GetPosition() const {
70 + return last_known_position_;
71 + }
72 +
73 + inline BoundingBox GetLastDetectionPosition() const {
74 + return last_detection_position_;
75 + }
76 +
77 + inline const ObjectModelBase* GetModel() const {
78 + return object_model_;
79 + }
80 +
81 + inline const std::string& GetName() const {
82 + return id_;
83 + }
84 +
85 + inline void Draw() const {
86 +#ifdef __RENDER_OPENGL__
87 + if (tracked_correlation_ < kMinimumCorrelationForTracking) {
88 + glColor4f(MAX(0.0f, -tracked_correlation_),
89 + MAX(0.0f, tracked_correlation_),
90 + 0.0f,
91 + 1.0f);
92 + } else {
93 + glColor4f(MAX(0.0f, -tracked_correlation_),
94 + MAX(0.0f, tracked_correlation_),
95 + 1.0f,
96 + 1.0f);
97 + }
98 +
99 + // Render the box itself.
100 + BoundingBox temp_box(last_known_position_);
101 + DrawBox(temp_box);
102 +
103 +    // Render a box outside this one (in case the actual box is hidden).
104 + const float kBufferSize = 1.0f;
105 + temp_box.left_ -= kBufferSize;
106 + temp_box.top_ -= kBufferSize;
107 + temp_box.right_ += kBufferSize;
108 + temp_box.bottom_ += kBufferSize;
109 + DrawBox(temp_box);
110 +
111 +    // Render one inside as well.
112 + temp_box.left_ -= -2.0f * kBufferSize;
113 + temp_box.top_ -= -2.0f * kBufferSize;
114 + temp_box.right_ += -2.0f * kBufferSize;
115 + temp_box.bottom_ += -2.0f * kBufferSize;
116 + DrawBox(temp_box);
117 +#endif
118 + }
119 +
120 + // Get current object's num_consecutive_frames_below_threshold_.
121 + inline int64_t GetNumConsecutiveFramesBelowThreshold() {
122 + return num_consecutive_frames_below_threshold_;
123 + }
124 +
125 + // Reset num_consecutive_frames_below_threshold_ to 0.
126 + inline void resetNumConsecutiveFramesBelowThreshold() {
127 + num_consecutive_frames_below_threshold_ = 0;
128 + }
129 +
130 + inline float GetAllowableDistanceSquared() const {
131 + return allowable_detection_distance_;
132 + }
133 +
134 + private:
135 + // The unique id used throughout the system to identify this
136 + // tracked object.
137 + const std::string id_;
138 +
139 + // The last known position of the object.
140 + BoundingBox last_known_position_;
141 +
142 +  // The position of the object as of the most recent detection.
143 + BoundingBox last_detection_position_;
144 +
145 + // When the position was last computed.
146 + int64_t position_last_computed_time_;
147 +
148 + // The object model this tracked object is representative of.
149 + ObjectModelBase* object_model_;
150 +
151 + Image<float> last_detection_thumbnail_;
152 +
153 + Image<float> last_frame_thumbnail_;
154 +
155 + // The correlation of the object model with the preview frame at its last
156 + // tracked position.
157 + float tracked_correlation_;
158 +
159 + MatchScore tracked_match_score_;
160 +
161 + // The number of consecutive frames that the tracked position for this object
162 + // has been under the correlation threshold.
163 + int num_consecutive_frames_below_threshold_;
164 +
165 + float allowable_detection_distance_;
166 +
167 + friend std::ostream& operator<<(std::ostream& stream,
168 + const TrackedObject& tracked_object);
169 +
170 + TF_DISALLOW_COPY_AND_ASSIGN(TrackedObject);
171 +};
172 +
173 +inline std::ostream& operator<<(std::ostream& stream,
174 + const TrackedObject& tracked_object) {
175 + stream << tracked_object.id_
176 + << " " << tracked_object.last_known_position_
177 + << " " << tracked_object.position_last_computed_time_
178 + << " " << tracked_object.num_consecutive_frames_below_threshold_
179 + << " " << tracked_object.object_model_
180 + << " " << tracked_object.tracked_correlation_;
181 + return stream;
182 +}
183 +
184 +} // namespace tf_tracking
185 +
186 +#endif // TENSORFLOW_EXAMPLES_ANDROID_JNI_OBJECT_TRACKING_TRACKED_OBJECT_H_
1 +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +// NEON implementations of Image methods for compatible devices. Control
17 +// should never enter this compilation unit on incompatible devices.
18 +
19 +#ifdef __ARM_NEON
20 +
21 +#include <arm_neon.h>
22 +
23 +#include "tensorflow/examples/android/jni/object_tracking/geom.h"
24 +#include "tensorflow/examples/android/jni/object_tracking/image-inl.h"
25 +#include "tensorflow/examples/android/jni/object_tracking/image.h"
26 +#include "tensorflow/examples/android/jni/object_tracking/utils.h"
27 +
28 +namespace tf_tracking {
29 +
30 +inline static float GetSum(const float32x4_t& values) {
31 +  float32_t summed_values[4];  // Stack-local; a static buffer here would not be thread-safe.
32 + vst1q_f32(summed_values, values);
33 + return summed_values[0]
34 + + summed_values[1]
35 + + summed_values[2]
36 + + summed_values[3];
37 +}
38 +
39 +
40 +float ComputeMeanNeon(const float* const values, const int num_vals) {
41 + SCHECK(num_vals >= 8, "Not enough values to merit NEON: %d", num_vals);
42 +
43 + const float32_t* const arm_vals = (const float32_t* const) values;
44 + float32x4_t accum = vdupq_n_f32(0.0f);
45 +
46 + int offset = 0;
47 + for (; offset <= num_vals - 4; offset += 4) {
48 + accum = vaddq_f32(accum, vld1q_f32(&arm_vals[offset]));
49 + }
50 +
51 + // Pull the accumulated values into a single variable.
52 + float sum = GetSum(accum);
53 +
54 + // Get the remaining 1 to 3 values.
55 + for (; offset < num_vals; ++offset) {
56 + sum += values[offset];
57 + }
58 +
59 + const float mean_neon = sum / static_cast<float>(num_vals);
60 +
61 +#ifdef SANITY_CHECKS
62 + const float mean_cpu = ComputeMeanCpu(values, num_vals);
63 + SCHECK(NearlyEqual(mean_neon, mean_cpu, EPSILON * num_vals),
64 + "Neon mismatch with CPU mean! %.10f vs %.10f",
65 + mean_neon, mean_cpu);
66 +#endif
67 +
68 + return mean_neon;
69 +}
70 +
71 +
72 +float ComputeStdDevNeon(const float* const values,
73 + const int num_vals, const float mean) {
74 + SCHECK(num_vals >= 8, "Not enough values to merit NEON: %d", num_vals);
75 +
76 + const float32_t* const arm_vals = (const float32_t* const) values;
77 + const float32x4_t mean_vec = vdupq_n_f32(-mean);
78 +
79 + float32x4_t accum = vdupq_n_f32(0.0f);
80 +
81 + int offset = 0;
82 + for (; offset <= num_vals - 4; offset += 4) {
83 + const float32x4_t deltas =
84 + vaddq_f32(mean_vec, vld1q_f32(&arm_vals[offset]));
85 +
86 + accum = vmlaq_f32(accum, deltas, deltas);
87 + }
88 +
89 + // Pull the accumulated values into a single variable.
90 + float squared_sum = GetSum(accum);
91 +
92 + // Get the remaining 1 to 3 values.
93 + for (; offset < num_vals; ++offset) {
94 + squared_sum += Square(values[offset] - mean);
95 + }
96 +
97 + const float std_dev_neon = sqrt(squared_sum / static_cast<float>(num_vals));
98 +
99 +#ifdef SANITY_CHECKS
100 + const float std_dev_cpu = ComputeStdDevCpu(values, num_vals, mean);
101 + SCHECK(NearlyEqual(std_dev_neon, std_dev_cpu, EPSILON * num_vals),
102 + "Neon mismatch with CPU std dev! %.10f vs %.10f",
103 + std_dev_neon, std_dev_cpu);
104 +#endif
105 +
106 + return std_dev_neon;
107 +}
108 +
109 +
110 +float ComputeCrossCorrelationNeon(const float* const values1,
111 + const float* const values2,
112 + const int num_vals) {
113 + SCHECK(num_vals >= 8, "Not enough values to merit NEON: %d", num_vals);
114 +
115 + const float32_t* const arm_vals1 = (const float32_t* const) values1;
116 + const float32_t* const arm_vals2 = (const float32_t* const) values2;
117 +
118 + float32x4_t accum = vdupq_n_f32(0.0f);
119 +
120 + int offset = 0;
121 + for (; offset <= num_vals - 4; offset += 4) {
122 + accum = vmlaq_f32(accum,
123 + vld1q_f32(&arm_vals1[offset]),
124 + vld1q_f32(&arm_vals2[offset]));
125 + }
126 +
127 + // Pull the accumulated values into a single variable.
128 + float sxy = GetSum(accum);
129 +
130 + // Get the remaining 1 to 3 values.
131 + for (; offset < num_vals; ++offset) {
132 + sxy += values1[offset] * values2[offset];
133 + }
134 +
135 + const float cross_correlation_neon = sxy / num_vals;
136 +
137 +#ifdef SANITY_CHECKS
138 + const float cross_correlation_cpu =
139 + ComputeCrossCorrelationCpu(values1, values2, num_vals);
140 + SCHECK(NearlyEqual(cross_correlation_neon, cross_correlation_cpu,
141 + EPSILON * num_vals),
142 + "Neon mismatch with CPU cross correlation! %.10f vs %.10f",
143 + cross_correlation_neon, cross_correlation_cpu);
144 +#endif
145 +
146 + return cross_correlation_neon;
147 +}
148 +
149 +} // namespace tf_tracking
150 +
151 +#endif // __ARM_NEON
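For reference, the scalar computation that the NEON path above vectorizes four lanes at a time; the SANITY_CHECKS block asserts the two agree to within EPSILON * num_vals. This sketch mirrors what ComputeCrossCorrelationCpu presumably does, not its actual source:

```cpp
// Portable reference for ComputeCrossCorrelationNeon: sum(v1[i] * v2[i]) / n,
// without the 4-lane vmlaq accumulation or the remainder loop split.
float ComputeCrossCorrelationScalar(const float* v1, const float* v2,
                                    const int n) {
  float sxy = 0.0f;
  for (int i = 0; i < n; ++i) {
    sxy += v1[i] * v2[i];
  }
  return sxy / static_cast<float>(n);
}
```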
1 +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +// These utility functions allow for the conversion of RGB data to YUV data.
17 +
18 +#include "tensorflow/examples/android/jni/rgb2yuv.h"
19 +
20 +static inline void WriteYUV(const int x, const int y, const int width,
21 + const int r8, const int g8, const int b8,
22 + uint8_t* const pY, uint8_t* const pUV) {
23 + // Using formulas from http://msdn.microsoft.com/en-us/library/ms893078
24 + *pY = ((66 * r8 + 129 * g8 + 25 * b8 + 128) >> 8) + 16;
25 +
26 + // Odd widths get rounded up so that UV blocks on the side don't get cut off.
27 + const int blocks_per_row = (width + 1) / 2;
28 +
29 + // 2 bytes per UV block
30 + const int offset = 2 * (((y / 2) * blocks_per_row + (x / 2)));
31 +
32 + // U and V are the average values of all 4 pixels in the block.
33 + if (!(x & 1) && !(y & 1)) {
34 + // Explicitly clear the block if this is the first pixel in it.
35 + pUV[offset] = 0;
36 + pUV[offset + 1] = 0;
37 + }
38 +
39 + // V (with divide by 4 factored in)
40 +#ifdef __APPLE__
41 + const int u_offset = 0;
42 + const int v_offset = 1;
43 +#else
44 + const int u_offset = 1;
45 + const int v_offset = 0;
46 +#endif
47 + pUV[offset + v_offset] += ((112 * r8 - 94 * g8 - 18 * b8 + 128) >> 10) + 32;
48 +
49 + // U (with divide by 4 factored in)
50 + pUV[offset + u_offset] += ((-38 * r8 - 74 * g8 + 112 * b8 + 128) >> 10) + 32;
51 +}
52 +
53 +void ConvertARGB8888ToYUV420SP(const uint32_t* const input,
54 + uint8_t* const output, int width, int height) {
55 + uint8_t* pY = output;
56 + uint8_t* pUV = output + (width * height);
57 + const uint32_t* in = input;
58 +
59 + for (int y = 0; y < height; y++) {
60 + for (int x = 0; x < width; x++) {
61 + const uint32_t rgb = *in++;
62 +#ifdef __APPLE__
63 + const int nB = (rgb >> 8) & 0xFF;
64 + const int nG = (rgb >> 16) & 0xFF;
65 + const int nR = (rgb >> 24) & 0xFF;
66 +#else
67 + const int nR = (rgb >> 16) & 0xFF;
68 + const int nG = (rgb >> 8) & 0xFF;
69 + const int nB = rgb & 0xFF;
70 +#endif
71 + WriteYUV(x, y, width, nR, nG, nB, pY++, pUV);
72 + }
73 + }
74 +}
75 +
76 +void ConvertRGB565ToYUV420SP(const uint16_t* const input, uint8_t* const output,
77 + const int width, const int height) {
78 + uint8_t* pY = output;
79 + uint8_t* pUV = output + (width * height);
80 + const uint16_t* in = input;
81 +
82 + for (int y = 0; y < height; y++) {
83 + for (int x = 0; x < width; x++) {
84 + const uint32_t rgb = *in++;
85 +
86 + const int r5 = ((rgb >> 11) & 0x1F);
87 + const int g6 = ((rgb >> 5) & 0x3F);
88 + const int b5 = (rgb & 0x1F);
89 +
90 + // Shift left, then fill in the empty low bits with a copy of the high
91 + // bits so we can stretch across the entire 0 - 255 range.
92 + const int r8 = r5 << 3 | r5 >> 2;
93 + const int g8 = g6 << 2 | g6 >> 4;
94 + const int b8 = b5 << 3 | b5 >> 2;
95 +
96 + WriteYUV(x, y, width, r8, g8, b8, pY++, pUV);
97 + }
98 + }
99 +}
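Two quick sanity checks on the arithmetic above (a standalone sketch, not part of the library): the Y equation maps full white to 235 and full black to 16, the studio-swing luma range, and the RGB565 bit-replication trick stretches each 5-bit channel across the full 0..255 range:

```cpp
#include <cassert>

int main() {
  // The Y equation from WriteYUV.
  auto y_of = [](int r8, int g8, int b8) {
    return ((66 * r8 + 129 * g8 + 25 * b8 + 128) >> 8) + 16;
  };
  assert(y_of(255, 255, 255) == 235);  // white -> top of video range
  assert(y_of(0, 0, 0) == 16);         // black -> bottom of video range

  // The 5-bit-to-8-bit stretch from ConvertRGB565ToYUV420SP.
  auto stretch5 = [](int c5) { return c5 << 3 | c5 >> 2; };
  assert(stretch5(0) == 0);     // darkest 5-bit value -> 0
  assert(stretch5(31) == 255);  // brightest 5-bit value -> 255
}
```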
1 +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +#ifndef TENSORFLOW_EXAMPLES_ANDROID_JNI_RGB2YUV_H_
17 +#define TENSORFLOW_EXAMPLES_ANDROID_JNI_RGB2YUV_H_
18 +
19 +#include <stdint.h>
20 +
21 +#ifdef __cplusplus
22 +extern "C" {
23 +#endif
24 +
25 +void ConvertARGB8888ToYUV420SP(const uint32_t* const input,
26 + uint8_t* const output, int width, int height);
27 +
28 +void ConvertRGB565ToYUV420SP(const uint16_t* const input, uint8_t* const output,
29 + const int width, const int height);
30 +
31 +#ifdef __cplusplus
32 +}
33 +#endif
34 +
35 +#endif // TENSORFLOW_EXAMPLES_ANDROID_JNI_RGB2YUV_H_
1 +VERS_1.0 {
2 + # Export JNI symbols.
3 + global:
4 + Java_*;
5 + JNI_OnLoad;
6 + JNI_OnUnload;
7 +
8 + # Hide everything else.
9 + local:
10 + *;
11 +};
1 +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +// This is a collection of routines which converts various YUV image formats
17 +// to ARGB.
18 +
19 +#include "tensorflow/examples/android/jni/yuv2rgb.h"
20 +
21 +#ifndef MAX
22 +#define MAX(a, b) ({__typeof__(a) _a = (a); __typeof__(b) _b = (b); _a > _b ? _a : _b; })
23 +#define MIN(a, b) ({__typeof__(a) _a = (a); __typeof__(b) _b = (b); _a < _b ? _a : _b; })
24 +#endif
25 +
26 +// This value is 2 ^ 18 - 1, and is used to clamp the RGB values before their ranges
27 +// are normalized to eight bits.
28 +static const int kMaxChannelValue = 262143;
29 +
30 +static inline uint32_t YUV2RGB(int nY, int nU, int nV) {
31 + nY -= 16;
32 + nU -= 128;
33 + nV -= 128;
34 + if (nY < 0) nY = 0;
35 +
36 + // This is the floating point equivalent. We do the conversion in integer
37 + // because some Android devices do not have floating point in hardware.
38 +  // nR = (int)(1.164 * nY + 1.596 * nV);
39 +  // nG = (int)(1.164 * nY - 0.813 * nV - 0.391 * nU);
40 +  // nB = (int)(1.164 * nY + 2.018 * nU);
41 +
42 + int nR = 1192 * nY + 1634 * nV;
43 + int nG = 1192 * nY - 833 * nV - 400 * nU;
44 + int nB = 1192 * nY + 2066 * nU;
45 +
46 + nR = MIN(kMaxChannelValue, MAX(0, nR));
47 + nG = MIN(kMaxChannelValue, MAX(0, nG));
48 + nB = MIN(kMaxChannelValue, MAX(0, nB));
49 +
50 + nR = (nR >> 10) & 0xff;
51 + nG = (nG >> 10) & 0xff;
52 + nB = (nB >> 10) & 0xff;
53 +
54 + return 0xff000000 | (nR << 16) | (nG << 8) | nB;
55 +}
56 +
57 +// Accepts a YUV 4:2:0 image with a plane of 8 bit Y samples followed by
58 +// separate u and v planes with arbitrary row and column strides,
59 +// containing 8 bit 2x2 subsampled chroma samples.
60 +// Converts to a packed ARGB 32 bit output of the same pixel dimensions.
61 +void ConvertYUV420ToARGB8888(const uint8_t* const yData,
62 + const uint8_t* const uData,
63 + const uint8_t* const vData, uint32_t* const output,
64 + const int width, const int height,
65 + const int y_row_stride, const int uv_row_stride,
66 + const int uv_pixel_stride) {
67 + uint32_t* out = output;
68 +
69 + for (int y = 0; y < height; y++) {
70 + const uint8_t* pY = yData + y_row_stride * y;
71 +
72 + const int uv_row_start = uv_row_stride * (y >> 1);
73 + const uint8_t* pU = uData + uv_row_start;
74 + const uint8_t* pV = vData + uv_row_start;
75 +
76 + for (int x = 0; x < width; x++) {
77 + const int uv_offset = (x >> 1) * uv_pixel_stride;
78 + *out++ = YUV2RGB(pY[x], pU[uv_offset], pV[uv_offset]);
79 + }
80 + }
81 +}
82 +
83 +// Accepts a YUV 4:2:0 image with a plane of 8 bit Y samples followed by an
84 +// interleaved U/V plane containing 8 bit 2x2 subsampled chroma samples,
85 +// except the interleave order of U and V is reversed. Converts to a packed
86 +// ARGB 32 bit output of the same pixel dimensions.
87 +void ConvertYUV420SPToARGB8888(const uint8_t* const yData,
88 + const uint8_t* const uvData,
89 + uint32_t* const output, const int width,
90 + const int height) {
91 + const uint8_t* pY = yData;
92 + const uint8_t* pUV = uvData;
93 + uint32_t* out = output;
94 +
95 + for (int y = 0; y < height; y++) {
96 + for (int x = 0; x < width; x++) {
97 + int nY = *pY++;
98 + int offset = (y >> 1) * width + 2 * (x >> 1);
99 +#ifdef __APPLE__
100 + int nU = pUV[offset];
101 + int nV = pUV[offset + 1];
102 +#else
103 + int nV = pUV[offset];
104 + int nU = pUV[offset + 1];
105 +#endif
106 +
107 + *out++ = YUV2RGB(nY, nU, nV);
108 + }
109 + }
110 +}
111 +
112 +// The same as above, but downsamples each dimension to half size.
113 +void ConvertYUV420SPToARGB8888HalfSize(const uint8_t* const input,
114 + uint32_t* const output, int width,
115 + int height) {
116 + const uint8_t* pY = input;
117 + const uint8_t* pUV = input + (width * height);
118 + uint32_t* out = output;
119 + int stride = width;
120 + width >>= 1;
121 + height >>= 1;
122 +
123 + for (int y = 0; y < height; y++) {
124 + for (int x = 0; x < width; x++) {
125 + int nY = (pY[0] + pY[1] + pY[stride] + pY[stride + 1]) >> 2;
126 + pY += 2;
127 +#ifdef __APPLE__
128 + int nU = *pUV++;
129 + int nV = *pUV++;
130 +#else
131 + int nV = *pUV++;
132 + int nU = *pUV++;
133 +#endif
134 +
135 + *out++ = YUV2RGB(nY, nU, nV);
136 + }
137 + pY += stride;
138 + }
139 +}
140 +
141 +// Accepts a YUV 4:2:0 image with a plane of 8 bit Y samples followed by an
142 +// interleaved U/V plane containing 8 bit 2x2 subsampled chroma samples,
143 +// except the interleave order of U and V is reversed. Converts to a packed
144 +// RGB 565 bit output of the same pixel dimensions.
145 +void ConvertYUV420SPToRGB565(const uint8_t* const input, uint16_t* const output,
146 + const int width, const int height) {
147 + const uint8_t* pY = input;
148 + const uint8_t* pUV = input + (width * height);
149 + uint16_t* out = output;
150 +
151 + for (int y = 0; y < height; y++) {
152 + for (int x = 0; x < width; x++) {
153 + int nY = *pY++;
154 + int offset = (y >> 1) * width + 2 * (x >> 1);
155 +#ifdef __APPLE__
156 + int nU = pUV[offset];
157 + int nV = pUV[offset + 1];
158 +#else
159 + int nV = pUV[offset];
160 + int nU = pUV[offset + 1];
161 +#endif
162 +
163 + nY -= 16;
164 + nU -= 128;
165 + nV -= 128;
166 + if (nY < 0) nY = 0;
167 +
168 + // This is the floating point equivalent. We do the conversion in integer
169 + // because some Android devices do not have floating point in hardware.
170 +      // nR = (int)(1.164 * nY + 1.596 * nV);
171 +      // nG = (int)(1.164 * nY - 0.813 * nV - 0.391 * nU);
172 +      // nB = (int)(1.164 * nY + 2.018 * nU);
173 +
174 + int nR = 1192 * nY + 1634 * nV;
175 + int nG = 1192 * nY - 833 * nV - 400 * nU;
176 + int nB = 1192 * nY + 2066 * nU;
177 +
178 + nR = MIN(kMaxChannelValue, MAX(0, nR));
179 + nG = MIN(kMaxChannelValue, MAX(0, nG));
180 + nB = MIN(kMaxChannelValue, MAX(0, nB));
181 +
182 + // Shift more than for ARGB8888 and apply appropriate bitmask.
183 + nR = (nR >> 13) & 0x1f;
184 + nG = (nG >> 12) & 0x3f;
185 + nB = (nB >> 13) & 0x1f;
186 +
187 + // R is high 5 bits, G is middle 6 bits, and B is low 5 bits.
188 + *out++ = (nR << 11) | (nG << 5) | nB;
189 + }
190 + }
191 +}
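The magic constants in YUV2RGB are the floating-point BT.601-style coefficients from the commented-out equations, scaled by 1024 (10 fractional bits), which is why results are shifted right by 10 before masking to 8 bits (and by 13/12/13 for the 5/6/5-bit RGB565 channels). A quick check of that derivation:

```cpp
#include <cassert>

int main() {
  // Each integer coefficient is the float coefficient times 1024, rounded.
  assert(static_cast<int>(1.164 * 1024 + 0.5) == 1192);
  assert(static_cast<int>(1.596 * 1024 + 0.5) == 1634);
  assert(static_cast<int>(0.813 * 1024 + 0.5) == 833);
  assert(static_cast<int>(0.391 * 1024 + 0.5) == 400);
  assert(static_cast<int>(2.018 * 1024 + 0.5) == 2066);
}
```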
1 +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2 +
3 +Licensed under the Apache License, Version 2.0 (the "License");
4 +you may not use this file except in compliance with the License.
5 +You may obtain a copy of the License at
6 +
7 + http://www.apache.org/licenses/LICENSE-2.0
8 +
9 +Unless required by applicable law or agreed to in writing, software
10 +distributed under the License is distributed on an "AS IS" BASIS,
11 +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 +See the License for the specific language governing permissions and
13 +limitations under the License.
14 +==============================================================================*/
15 +
16 +// This is a collection of routines which converts various YUV image formats
17 +// to (A)RGB.
18 +
19 +#ifndef TENSORFLOW_EXAMPLES_ANDROID_JNI_YUV2RGB_H_
20 +#define TENSORFLOW_EXAMPLES_ANDROID_JNI_YUV2RGB_H_
21 +
22 +#include <stdint.h>
23 +
24 +#ifdef __cplusplus
25 +extern "C" {
26 +#endif
27 +
28 +void ConvertYUV420ToARGB8888(const uint8_t* const yData,
29 + const uint8_t* const uData,
30 + const uint8_t* const vData, uint32_t* const output,
31 + const int width, const int height,
32 + const int y_row_stride, const int uv_row_stride,
33 + const int uv_pixel_stride);
34 +
35 +// Converts YUV420 semi-planar data to ARGB 8888 data using the supplied width
36 +// and height. The input and output must already be allocated and non-null.
37 +// For efficiency, no error checking is performed.
38 +void ConvertYUV420SPToARGB8888(const uint8_t* const pY,
39 + const uint8_t* const pUV, uint32_t* const output,
40 + const int width, const int height);
41 +
42 +// The same as above, but downsamples each dimension to half size.
43 +void ConvertYUV420SPToARGB8888HalfSize(const uint8_t* const input,
44 + uint32_t* const output, int width,
45 + int height);
46 +
47 +// Converts YUV420 semi-planar data to RGB 565 data using the supplied width
48 +// and height. The input and output must already be allocated and non-null.
49 +// For efficiency, no error checking is performed.
50 +void ConvertYUV420SPToRGB565(const uint8_t* const input, uint16_t* const output,
51 + const int width, const int height);
52 +
53 +#ifdef __cplusplus
54 +}
55 +#endif
56 +
57 +#endif // TENSORFLOW_EXAMPLES_ANDROID_JNI_YUV2RGB_H_
1 +<?xml version="1.0" encoding="utf-8"?><!--
2 + Copyright 2017 The TensorFlow Authors. All Rights Reserved.
3 +
4 + Licensed under the Apache License, Version 2.0 (the "License");
5 + you may not use this file except in compliance with the License.
6 + You may obtain a copy of the License at
7 +
8 + http://www.apache.org/licenses/LICENSE-2.0
9 +
10 + Unless required by applicable law or agreed to in writing, software
11 + distributed under the License is distributed on an "AS IS" BASIS,
12 + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 + See the License for the specific language governing permissions and
14 + limitations under the License.
15 +-->
16 +<set xmlns:android="http://schemas.android.com/apk/res/android"
17 + android:ordering="sequentially">
18 + <objectAnimator
19 + android:propertyName="backgroundColor"
20 + android:duration="375"
21 + android:valueFrom="0x00b3ccff"
22 + android:valueTo="0xffb3ccff"
23 + android:valueType="colorType"/>
24 + <objectAnimator
25 + android:propertyName="backgroundColor"
26 + android:duration="375"
27 + android:valueFrom="0xffb3ccff"
28 + android:valueTo="0x00b3ccff"
29 + android:valueType="colorType"/>
30 +</set>
1 +<?xml version="1.0" encoding="utf-8"?><!--
2 + Copyright 2017 The TensorFlow Authors. All Rights Reserved.
3 +
4 + Licensed under the Apache License, Version 2.0 (the "License");
5 + you may not use this file except in compliance with the License.
6 + You may obtain a copy of the License at
7 +
8 + http://www.apache.org/licenses/LICENSE-2.0
9 +
10 + Unless required by applicable law or agreed to in writing, software
11 + distributed under the License is distributed on an "AS IS" BASIS,
12 + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 + See the License for the specific language governing permissions and
14 + limitations under the License.
15 +-->
16 +<shape xmlns:android="http://schemas.android.com/apk/res/android" android:shape="rectangle">
17 + <solid android:color="#00000000" />
18 + <stroke android:width="1dip" android:color="#cccccc" />
19 +</shape>
1 +<?xml version="1.0" encoding="utf-8"?><!--
2 + Copyright 2016 The TensorFlow Authors. All Rights Reserved.
3 +
4 + Licensed under the Apache License, Version 2.0 (the "License");
5 + you may not use this file except in compliance with the License.
6 + You may obtain a copy of the License at
7 +
8 + http://www.apache.org/licenses/LICENSE-2.0
9 +
10 + Unless required by applicable law or agreed to in writing, software
11 + distributed under the License is distributed on an "AS IS" BASIS,
12 + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 + See the License for the specific language governing permissions and
14 + limitations under the License.
15 +-->
16 +<FrameLayout xmlns:android="http://schemas.android.com/apk/res/android"
17 + xmlns:tools="http://schemas.android.com/tools"
18 + android:id="@+id/container"
19 + android:layout_width="match_parent"
20 + android:layout_height="match_parent"
21 + android:background="#000"
22 + tools:context="org.tensorflow.demo.CameraActivity" />
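
This layout is just a full-screen black FrameLayout with id container, which suggests the activity inflates it and then swaps a camera preview fragment into that container at runtime. A hedged sketch of such an onCreate, inside an Activity subclass; R.layout.activity_camera and CameraConnectionFragment are assumed names not confirmed by this diff.

    // Sketch: inflate the single-container layout above, then attach a
    // camera fragment to @id/container. Names below are assumptions.
    @Override
    protected void onCreate(Bundle savedInstanceState) {
      super.onCreate(savedInstanceState);
      setContentView(R.layout.activity_camera);
      if (savedInstanceState == null) {
        getFragmentManager()
            .beginTransaction()
            .replace(R.id.container, new CameraConnectionFragment())
            .commit();
      }
    }
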
1 +<?xml version="1.0" encoding="utf-8"?><!--
2 + Copyright 2017 The TensorFlow Authors. All Rights Reserved.
3 +
4 + Licensed under the Apache License, Version 2.0 (the "License");
5 + you may not use this file except in compliance with the License.
6 + You may obtain a copy of the License at
7 +
8 + http://www.apache.org/licenses/LICENSE-2.0
9 +
10 + Unless required by applicable law or agreed to in writing, software
11 + distributed under the License is distributed on an "AS IS" BASIS,
12 + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 + See the License for the specific language governing permissions and
14 + limitations under the License.
15 +-->
16 +<FrameLayout
17 + xmlns:android="http://schemas.android.com/apk/res/android"
18 + xmlns:app="http://schemas.android.com/apk/res-auto"
19 + xmlns:tools="http://schemas.android.com/tools"
20 + android:layout_width="match_parent"
21 + android:layout_height="match_parent"
22 + tools:context="org.tensorflow.demo.SpeechActivity">
23 +
24 + <TextView
25 + android:layout_width="wrap_content"
26 + android:layout_height="wrap_content"
27 + android:text="Say one of the words below!"
28 + android:id="@+id/textView"
29 + android:textAlignment="center"
30 + android:layout_gravity="top"
31 +        android:textSize="24sp"
32 + android:layout_marginTop="10dp"
33 + android:layout_marginLeft="10dp"
34 + />
35 +
36 + <ListView
37 + android:id="@+id/list_view"
38 + android:layout_width="240dp"
39 + android:layout_height="wrap_content"
40 + android:background="@drawable/border"
41 + android:layout_gravity="top|center_horizontal"
42 + android:textAlignment="center"
43 + android:layout_marginTop="100dp"
44 + />
45 +
46 + <Button
47 + android:id="@+id/quit"
48 + android:layout_width="wrap_content"
49 + android:layout_height="wrap_content"
50 + android:text="Quit"
51 + android:layout_gravity="bottom|center_horizontal"
52 + android:layout_marginBottom="10dp"
53 + />
54 +
55 +</FrameLayout>
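
The speech layout declares a prompt TextView, a ListView of candidate words backed by the border drawable above, and a Quit button. A hedged sketch of wiring these views inside an Activity subclass; R.layout.activity_speech is an assumed file name, and the label array is a placeholder for whatever word list the demo actually loads.

    // Sketch: bind the views declared in the layout above.
    // R.layout.activity_speech and the label array are assumptions.
    @Override
    protected void onCreate(Bundle savedInstanceState) {
      super.onCreate(savedInstanceState);
      setContentView(R.layout.activity_speech);

      // Placeholder labels; the real word list is loaded elsewhere.
      final String[] labels = {"example", "words"};
      ListView labelsList = (ListView) findViewById(R.id.list_view);
      labelsList.setAdapter(new ArrayAdapter<String>(
          this, android.R.layout.simple_list_item_1, labels));

      Button quit = (Button) findViewById(R.id.quit);
      quit.setOnClickListener(new View.OnClickListener() {
        @Override
        public void onClick(View v) {
          finish();
        }
      });
    }
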