diff --git a/tensorflow/stream_executor/stream.h b/tensorflow/stream_executor/stream.h index 6b32e36cb50..3a99e10356b 100644 --- a/tensorflow/stream_executor/stream.h +++ b/tensorflow/stream_executor/stream.h @@ -29,7 +29,6 @@ limitations under the License. #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/platform/thread_annotations.h" #include "tensorflow/stream_executor/blas.h" -#include "tensorflow/stream_executor/cuda/cuda_dnn.h" #include "tensorflow/stream_executor/device_memory.h" #include "tensorflow/stream_executor/dnn.h" #include "tensorflow/stream_executor/event.h" @@ -42,6 +41,10 @@ limitations under the License. #include "tensorflow/stream_executor/stream_executor_pimpl.h" #include "tensorflow/stream_executor/temporary_memory_manager.h" +#if GOOGLE_CUDA +#include "tensorflow/stream_executor/cuda/cuda_dnn.h" +#endif  // GOOGLE_CUDA + namespace stream_executor { namespace host { @@ -363,6 +366,7 @@ class Stream { DeviceMemory *output, ScratchAllocator *scratch_allocator, const dnn::AlgorithmConfig &plan_config, dnn::ProfileResult *output_profile_result) { +#if GOOGLE_CUDA dnn::DnnSupport *dnn = parent_->AsDnn(); if (dnn) { gpu::CudnnSupport *cudnn_dnn = dynamic_cast<gpu::CudnnSupport *>(dnn); @@ -373,6 +377,8 @@ class Stream { output_descriptor, *output, convolution_descriptor, plan_config, scratch_allocator, output_profile_result); } +#endif  // GOOGLE_CUDA + return port::UnimplementedError("DNN library is not found."); } port::Status FusedConvolveWithAlgorithm(