This repository was archived by the owner on Aug 5, 2022. It is now read-only.

Commit 7a35532

Merge remote-tracking branch 'remotes/internal/release_1.0.5'
2 parents: ce08100 + 1f7ce1f

250 files changed: +58746 −995 lines


CMakeLists.txt — 6 additions, 2 deletions

@@ -29,8 +29,8 @@ include(cmake/ConfigGen.cmake)
 caffe_option(CPU_ONLY "Build Caffe without CUDA support" OFF) # TODO: rename to USE_CUDA
 caffe_option(USE_OPENMP "Build Caffe with OpenMP support" ON )
 caffe_option(USE_CUDNN "Build Caffe with cuDNN library support" ON IF NOT CPU_ONLY)
-caffe_option(USE_MKL2017_AS_DEFAULT_ENGINE "Use MKL2017 primitives for supported layers" ON)
-caffe_option(USE_MKLDNN_AS_DEFAULT_ENGINE "Use MKL-DNN primitives for supported layers" OFF)
+caffe_option(USE_MKL2017_AS_DEFAULT_ENGINE "Use MKL2017 primitives for supported layers" OFF)
+caffe_option(USE_MKLDNN_AS_DEFAULT_ENGINE "Use MKL-DNN primitives for supported layers" ON)
 caffe_option(BUILD_SHARED_LIBS "Build shared libraries" ON)
 caffe_option(BUILD_python "Build Python wrapper" ON)
 set(python_version "2" CACHE STRING "Specify which Python version to use")

@@ -68,6 +68,8 @@ if(UNIX OR APPLE)
   # GCC specific flags.
   if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.9 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 4.9)
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIE -fstack-protector-strong")
+    # auto enable SGD FUSION if gcc version >= 4.9
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DENABLE_SGD_FUSION")
   else()
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIE -fstack-protector")
   endif()

@@ -79,6 +81,8 @@ if(UNIX OR APPLE)
   # though it uses -pie linker option that require -fPIE during compilation. Checksec
   # shows that it generates correct PIE anyway if only -pie is provided.
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fstack-protector")
+  # Auto enable SGD Fusion if use intel compiler
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DENABLE_SGD_FUSION")
 endif()

 # Generic flags.
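This merge flips the compiled-in defaults so MKL-DNN, rather than MKL2017, is the default engine. The caffe_option values are ordinary CMake cache options, so the previous behaviour can still be selected at configure time; a minimal sketch (the values are illustrative, not a recommendation):

    cmake -DUSE_MKL2017_AS_DEFAULT_ENGINE=ON -DUSE_MKLDNN_AS_DEFAULT_ENGINE=OFF ..
    make -j"$(nproc)"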
Makefile — 14 additions, 11 deletions

@@ -103,6 +103,7 @@ DYNAMIC_NAME_SHORT := lib$(LIBRARY_NAME).so
 DYNAMIC_VERSIONED_NAME_SHORT := $(DYNAMIC_NAME_SHORT).$(DYNAMIC_VERSION_MAJOR).$(DYNAMIC_VERSION_MINOR).$(DYNAMIC_VERSION_REVISION)
 DYNAMIC_NAME := $(LIB_BUILD_DIR)/$(DYNAMIC_VERSIONED_NAME_SHORT)
 COMMON_FLAGS += -DCAFFE_VERSION=$(DYNAMIC_VERSION_MAJOR).$(DYNAMIC_VERSION_MINOR).$(DYNAMIC_VERSION_REVISION)
+COMMON_FLAGS += -std=c++11

 ##############################
 # Get all source files

@@ -318,6 +319,11 @@ else ifeq ($(UNAME), Darwin)
     OSX_MINOR_VERSION := $(shell sw_vers -productVersion | cut -f 2 -d .)
 endif

+# Custom compiler
+ifdef CUSTOM_CXX
+    CXX := $(CUSTOM_CXX)
+endif
+
 # Linux
 ifeq ($(LINUX), 1)
     CXX ?= /usr/bin/g++
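The CUSTOM_CXX override now runs before the compiler-specific flag detection (it was previously applied later, as the next hunk shows), so a compiler set this way is seen by the icpc/clang++/g++ checks. A minimal usage sketch, assuming the Intel compiler is on PATH (icpc here is only an example value):

    make CUSTOM_CXX=icpc all -j"$(nproc)"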
@@ -365,19 +371,19 @@ else
     ORIGIN := \$$ORIGIN
 endif

-# Custom compiler
-ifdef CUSTOM_CXX
-    CXX := $(CUSTOM_CXX)
-endif
-
 # Compiler flags
 ifneq (,$(findstring icpc,$(CXX)))
     CXX_HARDENING_FLAGS += -fstack-protector
+    #Enable SGD FUSION if use intel compiler
+    COMMON_FLAGS += -DENABLE_SGD_FUSION
+
 else ifneq (,$(findstring clang++,$(CXX)))
     CXX_HARDENING_FLAGS += -fPIE -fstack-protector
 else ifneq (,$(findstring g++,$(CXX)))
-    ifeq ($(shell echo | awk '{exit $(GCCVERSION) >= 4.9;}'), 1)
+    ifeq ($(shell echo | awk '{ print $(GCCVERSION) >= 4.9 }'), 1)
         CXX_HARDENING_FLAGS += -fPIE -fstack-protector-strong
+        #Enable SGD FUSION if gcc version >= 4.9
+        COMMON_FLAGS += -DENABLE_SGD_FUSION
     else
         CXX_HARDENING_FLAGS += -fPIE -fstack-protector
     endif
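The g++ version test is also corrected: GNU make's $(shell ...) captures standard output, and the old awk program only set an exit status, so the comparison against "1" could never succeed; the new form prints the result instead. A small sketch of the difference, with 5.4 standing in for whatever $(GCCVERSION) expands to:

    echo | awk '{exit 5.4 >= 4.9;}'       # prints nothing; the result is only in the exit status
    echo | awk '{ print 5.4 >= 4.9 }'     # prints 1, which $(shell ...) can capture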
@@ -500,7 +506,7 @@ ifeq ($(MKL_EXTERNAL), 1)
     MKL_LDFLAGS+=-Wl,-rpath,$(MKLROOT)/lib
 endif

-    COMMON_FLAGS += -DUSE_MKL
+    COMMON_FLAGS += -DUSE_MKL -DMKL_ILP64
     BLAS_INCLUDE ?= $(MKLROOT)/include
     BLAS_LIB ?= $(MKLROOT)/lib $(MKLROOT)/lib/intel64

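-DMKL_ILP64 selects MKL's 64-bit-integer (ILP64) interface, in which MKL_INT is a 64-bit type; that macro is conventionally paired with the ilp64 interface library at link time. A generic sketch of such a compile/link pair (foo.cpp is hypothetical, and this is not the exact link line this Makefile generates):

    g++ -DMKL_ILP64 -I"$MKLROOT/include" -c foo.cpp -o foo.o
    g++ foo.o -L"$MKLROOT/lib/intel64" -lmkl_intel_ilp64 -lmkl_sequential -lmkl_core -lpthread -lm -ldl -o foo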
@@ -547,10 +553,6 @@ LIBRARY_DIRS += $(LIB_BUILD_DIR)
 # Automatic dependency generation (nvcc is handled separately)
 CXXFLAGS += -MMD -MP

-##########SGD FUSION#######################
-ifeq ($(ENABLE_SGD_FUSION), 1)
-    COMMON_FLAGS += -DENABLE_SGD_FUSION
-endif
 ###########################################
 #
 # Complete build flags.

@@ -827,6 +829,7 @@ $(PY_PROTO_INIT): | $(PY_PROTO_BUILD_DIR)
     touch $(PY_PROTO_INIT)

 clean: mkldnn_clean
+    @echo "Will download the new version of MKL2017 and MLSL when clean and prepare the environment."
     @- $(RM) -rf $(ALL_BUILD_DIRS)
     @- $(RM) -rf $(OTHER_BUILD_DIR)
     @- $(RM) -rf $(BUILD_DIR_LINK)

Makefile.config.example — 2 additions, 5 deletions

@@ -43,13 +43,13 @@
 # CPU-only switch (uncomment to build without GPU support).
 CPU_ONLY := 1

-USE_MKL2017_AS_DEFAULT_ENGINE := 1
+# USE_MKL2017_AS_DEFAULT_ENGINE := 1
 # or put this at the top your train_val.protoxt or solver.prototxt file:
 # engine: "MKL2017"
 # or use this option with caffe tool:
 # -engine "MKL2017"

-# USE_MKLDNN_AS_DEFAULT_ENGINE flag is OBSOLETE
+USE_MKLDNN_AS_DEFAULT_ENGINE := 1
 # Put this at the top your train_val.protoxt or solver.prototxt file:
 # engine: "MKLDNN"
 # or use this option with caffe tool:

@@ -170,8 +170,5 @@ DISTRIBUTE_DIR := distribute
 # The ID of the GPU that 'make runtest' will use to run unit tests.
 TEST_GPUID := 0

-# Uncomment for enabling SGD fusion
-# ENABLE_SGD_FUSION := 1
-
 # enable pretty build (comment to see full commands)
 Q ?= @
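As the comments above note, the engine can also be chosen per run regardless of which build flag is set. A short sketch of both mechanisms (the model path is illustrative):

    # select the engine with the caffe tool:
    caffe time -model models/bvlc_alexnet/deploy.prototxt -engine MKL2017

    # ...or pin it in the prototxt itself by putting a line at the top, e.g.:
    #   engine: "MKLDNN"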

cmake/Cuda.cmake — 1 addition, 1 deletion

@@ -145,7 +145,7 @@ macro(caffe_cuda_compile objlist_variable)
   endforeach()

   if(UNIX OR APPLE)
-    list(APPEND CUDA_NVCC_FLAGS -Xcompiler -fPIC)
+    list(APPEND CUDA_NVCC_FLAGS -Xcompiler -fPIC -std=c++11)
   endif()

   if(APPLE)
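This mirrors the -std=c++11 addition to COMMON_FLAGS in the Makefile: nvcc accepts -std=c++11 directly and applies the dialect to device code and the forwarded host compilation, while -fPIC continues to go through -Xcompiler. A rough sketch of what a resulting compile line could look like (the file name is only an example, not taken from a real build log):

    nvcc -Xcompiler -fPIC -std=c++11 -c src/caffe/layers/absval_layer.cu -o absval_layer.cu.o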

docker/README.md — 49 additions, 49 deletions

Every line of this file is removed and re-added with identical text, so no content change is visible in this view (most likely a whitespace or line-ending normalization). The file reads as follows:

# Building Caffe using standalone Dockerfile

The `standalone` subfolder contains docker files for generating both CPU and GPU executable images for Caffe. The images can be built using make, or by running:

```
docker build -t caffe:cpu standalone/cpu-ubuntu
```

for example. (Here `ubuntu` can be substituted for `centos`, `gpu` can be substituted for `cpu`, but to keep the readme simple, only the `cpu` case will be discussed in detail).

Note that the GPU standalone requires a CUDA 7.5 capable driver to be installed on the system and [nvidia-docker] for running the Docker containers. Here it is generally sufficient to use `nvidia-docker` instead of `docker` in any of the commands mentioned.

# Running Caffe using the docker image

In order to test the Caffe image, run:
```
docker run -ti caffe:cpu caffe --version
```
which should show a message like:
```
caffe version 1.0.0-rc3
```

One can also build and run the Caffe tests in the image using:
```
docker run -ti caffe:cpu bash -c "cd /opt/caffe/build; make runtest"
```

In order to get the most out of the caffe image, some more advanced `docker run` options could be used. For example, running:
```
docker run -ti caffe:cpu caffe time -model /opt/caffe/models/bvlc_alexnet/deploy.prototxt -engine MKLDNN
```
will measure the performance of AlexNet. You can also run caffe train as well. Note that docker runs all commands as root by default, and thus any output files (e.g. snapshots) generated will be owned by the root user. In order to ensure that the current user is used instead, the following command can be used:
```
docker run -ti --volume=$(pwd):/workspace -u $(id -u):$(id -g) caffe:cpu caffe train --solver=/opt/caffe/models/bvlc_alexnet/solver.prototxt -engine MKLDNN
```
where the `-u` Docker command line option runs the commands in the container as the specified user, and the shell command `id` is used to determine the user and group ID of the current user. Note that the Caffe docker images have `/workspace` defined as the default working directory. This can be overridden using the `--workdir=` Docker command line option. Note that you need to prepare dataset before training.

# Other use-cases

Although running the `caffe` command in the docker containers as described above serves many purposes, the container can also be used for more interactive use cases. For example, specifying `bash` as the command instead of `caffe` yields a shell that can be used for interactive tasks. (Since the caffe build requirements are included in the container, this can also be used to build and run local versions of caffe).

Another use case is to run python scripts that depend on `caffe`'s Python modules. Using the `python` command instead of `bash` or `caffe` will allow this, and an interactive interpreter can be started by running:
```
docker run -ti caffe:cpu python
```
(`ipython` is also available in the container).

Since the `caffe/python` folder is also added to the path, the utility executable scripts defined there can also be used as executables. This includes `draw_net.py`, `classify.py`, and `detect.py`
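The README above walks through the cpu image; following its substitution note, a GPU image can be built and run the same way. A sketch assuming the directory layout mirrors the cpu one and that nvidia-docker is installed:

    docker build -t caffe:gpu standalone/gpu-ubuntu
    nvidia-docker run -ti caffe:gpu caffe --version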
CentOS 7 Dockerfile (file path not shown in this view) — 56 additions, 56 deletions

As with docker/README.md, every line is removed and re-added unchanged, so the diff shows no textual difference (most likely a whitespace or line-ending change). The file reads as follows:

FROM centos:7


#ENV http_proxy proxy:port
#ENV https_proxy proxy:port

RUN rpm -iUvh http://download.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-8.noarch.rpm

RUN yum install -y \
    redhat-rpm-config \
    tar \
    findutils \
    make \
    gcc-c++ \
    cmake \
    git \
    wget \
    atlas-devel \
    boost-devel \
    gflags-devel \
    glog-devel \
    hdf5-devel \
    leveldb-devel \
    lmdb-devel \
    opencv-devel \
    protobuf-devel \
    snappy-devel \
    protobuf-compiler \
    freetype-devel \
    libpng-devel \
    python-devel \
    python-numpy \
    python-pip \
    python-scipy \
    gcc-gfortran \
    libjpeg-turbo-devel

RUN yum clean all
ENV CAFFE_ROOT=/opt/caffe
WORKDIR $CAFFE_ROOT

# FIXME: clone a specific git tag and use ARG instead of ENV once DockerHub supports this.
ENV CLONE_TAG=master

RUN git clone -b ${CLONE_TAG} --depth 1 https://github.com/intel/caffe.git . && \
    for req in $(cat python/requirements.txt) pydot; do pip --no-cache-dir install $req; done && \
    mkdir build && cd build && \
    cmake -DCPU_ONLY=1 -DCMAKE_BUILD_TYPE=Release .. && \
    make all -j"$(nproc)"

ENV PYCAFFE_ROOT $CAFFE_ROOT/python
ENV PYTHONPATH $PYCAFFE_ROOT:$PYTHONPATH
ENV PATH $CAFFE_ROOT/build/tools:$PYCAFFE_ROOT:$PATH
RUN echo "$CAFFE_ROOT/build/lib" >> /etc/ld.so.conf.d/caffe.conf && ldconfig

WORKDIR /workspace
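To use this Dockerfile, build and smoke-test it like the Ubuntu image described in the README; the path below is an assumption based on the README's `cpu`/`centos` naming scheme, since the file's location is not shown in this view:

    docker build -t caffe:cpu standalone/cpu-centos
    docker run -ti caffe:cpu caffe --version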
