# Makefile — forked from NervanaSystems/ngraph-python (319 lines, 11.3 KB)
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
# Set empty to prevent any implicit rules from firing.
.SUFFIXES:

SHELL := /bin/bash

# Extract the Python major version digit ("2" or "3") from `python --version`.
# NOTE(review): `cut -c8` assumes output shaped exactly like "Python X.Y.Z";
# confirm this holds for all interpreters in use.
PY := $(shell python --version 2>&1 | cut -c8)
ifeq ($(PY), 2)
PYLINT3K_ARGS := --disable=no-absolute-import,setslice-method,getslice-method,nonzero-method,zip-builtin-not-iterating
else
PYLINT3K_ARGS :=
endif

# Style-checking related settings.
STYLE_CHECK_OPTS :=
STYLE_CHECK_DIRS := ngraph tests examples

# Shared pytest options and the per-frontend test directories.
TEST_OPTS := --timeout=600 --cov=ngraph --timeout_method=thread
TEST_DIRS := tests/
TEST_DIRS_COMMON := ngraph/frontends/common/tests
TEST_DIRS_NEON := ngraph/frontends/neon/tests
TEST_DIRS_TENSORFLOW := ngraph/frontends/tensorflow/tests
TEST_DIRS_CAFFE2 := ngraph/frontends/caffe2/tests
TEST_DIRS_MXNET := ngraph/frontends/mxnet/tests
TEST_DIRS_CNTK := ngraph/frontends/cntk/tests
TEST_DIRS_ONNX := ngraph/frontends/onnx/tests
TEST_DIRS_INTEGRATION := integration_tests/

# Set parallel execution by setting the NUM_PROCS variable in the environment:
#   export NUM_PROCS=8
#   make test_gpu
# OR
#   make test_gpu NUM_PROCS=8
#
# If NUM_PROCS is unset, serial execution will be used.
# If NUM_PROCS = 0, serial execution will be used.
#
# BUGFIX: this was `PARALLEL_OPTS := ""`, which passed a literal empty-string
# argument ("") to py.test in the serial case; it must default to truly empty.
PARALLEL_OPTS :=
ifdef NUM_PROCS
ifneq ($(NUM_PROCS),0)
PARALLEL_OPTS := -n $(NUM_PROCS)
endif
endif

# This variable controls where we publish Sphinx docs to.
# DOC_PUB_PATH and RELEASE are expected from the environment.
DOC_DIR := doc
DOC_PUB_RELEASE_PATH := $(DOC_PUB_PATH)/$(RELEASE)
# Declare every command-style target as phony so a same-named file on disk
# never shadows it. BUGFIX: the list previously omitted most runnable targets
# (test_cpu, test_gpu, the *_prepare targets, ...).
.PHONY: env default install install_all uninstall uninstall_all clean \
	test testflex style lint lint3k check doc viz_prepare \
	gpu_prepare multinode_prepare test_prepare examples_prepare doc_prepare \
	autoflex_prepare test_all_transformers test_flex test_mkldnn test_cpu \
	test_gpu test_hetr test_mgpu test_mxnet test_cntk test_caffe2 \
	test_integration examples gpu_examples fixstyle autopep8 publish_doc \
	release onnx_dependency

default: install

# Install the core package and its pinned requirements.
install:
	pip install -U pip
	# setuptools is autoflex's requirement, but it is here for now;
	# added separately since it is needed for building packages from source.
	pip install setuptools==18.5
	# cython added separately due to h5py dependency ordering bug. See:
	# https://github.com/h5py/h5py/issues/535
	pip install cython==0.23.1
	pip install -r requirements.txt
	pip install -e .

# Install everything except viz_prepare, which needs a system package.
install_all: gpu_prepare test_prepare examples_prepare doc_prepare install
# Optional dependency sets. NOTE(review): the ">/dev/null 2>&1" redirects
# hide pip failures entirely; drop them when debugging install problems.
gpu_prepare:
	pip install -r gpu_requirements.txt > /dev/null 2>&1

multinode_prepare:
	pip install -r multinode_requirements.txt > /dev/null 2>&1

test_prepare:
	pip install -r test_requirements.txt > /dev/null 2>&1

examples_prepare:
	pip install -r examples_requirements.txt > /dev/null 2>&1

doc_prepare:
	pip install -r doc_requirements.txt > /dev/null 2>&1

# For internal use only.
# The private autoflex repo is expected to be installed in ../autoflex;
# update the pip install command below to reference the path to the
# autoflex directory if it lives elsewhere.
autoflex_prepare:
	@echo
	@echo Attempting to update autoflex to the latest version in ../autoflex
	pip install ../autoflex --upgrade
# Remove the ngraph package and its direct requirements.
uninstall:
	pip uninstall -y ngraph
	# BUGFIX: "-y" was missing here, so a batch uninstall would block
	# waiting on an interactive confirmation prompt.
	pip uninstall -y -r requirements.txt

# Also remove every optional dependency set.
uninstall_all: uninstall
	pip uninstall -y -r gpu_requirements.txt -r test_requirements.txt \
		-r examples_requirements.txt -r doc_requirements.txt -r viz_requirements.txt \
		-r multinode_requirements.txt

# Delete bytecode, caches, coverage data, and egg-info (files this build created).
clean:
	find . -name "*.py[co]" -type f -delete
	find . -name "__pycache__" -type d -delete
	rm -f .coverage .coverage.*
	rm -rf ngraph.egg-info
	@echo
# Run every transformer-specific suite in sequence.
test_all_transformers: test_cpu test_hetr test_gpu test_flex

# Flex-transformer unit tests. Requires the private autoflex package
# (see autoflex_prepare); without it py.test rejects "--transformer flexgpu".
test_flex: gpu_prepare test_prepare clean
	@echo
	@echo The autoflex package is required for flex testing ...
	@echo WARNING: flex tests will report the following message if autoflex has not been installed:
	@echo
	@echo " argument --transformer: invalid choice: 'flexgpu' (choose from 'cpu', 'gpu', \
	'hetr')"
	@echo
	@echo "In case of test failures, clone the private autoflex repo in ../autoflex and execute \
	make autoflex_prepare"
	@echo
	@echo Running flex unit tests...
	py.test --boxed --transformer flexgpu -m "transformer_dependent and not hetr_only or flex_only" \
		--junit-xml=testout_test_flex_$(PY).xml --timeout=1200 --cov=ngraph \
		$(TEST_DIRS) $(TEST_DIRS_NEON) ${TEST_DIRS_TENSORFLOW} ${TEST_DIRS_ONNX} ${TEST_DIRS_COMMON}
	coverage xml -i -o coverage_test_flex_$(PY).xml
# MKL-DNN-enabled run of the core/cpu and hetr suites.
# Target-specific exports apply to every recipe line of this target.
test_mkldnn: export PYTHONHASHSEED=0
test_mkldnn: export MKL_TEST_ENABLE=1
test_mkldnn: export LD_PRELOAD+=:./mkldnn_engine.so
test_mkldnn: export LD_PRELOAD+=:${WARP_CTC_PATH}/libwarpctc.so
test_mkldnn: multinode_prepare test_prepare clean
	@echo Running unit tests for core and cpu transformer tests...
	py.test -m "transformer_dependent and not hetr_only and not flex_only" --boxed \
		--junit-xml=testout_test_mkldnn_cpu_$(PY).xml \
		$(TEST_OPTS) $(TEST_DIRS)
	@echo Running unit tests for hetr dependent transformer tests...
	unset http_proxy && \
	unset https_proxy && \
	unset HTTP_PROXY && \
	unset HTTPS_PROXY && \
	py.test --transformer hetr -m "transformer_dependent and not flex_only or hetr_only" --boxed \
		--junit-xml=testout_test_mkldnn_hetr_$(PY).xml \
		--cov-append \
		$(TEST_OPTS) $(TEST_DIRS) $(TEST_DIRS_NEON) $(TEST_DIRS_TENSORFLOW) ${TEST_DIRS_ONNX} ${TEST_DIRS_COMMON}
	coverage xml -i -o coverage_test_mkldnn_$(PY).xml
# Core and cpu-transformer unit tests across all frontends.
test_cpu: export LD_PRELOAD+=:${WARP_CTC_PATH}/libwarpctc.so
test_cpu: export PYTHONHASHSEED=0
test_cpu: test_prepare clean
	echo Running unit tests for core and cpu transformer tests...
	py.test -m "not hetr_only and not flex_only and not hetr_gpu_only" --boxed \
		--junit-xml=testout_test_cpu_$(PY).xml \
		$(TEST_OPTS) $(TEST_DIRS) $(TEST_DIRS_NEON) $(TEST_DIRS_TENSORFLOW) ${TEST_DIRS_ONNX} ${TEST_DIRS_COMMON}
	coverage xml -i -o coverage_test_cpu_$(PY).xml
# GPU-transformer unit tests. $(PARALLEL_OPTS) enables xdist workers when
# NUM_PROCS is set; separate_execution tests always run serially.
test_gpu: export LD_PRELOAD+=:${WARP_CTC_PATH}/libwarpctc.so
test_gpu: export PYTHONHASHSEED=0
test_gpu: gpu_prepare test_prepare clean
	echo Running unit tests for gpu dependent transformer tests...
	py.test --transformer hetr -m "hetr_gpu_only" \
		--boxed \
		--junit-xml=testout_test_gpu_hetr_only_$(PY).xml \
		$(TEST_OPTS) $(TEST_DIRS)
	py.test --transformer gpu -m "transformer_dependent and not flex_only and not hetr_only and \
	not separate_execution" \
		--boxed $(PARALLEL_OPTS) --junit-xml=testout_test_gpu_tx_dependent_$(PY).xml --cov-append \
		$(TEST_OPTS) $(TEST_DIRS) $(TEST_DIRS_NEON) $(TEST_DIRS_TENSORFLOW) ${TEST_DIRS_COMMON}
	py.test --transformer gpu -m "transformer_dependent and not flex_only and not hetr_only and \
	separate_execution" \
		--boxed --junit-xml=testout_test_gpu_tx_dependent_separate_execution_$(PY).xml --cov-append \
		$(TEST_OPTS) $(TEST_DIRS)
	coverage xml -i -o coverage_test_gpu_$(PY).xml
# Heterogeneous-transformer unit tests on cpu devices.
# Proxy variables are cleared first so local multi-process workers can connect.
test_hetr: export LD_PRELOAD+=:${WARP_CTC_PATH}/libwarpctc.so
test_hetr: export PYTHONHASHSEED=0
test_hetr: multinode_prepare test_prepare clean
	echo Running unit tests for hetr dependent transformer tests...
	unset http_proxy && \
	unset https_proxy && \
	unset HTTP_PROXY && \
	unset HTTPS_PROXY && \
	py.test --transformer hetr -m "transformer_dependent and not flex_only or hetr_only" \
		--hetr_device cpu --boxed \
		--junit-xml=testout_test_hetr_$(PY).xml \
		$(TEST_OPTS) $(TEST_DIRS) $(TEST_DIRS_NEON) $(TEST_DIRS_TENSORFLOW) ${TEST_DIRS_ONNX} ${TEST_DIRS_COMMON}
	coverage xml -i -o coverage_test_hetr_$(PY).xml
# Multi-device hetr tests on gpu devices (marker: multi_device).
test_mgpu: export LD_PRELOAD+=:${WARP_CTC_PATH}/libwarpctc.so
test_mgpu: export PYTHONHASHSEED=0
test_mgpu: multinode_prepare test_prepare clean
	echo Running unit tests for hetr dependent transformer tests...
	unset http_proxy && \
	unset https_proxy && \
	unset HTTP_PROXY && \
	unset HTTPS_PROXY && \
	py.test --transformer hetr -m multi_device --hetr_device gpu --boxed \
		--junit-xml=testout_test_mgpu_$(PY).xml \
		$(TEST_OPTS) $(TEST_DIRS) $(TEST_DIRS_NEON) $(TEST_DIRS_TENSORFLOW) ${TEST_DIRS_ONNX} ${TEST_DIRS_COMMON}
	coverage xml -i -o coverage_test_mgpu_$(PY).xml
# Per-frontend suites: each installs test deps, cleans caches, runs the
# frontend's tests, and emits junit + cobertura reports.
test_mxnet: test_prepare clean
	echo Running unit tests for mxnet frontend...
	py.test --cov=ngraph \
		--junit-xml=testout_test_mxnet_$(PY).xml \
		$(TEST_OPTS) $(TEST_DIRS_MXNET)
	# BUGFIX: "-o" was missing, leaving the intended output filename to be
	# misparsed instead of written (every sibling target uses "-i -o").
	coverage xml -i -o coverage_test_mxnet_$(PY).xml

test_cntk: test_prepare clean
	echo Running unit tests for cntk frontend...
	py.test --cov=ngraph --junit-xml=testout_test_cntk_$(PY).xml $(TEST_OPTS) $(TEST_DIRS_CNTK)
	coverage xml -i -o coverage_test_cntk_$(PY).xml

test_caffe2: test_prepare clean
	echo Running unit tests for caffe2 frontend...
	py.test --cov=ngraph --junit-xml=testout_test_caffe2_$(PY).xml $(TEST_OPTS) $(TEST_DIRS_CAFFE2)
	coverage xml -i -o coverage_test_caffe2_$(PY).xml

test_integration: test_prepare clean
	echo Running integration tests...
	py.test --junit-xml=testout_test_integration_$(PY).xml \
		$(TEST_OPTS) $(TEST_DIRS_INTEGRATION)
	# BUGFIX: "-o" was missing here as well.
	coverage xml -i -o coverage_test_integration_$(PY).xml
# Run every executable example script; gpu_examples skips hetr examples
# and passes "-b gpu" to select the gpu backend.
examples: examples_prepare
	for file in `find examples -type f -executable`; do echo Running $$file... ; ./$$file ; done

gpu_examples: examples_prepare gpu_prepare
	for file in `find examples -type f -executable | grep -v hetr`; do echo Running $$file... ; ./$$file -b gpu; done
# flake8 style report plus pylint py3k-compatibility checks.
style: test_prepare
	flake8 --output-file style.txt --tee $(STYLE_CHECK_OPTS) $(STYLE_CHECK_DIRS)
	pylint --reports=n --output-format=colorized --py3k $(PYLINT3K_ARGS) --ignore=.venv *

lint: test_prepare
	pylint --output-format=colorized ngraph

lint3k:
	pylint --py3k $(PYLINT3K_ARGS) --ignore=.venv *
# Summary health check: style-fault count, missing-docstring count, and a
# one-line unit-test summary. Leading "-" ignores tool exit codes so all
# three sections always run.
check: test_prepare
	echo "Running style checks. Number of style faults is... "
	-flake8 --count $(STYLE_CHECK_OPTS) $(STYLE_CHECK_DIRS) \
		> /dev/null
	echo
	echo "Number of missing docstrings is..."
	-pylint --disable=all --enable=missing-docstring -r n \
		ngraph | grep "^C" | wc -l
	echo
	echo "Running unit tests..."
	-py.test $(TEST_DIRS) | tail -1 | cut -f 2,3 -d ' '
	echo
# Auto-fix style issues in place across all Python files.
fixstyle: autopep8

autopep8:
	autopep8 -a -a --global-config setup.cfg --in-place `find . -name \*.py`
	echo run "git diff" to see what may need to be checked in and "make style" to see what work remains
# Build the Sphinx HTML docs ($(MAKE) propagates -j/-n to the sub-make).
doc: doc_prepare
	$(MAKE) -C $(DOC_DIR) clean
	$(MAKE) -C $(DOC_DIR) html
	echo "Documentation built in $(DOC_DIR)/build/html"
	echo
# Publish built docs via rsync/ssh and repoint the "latest" symlink.
# Requires DOC_PUB_HOST, DOC_PUB_USER, and DOC_PUB_PATH from the environment.
publish_doc: doc
ifneq (,$(DOC_PUB_HOST))
	-cd $(DOC_DIR)/build/html && \
	rsync -avz -essh --perms --chmod=ugo+rX . \
		$(DOC_PUB_USER)$(DOC_PUB_HOST):$(DOC_PUB_RELEASE_PATH)
	-ssh $(DOC_PUB_USER)$(DOC_PUB_HOST) \
		'rm -f $(DOC_PUB_PATH)/latest && \
		ln -sf $(DOC_PUB_RELEASE_PATH) $(DOC_PUB_PATH)/latest'
else
	echo "Can't publish. Ensure DOC_PUB_HOST, DOC_PUB_USER, DOC_PUB_PATH set"
endif
# Interactive release checklist: bump versions, update the changelog,
# then finish the remaining steps manually.
release: check
	echo "Bump version number in setup.py"
	vi setup.py
	echo "Bump version number in doc/source/conf.py"
	vi doc/source/conf.py
	echo "Update ChangeLog"
	vi ChangeLog
	echo "TODO (manual steps): release on github and update docs with 'make publish_doc'"
	echo
# Detect the host OS once at parse time.
# FIX: was recursive "UNAME=", which re-ran `uname` on every expansion.
UNAME := $(shell uname)

# Install the protobuf toolchain needed by the onnx frontend.
onnx_dependency:
ifeq ("$(UNAME)", "Darwin")
	brew install protobuf
else ifeq ("$(UNAME)", "Linux")
	sudo apt-get install protobuf-compiler libprotobuf-dev
endif
# Install graphviz (system package) plus the viz Python requirements.
viz_prepare:
ifeq ("$(UNAME)", "Darwin")
	brew install graphviz
else ifeq ("$(UNAME)", "Linux")
	# BUGFIX: sudo was missing (inconsistent with onnx_dependency);
	# plain apt-get install fails for non-root users.
	sudo apt-get install graphviz
endif
	pip install -r viz_requirements.txt > /dev/null 2>&1