This repository has been archived by the owner on Dec 1, 2021. It is now read-only.

Adding a checker of hardware constraints for Conv #639

Open · wants to merge 7 commits into master
24 changes: 24 additions & 0 deletions dlk/python/dlk/core/operators.py
@@ -1017,7 +1017,31 @@ def __init__(self,
         # if kernel shape is not assigned, estimate kernel shape from input W's shape

     def _check_consistency(self) -> None:
+        """
+        This checks the following conditions:
+        1. The kernel size must be 1x1 or 3x3.
+        2. The input and output channel sizes must be at most 1024.
+        3. The input channel size should be a multiple of 32.
+        """
+        super()._check_consistency()
+        self._assert(self.kernel_shape[0] == self.kernel_shape[1] and self.kernel_shape[0] in (1, 3),
+                     f" Kernel size needs to be 1x1 or 3x3 but got "
+                     f"{self.kernel_shape[0]}x{self.kernel_shape[1]} for {self.name} of {self.op_type}")
+
+        if self.input_ops['X'].channel > 1024 or self.channel > 1024:
+            warnings.warn(warning_sign +
+                          f" Input and output channel sizes need to be at most 1024, but got "
+                          f"input: {self.input_ops['X'].channel} and output: {self.channel} "
+                          f"for {self.name} of {self.op_type}",
+                          stacklevel=2)
+        if self.input_ops['W'].op_type in ('QTZ_binary_channel_wise_mean_scaling', 'QTZ_binary_mean_scaling') and \
+                (self.input_ops['X'].preserve_quantization or
+                 self.input_ops['X'].op_type == 'QTZ_linear_mid_tread_half'):
+            if self.input_ops['X'].channel % 32 != 0:
+                warnings.warn(warning_sign +
+                              " Input channel size should be a multiple of 32 for best performance.",
+                              stacklevel=2)

         self._assert(len(self.shape) == self._num_dimensions + 2,
                      f'{self.name} has illegal shape {self.shape}')
         self._assert(len(self.kernel_shape) == self._num_dimensions,
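For illustration, a minimal sketch of how the new check fires, modeled on the updated tests below. The module paths and the Input operator's signature are assumptions taken from the rest of the test suite, not part of this diff:

import numpy as np
from core.data_types import Float32                # path as used in the tests (assumed)
from core.operators import Constant, Conv, Input   # Input's signature is assumed here

x = Input('input', [1, 4, 4, 3], Float32())
w = Constant('weight', Float32(), np.zeros([1, 2, 2, 3]))  # a 2x2 kernel

# _check_consistency() runs while the operator is constructed, so building
# a Conv with a 2x2 kernel now fails the new assertion: only 1x1 and 3x3
# kernels are supported by the hardware.
conv = Conv('conv', [1, 3, 3, 3], Float32(), {'X': x, 'W': w},
            kernel_shape=[2, 2])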
4 changes: 2 additions & 2 deletions dlk/tests/test_consistency_check.py
@@ -106,7 +106,7 @@ def test_conv_consistency(self) -> None:
         w = Constant(
             'weight',
             Float32(),
-            np.zeros([1, 2, 2, 3])
+            np.zeros([1, 1, 1, 3])
         )
         input_ops = {'X': cast(Operator, x), 'W': cast(Operator, w)}

@@ -115,7 +115,7 @@
             [1, 3, 3, 3],
             Float32(),
             input_ops,
-            pads=[1, 1, 2, 2],
+            pads=[1, 1, 1, 1],
             strides=[2, 2]
         )
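The pads change keeps the declared output shape consistent with standard convolution arithmetic. As a worked sketch (the 4x4 input size is an assumption; the hunk does not show the input tensor):

# out = floor((in + pad_begin + pad_end - kernel) / stride) + 1
def conv_out_size(in_size, pad_begin, pad_end, kernel, stride):
    return (in_size + pad_begin + pad_end - kernel) // stride + 1

# With the updated test values (1x1 kernel, strides [2, 2], pads [1, 1, 1, 1]),
# a 4x4 input yields the declared 3x3 output:
assert conv_out_size(4, 1, 1, 1, 2) == 3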
4 changes: 2 additions & 2 deletions dlk/tests/test_dynamic_create_op.py
@@ -83,7 +83,7 @@ def test_dynamic_create_binary(self) -> None:
         w = Constant(
             'const2',
             Float32(),
-            np.zeros([1, 2, 2, 3])
+            np.zeros([1, 1, 1, 3])
         )

         binary_ops = [
@@ -99,7 +99,7 @@
             module = importlib.import_module('core.operators')
             try:
                 op_def = getattr(module, op)
-                shape = [1, 2, 2, 3] if op == 'Conv' else shape
+                shape = [1, 3, 3, 3] if op == 'Conv' else shape
                 input_ops = {n: opw for n, opw in zip(op_def.input_names, [x, w])} \
                     if op == 'Conv' else {n: x for n in op_def.input_names}
                 args = [name, shape, dtype, input_ops]
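For context, this test instantiates each operator class dynamically by name. A condensed sketch of the pattern visible in the hunk (the surrounding loop and argument setup are elided here):

import importlib

module = importlib.import_module('core.operators')
op_def = getattr(module, 'Conv')   # resolve the operator class by name
print(op_def.input_names)          # the expected input keys used to wire {'X': ..., 'W': ...}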
8 changes: 4 additions & 4 deletions dlk/tests/test_graph.py
@@ -40,22 +40,22 @@ def test_graph_conv(self) -> None:
         w = Constant(
             'weight',
             Float32(),
-            np.zeros([1, 2, 2, 3])
+            np.zeros([1, 1, 1, 3])
         )

         # Conv
         conv = Conv(
             'conv',
-            [1, 4, 4, 3],
+            [1, 5, 5, 3],
             Float32(),
             {'X': x, 'W': w},  # you can get these keys by 'Conv.input_names'
-            kernel_shape=[2, 2]
+            kernel_shape=[1, 1]
         )

         # One output
         y = Output(
             'output',
-            [1, 4, 4, 3],
+            [1, 5, 5, 3],
             Float32(),
             {'input': conv}  # you can get this key by 'Output.input_names'
         )
14 changes: 7 additions & 7 deletions dlk/tests/test_operators.py
@@ -64,23 +64,23 @@ def test_conv(self) -> None:
         w = Constant(
             'weight',
             Float32(),
-            np.zeros([1, 2, 2, 5])
+            np.zeros([1, 1, 1, 5])
         )
         inputs: Dict[str, Operator] = {i_names[0]: x, i_names[1]: w}
         c = Conv(
             "conv1",
-            [1, 2, 2, 3],
+            [1, 3, 3, 3],
             Float32(),
             inputs,
-            kernel_shape=[2, 2]
+            kernel_shape=[1, 1]
         )

         self.assertEqual(c.batchsize, 1)
-        self.assertEqual(c.height, 2)
-        self.assertEqual(c.width, 2)
+        self.assertEqual(c.height, 3)
+        self.assertEqual(c.width, 3)
         self.assertEqual(c.channel, 3)
-        self.assertEqual(c.kernel_height, 2)
-        self.assertEqual(c.kernel_width, 2)
+        self.assertEqual(c.kernel_height, 1)
+        self.assertEqual(c.kernel_width, 1)

         print("Conv test passed!")