diff --git a/automated_test.py b/automated_test.py
index dc62064..68b7eba 100644
--- a/automated_test.py
+++ b/automated_test.py
@@ -360,56 +360,56 @@ def test_downsample_segmentation_4x_x():
   result, = downsamplefn(result, (1,2,2))
   assert result.shape == (1024, 16, 128, 1)
 
-def test_downsample_max_pooling():
-  for dtype in (np.int8, np.float32):
-    cases = [
-      np.array([ [ -1, 0 ], [ 0, 0 ] ], dtype=dtype),
-      np.array([ [ 0, 0 ], [ 0, 0 ] ], dtype=dtype),
-      np.array([ [ 0, 1 ], [ 0, 0 ] ], dtype=dtype),
-      np.array([ [ 0, 1 ], [ 1, 0 ] ], dtype=dtype),
-      np.array([ [ 0, 1 ], [ 0, 2 ] ], dtype=dtype)
-    ]
-
-    for i in range(len(cases)):
-      case = cases[i]
-      result, = tinybrain.downsample.downsample_with_max_pooling(case, (1, 1))
-      assert np.all(result == cases[i])
+@pytest.mark.parametrize("dtype", [np.int8, np.float32])
+def test_downsample_max_pooling(dtype):
+  cases = [
+    np.array([ [ -1, 0 ], [ 0, 0 ] ], dtype=dtype),
+    np.array([ [ 0, 0 ], [ 0, 0 ] ], dtype=dtype),
+    np.array([ [ 0, 1 ], [ 0, 0 ] ], dtype=dtype),
+    np.array([ [ 0, 1 ], [ 1, 0 ] ], dtype=dtype),
+    np.array([ [ 0, 1 ], [ 0, 2 ] ], dtype=dtype)
+  ]
 
-    answers = [ 0, 0, 1, 1, 2 ]
+  for i in range(len(cases)):
+    case = cases[i]
+    result, = tinybrain.downsample.downsample_with_max_pooling(case, (1, 1))
+    assert np.all(result == cases[i])
 
-    for i in range(len(cases)):
-      case = cases[i]
-      result, = tinybrain.downsample.downsample_with_max_pooling(case, (2, 2))
-      assert result == answers[i]
+  answers = [ 0, 0, 1, 1, 2 ]
 
+  for i in range(len(cases)):
+    case = cases[i]
+    result, = tinybrain.downsample.downsample_with_max_pooling(case, (2, 2))
+    assert result == answers[i]
 
-    cast = lambda arr: np.array(arr, dtype=dtype)
-    answers = list(map(cast, [
-      [[ 0, 0 ]],
-      [[ 0, 0 ]],
-      [[ 0, 1 ]],
-      [[ 1, 1 ]],
-      [[ 0, 2 ]],
-    ]))
+  cast = lambda arr: np.array(arr, dtype=dtype)
 
-    for i in range(len(cases)):
-      case = cases[i]
-      result, = tinybrain.downsample.downsample_with_max_pooling(case, (2, 1))
-      assert np.all(result == answers[i])
+  answers = list(map(cast, [
+    [[ 0, 0 ]],
+    [[ 0, 0 ]],
+    [[ 0, 1 ]],
+    [[ 1, 1 ]],
+    [[ 0, 2 ]],
+  ]))
 
-    answers = list(map(cast, [
-      [[ 0 ], [ 0 ]],
-      [[ 0 ], [ 0 ]],
-      [[ 1 ], [ 0 ]],
-      [[ 1 ], [ 1 ]],
-      [[ 1 ], [ 2 ]],
-    ]))
+  for i in range(len(cases)):
+    case = cases[i]
+    result, = tinybrain.downsample.downsample_with_max_pooling(case, (2, 1))
+    assert np.all(result == answers[i])
+
+  answers = list(map(cast, [
+    [[ 0 ], [ 0 ]],
+    [[ 0 ], [ 0 ]],
+    [[ 1 ], [ 0 ]],
+    [[ 1 ], [ 1 ]],
+    [[ 1 ], [ 2 ]],
+  ]))
 
-    for i in range(len(cases)):
-      case = cases[i]
-      result, = tinybrain.downsample.downsample_with_max_pooling(case, (1, 2))
-      assert np.all(result == answers[i])
+  for i in range(len(cases)):
+    case = cases[i]
+    result, = tinybrain.downsample.downsample_with_max_pooling(case, (1, 2))
+    assert np.all(result == answers[i])
 
   result, = tinybrain.downsample.downsample_with_max_pooling(image4x4x4, (2, 2, 2))
   answer = cast([
@@ -449,6 +449,57 @@ def test_downsample_max_pooling():
 
   assert np.all(result == answer)
 
+@pytest.mark.parametrize("dtype", [np.int8, np.float32])
+def test_downsample_min_pooling(dtype):
+  cases = [
+    np.array([ [ -1, 0 ], [ 0, 0 ] ], dtype=dtype),
+    np.array([ [ 0, 0 ], [ 0, 0 ] ], dtype=dtype),
+    np.array([ [ 0, 1 ], [ 0, 0 ] ], dtype=dtype),
+    np.array([ [ 0, 1 ], [ 1, 0 ] ], dtype=dtype),
+    np.array([ [ 0, 1 ], [ 0, 2 ] ], dtype=dtype)
+  ]
+
+  for i in range(len(cases)):
+    case = cases[i]
+    result, = tinybrain.downsample.downsample_with_min_pooling(case, (1, 1))
+    assert np.all(result == cases[i])
+
+  answers = [ -1, 0, 0, 0, 0 ]
+
+  for i in range(len(cases)):
+    case = cases[i]
+    result, = tinybrain.downsample.downsample_with_min_pooling(case, (2, 2))
+    assert result == answers[i]
+
+  cast = lambda arr: np.array(arr, dtype=dtype)
+
+  answers = list(map(cast, [
+    [[ -1, 0 ]],
+    [[ 0, 0 ]],
+    [[ 0, 0 ]],
+    [[ 0, 0 ]],
+    [[ 0, 1 ]],
+  ]))
+
+  for i in range(len(cases)):
+    case = cases[i]
+    result, = tinybrain.downsample.downsample_with_min_pooling(case, (2, 1))
+    print(result)
+    assert np.all(result == answers[i])
+
+  answers = list(map(cast, [
+    [[ -1 ], [ 0 ]],
+    [[ 0 ], [ 0 ]],
+    [[ 0 ], [ 0 ]],
+    [[ 0 ], [ 0 ]],
+    [[ 0 ], [ 0 ]],
+  ]))
+
+  for i in range(len(cases)):
+    case = cases[i]
+    result, = tinybrain.downsample.downsample_with_min_pooling(case, (1, 2))
+    assert np.all(result == answers[i])
+
 def test_countless3d():
   def test_all_cases(fn):
     alldifferent = [
diff --git a/tinybrain/downsample.py b/tinybrain/downsample.py
index 38f7c50..4300f61 100644
--- a/tinybrain/downsample.py
+++ b/tinybrain/downsample.py
@@ -154,11 +154,25 @@ def downsample_with_max_pooling(array, factor, num_mips=1):
   """
   results = []
   for mip in range(num_mips):
-    array = _downsample_with_max_pooling(array, factor)
+    array = _downsample_with(array, factor, fn=np.maximum)
     results.append(array)
   return results
 
-def _downsample_with_max_pooling(array, factor):
+def downsample_with_min_pooling(array, factor, num_mips=1):
+  """
+  Downsample by picking the minimum value within a
+  cuboid specified by factor. That is, a reduction factor
+  of 2x2 works by summarizing many 2x2 cuboids. If factor's
+  length is smaller than array.shape, the remaining factors will
+  be filled with 1.
+  """
+  results = []
+  for mip in range(num_mips):
+    array = _downsample_with(array, factor, fn=np.minimum)
+    results.append(array)
+  return results
+
+def _downsample_with(array, factor, fn):
   """
   Downsample by picking the maximum value within a
   cuboid specified by factor. That is, a reduction factor
@@ -179,7 +193,7 @@ def _downsample_with_max_pooling(array, factor):
 
   output = sections[0].copy()
   for section in sections[1:]:
-    np.maximum(output, section, output)
+    fn(output, section, output)
 
   return output
 
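
Usage sketch (not part of the patch): the snippet below shows how the new downsample_with_min_pooling entry point could be called once the change above is applied. The 8x8 input array, the (2, 2) factor, and num_mips=2 are illustrative assumptions; the tinybrain.downsample.downsample_with_min_pooling call path mirrors the one used in the tests above.

import numpy as np
import tinybrain

# Illustrative input: an 8x8 float image with arbitrary values.
image = np.arange(64, dtype=np.float32).reshape(8, 8)

# Each mip halves both axes by keeping the minimum of every 2x2 block,
# so the returned list holds the 4x4 and 2x2 reductions in order.
mips = tinybrain.downsample.downsample_with_min_pooling(image, (2, 2), num_mips=2)
for mip in mips:
  print(mip.shape, mip.min(), mip.max())

The shared helper serves both pooling modes because np.maximum and np.minimum are binary ufuncs that accept an output array as their third positional argument, so fn(output, section, output) accumulates the running extreme in place whichever ufunc is passed.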