diff --git a/dgp/annotations/camera_transforms.py b/dgp/annotations/camera_transforms.py
index 39ed070a..2bba670e 100644
--- a/dgp/annotations/camera_transforms.py
+++ b/dgp/annotations/camera_transforms.py
@@ -780,7 +780,7 @@ def _calc_A(
             box = [newx / 2, 0, w - newx / 2, h]
         else:
             newy = h - w * aspect_ratio
-            box = [0, newy / 2, w, h - newy]
+            box = [0, newy / 2, w, h - newy / 2]
 
         return box_crop_affine_transform(box, self.shape)
 
diff --git a/requirements.txt b/requirements.txt
index b4f051f6..ff578255 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,7 +3,7 @@ botocore
 Click>=7.1.2
 diskcache>=4.1.0
 protobuf>=3.20.1,<4.0.0
-matplotlib>=3.0.3,<4.0
+matplotlib>=3.0.3,<3.9.0
 opencv-python>=4.5.3.56
 Pillow-SIMD>=8.3.2
 pycocotools>=2.0.0
diff --git a/tests/test_camera_transforms.py b/tests/test_camera_transforms.py
index 02c76992..317c7617 100644
--- a/tests/test_camera_transforms.py
+++ b/tests/test_camera_transforms.py
@@ -1,9 +1,11 @@
 # Copyright 2021-2022 Woven Planet. All rights reserved.
 import os
 import unittest
+from typing import OrderedDict
 
 import cv2
 import numpy as np
+import PIL
 
 from dgp.annotations.camera_transforms import (
     AffineCameraTransform,
@@ -18,6 +20,7 @@
 from dgp.annotations.ontology import KeyLineOntology, KeyPointOntology
 from dgp.datasets.synchronized_dataset import SynchronizedSceneDataset
 from dgp.proto.ontology_pb2 import Ontology as OntologyV2Pb2
+from dgp.utils.pose import Pose
 from dgp.utils.structures.key_line_2d import KeyLine2D
 from dgp.utils.structures.key_point_2d import KeyPoint2D
 from dgp.utils.visualization_utils import visualize_cameras
@@ -428,6 +431,28 @@ def test_crop_scale_transform(self):
 
         assert_almost_equal(cam_datum, cam_datum3, valid_region=valid_region)
 
+    def test_crop_scale_transform_simple(self):
+        """Simple test case for crop scale transform with fixed input"""
+
+        datum = OrderedDict({
+            'datum_name': 'test',
+            'datum_type': 'image',
+            'pose': Pose(),
+            'extrinsics': Pose(),
+            'intrinsics': np.array([
+                [1976.45, 0, 2692 / 2],
+                [0, 1977.05, 1836 / 2],
+                [0, 0, 1.0],
+            ]),
+            'rgb': PIL.Image.new('RGB', (2692, 1836)),
+        })
+
+        target_shape = (544, 960)
+        tr = CropScaleTransform(target_shape=target_shape, fix_h=False)
+        datum2 = tr(datum)
+        assert np.isclose(datum2['intrinsics'][0, 0], 704.82, atol=1e-3)
+        assert np.isclose(datum2['intrinsics'][1, 1], 705.04, atol=1e-3)
+
     def test_composite_transform(self):
         """Test that we can compose transforms correctly. We test that we can
         get the same datum by applying multiple transformation consecutively vs all at once.