From c0d4eb310b4aab4915a655f7545a2aa8bf983e50 Mon Sep 17 00:00:00 2001 From: DocGarbanzo <47540921+DocGarbanzo@users.noreply.github.com> Date: Tue, 20 Jun 2023 22:19:11 +0100 Subject: [PATCH] Upgrade to python 3.9 and TF 2.9 (#1119) * Updating TF to 2.9 and python to 3.9 * On Jetson 5.0.X use system python 3.8 with Nvidia's TF 2.9 * Tensorrt models cannot be build on the PC any longer, but can be created at runtime on the Jetson during first inference. This might be slow. * Update gh actions accordingly * Small updates in the UI * On RPi use 64-bit Bullseye using Picamera2 * Switch to using savedmodel format by default instead of legacy h5 * Factor out predict method into interpreter base class, as now TF interfaces between different interpreters are aligned and can all be called by input dictionary. * Complying with pep-440 for version numbering of dev versions --- .github/workflows/python-package-conda.yml | 6 +- donkeycar/__init__.py | 4 +- donkeycar/management/base.py | 2 +- donkeycar/management/kivy_ui.py | 60 +++-- donkeycar/management/makemovie.py | 5 +- donkeycar/management/ui.kv | 7 +- donkeycar/parts/camera.py | 85 ++++--- donkeycar/parts/datastore.py | 2 +- donkeycar/parts/datastore_v2.py | 23 +- donkeycar/parts/fastai.py | 9 +- donkeycar/parts/interpreter.py | 265 +++++++++++---------- donkeycar/parts/keras.py | 132 +++++----- donkeycar/parts/network.py | 10 +- donkeycar/parts/tub_v2.py | 5 + donkeycar/parts/web_controller/web.py | 34 +-- donkeycar/pipeline/database.py | 7 +- donkeycar/pipeline/sequence.py | 45 +++- donkeycar/pipeline/training.py | 31 ++- donkeycar/pipeline/types.py | 5 + donkeycar/templates/basic.py | 13 +- donkeycar/templates/cfg_basic.py | 2 + donkeycar/templates/cfg_complete.py | 2 + donkeycar/templates/complete.py | 3 +- donkeycar/templates/simulator.py | 3 +- donkeycar/tests/test_keras.py | 39 ++- donkeycar/tests/test_kinematics.py | 2 +- donkeycar/tests/test_scripts.py | 2 +- donkeycar/tests/test_torch.py | 9 +- 
donkeycar/tests/test_train.py | 4 +- donkeycar/utils.py | 4 +- install/envs/jetson.yml | 41 ++++ install/envs/jetson46.yml | 34 +++ install/envs/mac.yml | 9 +- install/envs/ubuntu.yml | 10 +- install/envs/windows.yml | 6 +- install/pi/install.sh | 1 - scripts/convert_to_tflite.py | 40 ++++ setup.py | 34 ++- 38 files changed, 616 insertions(+), 379 deletions(-) create mode 100644 install/envs/jetson.yml create mode 100644 install/envs/jetson46.yml create mode 100755 scripts/convert_to_tflite.py diff --git a/.github/workflows/python-package-conda.yml b/.github/workflows/python-package-conda.yml index 9c6c84ef2..a50c08a8f 100644 --- a/.github/workflows/python-package-conda.yml +++ b/.github/workflows/python-package-conda.yml @@ -22,11 +22,11 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v3 - - name: Create python 3.7 conda env + - name: Create python 3.9 conda env uses: conda-incubator/setup-miniconda@v2 with: - python-version: 3.7 - mamba-version: "*" + python-version: 3.9 + mamba-version: 1.3 # "*" activate-environment: donkey environment-file: ${{matrix.ENV_FILE}} auto-activate-base: false diff --git a/donkeycar/__init__.py b/donkeycar/__init__.py index f13cc178c..6338ce4dc 100644 --- a/donkeycar/__init__.py +++ b/donkeycar/__init__.py @@ -14,8 +14,8 @@ print(f.renderText('Donkey Car')) print(f'using donkey v{__version__} ...') -if sys.version_info.major < 3 or sys.version_info.minor < 6: - msg = f'Donkey Requires Python 3.6 or greater. You are using {sys.version}' +if sys.version_info.major < 3 or sys.version_info.minor < 8: + msg = f'Donkey Requires Python 3.8 or greater. You are using {sys.version}' raise ValueError(msg) # The default recursion limits in CPython are too small. 
diff --git a/donkeycar/management/base.py b/donkeycar/management/base.py index da5e649f5..130353ac5 100644 --- a/donkeycar/management/base.py +++ b/donkeycar/management/base.py @@ -480,8 +480,8 @@ def plot_predictions(self, cfg, tub_paths, model_path, limit, model_type, pilot_angles.append(pilot_angle) pilot_throttles.append(pilot_throttle) bar.next() - print() # to break the line after progress bar finishes. + bar.finish() angles_df = pd.DataFrame({'user_angle': user_angles, 'pilot_angle': pilot_angles}) throttles_df = pd.DataFrame({'user_throttle': user_throttles, diff --git a/donkeycar/management/kivy_ui.py b/donkeycar/management/kivy_ui.py index 4930a9b42..6842ccaf3 100644 --- a/donkeycar/management/kivy_ui.py +++ b/donkeycar/management/kivy_ui.py @@ -716,7 +716,7 @@ def on_model_type(self, obj, model_type): if 'tflite' in self.model_type: self.filters = ['*.tflite'] elif 'tensorrt' in self.model_type: - self.filters = ['*.trt'] + self.filters = ['*.trt', '*.savedmodel'] else: self.filters = ['*.h5', '*.savedmodel'] @@ -971,7 +971,7 @@ class DataFrameLabel(Label): class TransferSelector(BoxLayout, FileChooserBase): """ Class to select transfer model""" - filters = ['*.h5'] + filters = ['*.h5', '*.savedmodel'] class TrainScreen(Screen): @@ -980,13 +980,24 @@ class TrainScreen(Screen): database = ObjectProperty() pilot_df = ObjectProperty(force_dispatch=True) tub_df = ObjectProperty(force_dispatch=True) + train_checker = False - def train_call(self, model_type, *args): - # remove car directory from path + def train_call(self, *args): tub_path = tub_screen().ids.tub_loader.tub.base_path transfer = self.ids.transfer_spinner.text + model_type = self.ids.train_spinner.text if transfer != 'Choose transfer model': - transfer = os.path.join(self.config.MODELS_PATH, transfer + '.h5') + h5 = os.path.join(self.config.MODELS_PATH, transfer + '.h5') + sm = os.path.join(self.config.MODELS_PATH, transfer + '.savedmodel') + if os.path.exists(sm): + transfer = sm + elif 
os.path.exists(h5): + transfer = h5 + else: + transfer = None + self.ids.status.text = \ + f'Could find neither {sm} nor {trans_h5} - training ' \ + f'without transfer' else: transfer = None try: @@ -994,20 +1005,32 @@ def train_call(self, model_type, *args): model_type=model_type, transfer=transfer, comment=self.ids.comment.text) - self.ids.status.text = f'Training completed.' - self.ids.comment.text = 'Comment' - self.ids.transfer_spinner.text = 'Choose transfer model' - self.reload_database() except Exception as e: Logger.error(e) self.ids.status.text = f'Train failed see console' - finally: - self.ids.train_button.state = 'normal' - def train(self, model_type): + def train(self): self.config.SHOW_PLOT = False - Thread(target=self.train_call, args=(model_type,)).start() - self.ids.status.text = f'Training started.' + t = Thread(target=self.train_call) + self.ids.status.text = 'Training started.' + + def func(dt): + t.start() + + def check_training_done(dt): + if not t.is_alive(): + self.train_checker.cancel() + self.ids.comment.text = 'Comment' + self.ids.transfer_spinner.text = 'Choose transfer model' + self.ids.train_button.state = 'normal' + self.ids.status.text = 'Training completed.' + self.ids.train_button.disabled = False + self.reload_database() + + # schedules the call after the current frame + Clock.schedule_once(func, 0) + # checks if training finished and updates the window if + self.train_checker = Clock.schedule_interval(check_training_done, 0.5) def set_config_attribute(self, input): try: @@ -1197,19 +1220,20 @@ def connected(self, event): self.is_connected = False if return_val is None: # command still running, do nothing and check next time again - status = 'Awaiting connection...' + status = 'Awaiting connection to...' 
self.ids.connected.color = 0.8, 0.8, 0.0, 1 else: # command finished, check if successful and reset connection if return_val == 0: - status = 'Connected' + status = 'Connected to' self.ids.connected.color = 0, 0.9, 0, 1 self.is_connected = True else: - status = 'Disconnected' + status = 'Disconnected from' self.ids.connected.color = 0.9, 0, 0, 1 self.connection = None - self.ids.connected.text = status + self.ids.connected.text \ + = f'{status} {getattr(self.config, "PI_HOSTNAME")}' def drive(self): model_args = '' diff --git a/donkeycar/management/makemovie.py b/donkeycar/management/makemovie.py index bcc68ef66..40b1047ed 100755 --- a/donkeycar/management/makemovie.py +++ b/donkeycar/management/makemovie.py @@ -104,7 +104,8 @@ def draw_user_input(self, record, img, img_drawon): user_angle = float(record["user/angle"]) user_throttle = float(record["user/throttle"]) green = (0, 255, 0) - self.draw_line_into_image(user_angle, user_throttle, False, img_drawon, green) + self.draw_line_into_image(user_angle, user_throttle, False, + img_drawon, green) def draw_model_prediction(self, img, img_drawon): """ @@ -114,7 +115,7 @@ def draw_model_prediction(self, img, img_drawon): if self.keras_part is None: return - expected = tuple(self.keras_part.get_input_shapes()[0][1:]) + expected = tuple(self.keras_part.get_input_shape('img_in')[1:]) actual = img.shape # if model expects grey-scale but got rgb, covert diff --git a/donkeycar/management/ui.kv b/donkeycar/management/ui.kv index 034be84af..96d72e9a4 100644 --- a/donkeycar/management/ui.kv +++ b/donkeycar/management/ui.kv @@ -588,8 +588,9 @@ ToggleButton: id: train_button text: 'Training running...' 
if self.state == 'down' else 'Train' - on_press: root.train(train_spinner.text) - + on_press: + root.train() + self.disabled = True ScrollableLabel: DataFrameLabel: id: scroll_pilots @@ -604,7 +605,7 @@ spacing: layout_pad_x MySpinner: id: delete_spinner - text: 'Pilot' + text_autoupdate: True Button: id: delete_btn on_press: diff --git a/donkeycar/parts/camera.py b/donkeycar/parts/camera.py index 180cc5428..1377089c7 100644 --- a/donkeycar/parts/camera.py +++ b/donkeycar/parts/camera.py @@ -9,29 +9,37 @@ logger = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) + class CameraError(Exception): pass + class BaseCamera: def run_threaded(self): return self.frame -class PiCamera(BaseCamera): - def __init__(self, image_w=160, image_h=120, image_d=3, framerate=20, vflip=False, hflip=False): - from picamera.array import PiRGBArray - from picamera import PiCamera - resolution = (image_w, image_h) - # initialize the camera and stream - self.camera = PiCamera() #PiCamera gets resolution (height, width) - self.camera.resolution = resolution - self.camera.framerate = framerate - self.camera.vflip = vflip - self.camera.hflip = hflip - self.rawCapture = PiRGBArray(self.camera, size=resolution) - self.stream = self.camera.capture_continuous(self.rawCapture, - format="rgb", use_video_port=True) +class PiCamera(BaseCamera): + """ + RPi Camera class based on Bullseye's python class Picamera2. 
+ """ + def __init__(self, image_w=160, image_h=120, image_d=3, + vflip=False, hflip=False): + from picamera2 import Picamera2 + from libcamera import Transform + + # it's weird but BGR returns RGB images + config_dict = {"size": (image_w, image_h), "format": "BGR888"} + transform = Transform(hflip=hflip, vflip=vflip) + self.camera = Picamera2() + config = self.camera.create_preview_configuration( + config_dict, transform=transform) + self.camera.align_configuration(config) + self.camera.configure(config) + # try min / max frame rate as 0.1 / 1 ms (it will be slower though) + self.camera.set_controls({"FrameDurationLimits": (100, 1000)}) + self.camera.start() # initialize the frame and the variable used to indicate # if the thread should be stopped @@ -40,31 +48,23 @@ def __init__(self, image_w=160, image_h=120, image_d=3, framerate=20, vflip=Fals self.image_d = image_d # get the first frame or timeout - logger.info('PiCamera loaded...') - if self.stream is not None: - logger.info('PiCamera opened...') - warming_time = time.time() + 5 # quick after 5 seconds - while self.frame is None and time.time() < warming_time: - logger.info("...warming camera") - self.run() - time.sleep(0.2) + logger.info('PiCamera opened...') + warming_time = time.time() + 5 # quick after 5 seconds + while self.frame is None and time.time() < warming_time: + logger.info("...warming camera") + self.run() + time.sleep(0.2) + + if self.frame is None: + raise CameraError("Unable to start PiCamera.") - if self.frame is None: - raise CameraError("Unable to start PiCamera.") - else: - raise CameraError("Unable to open PiCamera.") logger.info("PiCamera ready.") def run(self): - # grab the frame from the stream and clear the stream in - # preparation for the next frame - if self.stream is not None: - f = next(self.stream) - if f is not None: - self.frame = f.array - self.rawCapture.truncate(0) - if self.image_d == 1: - self.frame = rgb2gray(self.frame) + # grab the next frame from the camera buffer + 
self.frame = self.camera.capture_array("main") + if self.image_d == 1: + self.frame = rgb2gray(self.frame) return self.frame @@ -78,16 +78,13 @@ def shutdown(self): self.on = False logger.info('Stopping PiCamera') time.sleep(.5) - self.stream.close() - self.rawCapture.close() self.camera.close() - self.stream = None - self.rawCapture = None self.camera = None class Webcam(BaseCamera): - def __init__(self, image_w=160, image_h=120, image_d=3, framerate = 20, camera_index = 0): + def __init__(self, image_w=160, image_h=120, image_d=3, + framerate=20, camera_index=0): # # pygame is not installed by default. # Installation on RaspberryPi (with env activated): @@ -270,18 +267,20 @@ def shutdown(self): self.running = False logger.info('Stopping CSICamera') time.sleep(.5) - del(self.camera) + del self.camera class V4LCamera(BaseCamera): ''' - uses the v4l2capture library from this fork for python3 support: https://github.com/atareao/python3-v4l2capture + uses the v4l2capture library from this fork for python3 support: + https://github.com/atareao/python3-v4l2capture sudo apt-get install libv4l-dev cd python3-v4l2capture python setup.py build pip install -e . 
''' - def __init__(self, image_w=160, image_h=120, image_d=3, framerate=20, dev_fn="/dev/video0", fourcc='MJPG'): + def __init__(self, image_w=160, image_h=120, image_d=3, framerate=20, + dev_fn="/dev/video0", fourcc='MJPG'): self.running = True self.frame = None diff --git a/donkeycar/parts/datastore.py b/donkeycar/parts/datastore.py index 87d455c41..28b3c8349 100644 --- a/donkeycar/parts/datastore.py +++ b/donkeycar/parts/datastore.py @@ -219,7 +219,7 @@ def put_record(self, data): elif typ in ['str', 'float', 'int', 'boolean', 'vector']: json_data[key] = val - elif typ is 'image': + elif typ == 'image': path = self.make_file_path(key) val.save(path) json_data[key]=path diff --git a/donkeycar/parts/datastore_v2.py b/donkeycar/parts/datastore_v2.py index 8bc359d35..ac48bf503 100644 --- a/donkeycar/parts/datastore_v2.py +++ b/donkeycar/parts/datastore_v2.py @@ -247,18 +247,21 @@ def __init__(self, base_path, inputs=[], types=[], metadata=[], has_catalogs = False if self.manifest_path.exists(): - self.seekeable = Seekable(self.manifest_path, read_only=self.read_only) + self.seekeable = Seekable(self.manifest_path, + read_only=self.read_only) if self.seekeable.has_content(): self._read_contents() has_catalogs = len(self.catalog_paths) > 0 - + logger.info(f'Found datastore at {self.base_path.as_posix()}') else: created_at = time.time() self.manifest_metadata['created_at'] = created_at if not self.base_path.exists(): self.base_path.mkdir(parents=True, exist_ok=True) - print(f'Created a new datastore at {self.base_path.as_posix()}') - self.seekeable = Seekable(self.manifest_path, read_only=self.read_only) + logger.info(f'Created a new datastore at' + f' {self.base_path.as_posix()}') + self.seekeable = Seekable(self.manifest_path, + read_only=self.read_only) if not has_catalogs: self._write_contents() @@ -266,7 +269,7 @@ def __init__(self, base_path, inputs=[], types=[], metadata=[], else: last_known_catalog = os.path.join(self.base_path, self.catalog_paths[-1]) - 
print(f'Using catalog {last_known_catalog}') + logger.info(f'Using last catalog {last_known_catalog}') self.current_catalog = Catalog(last_known_catalog, read_only=self.read_only, start_index=self.current_index) @@ -296,6 +299,9 @@ def delete_records(self, record_indexes): record_indexes = {record_indexes} self.deleted_indexes.update(record_indexes) self._update_catalog_metadata(update=True) + if record_indexes: + logger.info(f'Deleted records {min(record_indexes)} - ' + f'{max(record_indexes)}') def restore_records(self, record_indexes): # Does not actually delete the record, but marks it as deleted. @@ -303,6 +309,9 @@ def restore_records(self, record_indexes): record_indexes = {record_indexes} self.deleted_indexes.difference_update(record_indexes) self._update_catalog_metadata(update=True) + if record_indexes: + logger.info(f'Restored records {min(record_indexes)} - ' + f'{max(record_indexes)}') def _add_catalog(self): current_length = len(self.catalog_paths) @@ -357,7 +366,7 @@ def _update_catalog_metadata(self, update=True): catalog_metadata['paths'] = self.catalog_paths catalog_metadata['current_index'] = self.current_index catalog_metadata['max_len'] = self.max_len - catalog_metadata['deleted_indexes'] = list(self.deleted_indexes) + catalog_metadata['deleted_indexes'] = sorted(list(self.deleted_indexes)) self.catalog_metadata = catalog_metadata self.seekeable.writeline(json.dumps(catalog_metadata)) @@ -439,7 +448,7 @@ def __next__(self): record = json.loads(contents) return record except Exception: - print(f'Ignoring record at index {current_index}') + logger.error(f'Failed loading record {current_index}') continue else: self.current_catalog = None diff --git a/donkeycar/parts/fastai.py b/donkeycar/parts/fastai.py index ead252eef..f7b9a8523 100644 --- a/donkeycar/parts/fastai.py +++ b/donkeycar/parts/fastai.py @@ -84,8 +84,8 @@ def set_optimizer(self, optimizer_type: str, self.interpreter.set_optimizer(optimizer) # shape - def get_input_shapes(self): - 
return self.interpreter.get_input_shapes() + def get_input_shape(self, input_name): + return self.interpreter.get_input_shape(input_name) def seq_size(self) -> int: return 0 @@ -237,12 +237,13 @@ def y_transform(self, record: Union[TubRecord, List[TubRecord]]) \ def output_shapes(self): # need to cut off None from [None, 120, 160, 3] tensor shape - img_shape = self.get_input_shapes()[0][1:] + img_shape = self.get_input_shape('img')[1:] + return img_shape class Linear(nn.Module): def __init__(self): - super(Linear, self).__init__() + super().__init__() self.dropout = 0.1 # init the layers self.conv24 = nn.Conv2d(3, 24, kernel_size=(5, 5), stride=(2, 2)) diff --git a/donkeycar/parts/interpreter.py b/donkeycar/parts/interpreter.py index d2912e190..b4cf1bd5a 100755 --- a/donkeycar/parts/interpreter.py +++ b/donkeycar/parts/interpreter.py @@ -6,17 +6,24 @@ import tensorflow as tf from tensorflow import keras - -from tensorflow.python.framework.convert_to_constants import \ - convert_variables_to_constants_v2 as convert_var_to_const from tensorflow.python.saved_model import tag_constants, signature_constants +from tensorflow.python.compiler.tensorrt import trt_convert as trt logger = logging.getLogger(__name__) +def has_trt_support(): + try: + converter = trt.TrtGraphConverterV2() + return True + except RuntimeError as e: + logger.warning(e) + return False + + def keras_model_to_tflite(in_filename, out_filename, data_gen=None): logger.info(f'Convert model {in_filename} to TFLite {out_filename}') - model = tf.keras.models.load_model(in_filename) + model = tf.keras.models.load_model(in_filename, compile=False) keras_to_tflite(model, out_filename, data_gen) logger.info('TFLite conversion done.') @@ -49,31 +56,29 @@ def keras_to_tflite(model, out_filename, data_gen=None): open(out_filename, "wb").write(tflite_model) -def saved_model_to_tensor_rt(saved_path: str, tensor_rt_path: str): +def saved_model_to_tensor_rt(saved_path: str, tensor_rt_path: str) -> bool: """ Converts TF 
SavedModel format into TensorRT for cuda. Note, this works also without cuda as all GPU specific magic is handled within TF now. """ logger.info(f'Converting SavedModel {saved_path} to TensorRT' f' {tensor_rt_path}') - from tensorflow.python.compiler.tensorrt import trt_convert as trt - - params = trt.DEFAULT_TRT_CONVERSION_PARAMS - params = params._replace(max_workspace_size_bytes=(1 << 32)) - params = params._replace(precision_mode="FP16") - params = params._replace(maximum_cached_engines=100) try: - converter = trt.TrtGraphConverterV2( - input_saved_model_dir=saved_path, - conversion_params=params) + converter = trt.TrtGraphConverterV2(input_saved_model_dir=saved_path) converter.convert() converter.save(tensor_rt_path) logger.info(f'TensorRT conversion done.') + return True except Exception as e: logger.error(f'TensorRT conversion failed because: {e}') + return False class Interpreter(ABC): """ Base class to delegate between Keras, TFLite and TensorRT """ + def __init__(self): + self.input_keys: list[str] = None + self.output_keys: list[str] = None + self.shapes: tuple[dict] = None @abstractmethod def load(self, model_path: str) -> None: @@ -93,13 +98,19 @@ def compile(self, **kwargs): raise NotImplementedError('Requires implementation') @abstractmethod - def get_input_shapes(self) -> List[tf.TensorShape]: + def get_input_shape(self, input_name) -> tf.TensorShape: pass - @abstractmethod - def predict(self, img_arr: np.ndarray, other_arr: np.ndarray) \ + def predict(self, img_arr: np.ndarray, *other_arr: np.ndarray) \ -> Sequence[Union[float, np.ndarray]]: - pass + """ + This inference interface just converts the inputs into a dictionary + :param img_arr: input image array + :param other_arr: second input array + :return: model output, Iterable over scalar and/or vectors + """ + input_dict = dict(zip(self.input_keys, (img_arr, *other_arr))) + return self.predict_from_dict(input_dict) def predict_from_dict(self, input_dict) -> Sequence[Union[float, np.ndarray]]: 
pass @@ -120,20 +131,36 @@ def __init__(self): def set_model(self, pilot: 'KerasPilot') -> None: self.model = pilot.create_model() + # input_shape and output_shape in keras are unfortunately not a list + # if there is only a single input / output. So pack them into a list + # if they are single: + input_shape = self.model.input_shape + if type(input_shape) is not list: + input_shape = [input_shape] + output_shape = self.model.output_shape + if type(output_shape) is not list: + output_shape = [output_shape] + + self.input_keys = self.model.input_names + self.output_keys = self.model.output_names + self.shapes = (dict(zip(self.input_keys, input_shape)), + dict(zip(self.output_keys, output_shape))) def set_optimizer(self, optimizer: tf.keras.optimizers.Optimizer) -> None: self.model.optimizer = optimizer - def get_input_shapes(self) -> List[tf.TensorShape]: + def get_input_shape(self, input_name) -> tf.TensorShape: assert self.model, 'Model not set' - return [inp.shape for inp in self.model.inputs] + return self.shapes[0][input_name] def compile(self, **kwargs): assert self.model, 'Model not set' self.model.compile(**kwargs) - def invoke(self, inputs): - outputs = self.model(inputs, training=False) + def predict_from_dict(self, input_dict): + for k, v in input_dict.items(): + input_dict[k] = self.expand_and_convert(v) + outputs = self.model(input_dict, training=False) # for functional models the output here is a list if type(outputs) is list: # as we invoke the interpreter with a batch size of one we remove @@ -144,20 +171,6 @@ def invoke(self, inputs): else: return outputs.numpy().squeeze(axis=0) - def predict(self, img_arr: np.ndarray, other_arr: np.ndarray) \ - -> Sequence[Union[float, np.ndarray]]: - img_arr = np.expand_dims(img_arr, axis=0) - inputs = img_arr - if other_arr is not None: - other_arr = np.expand_dims(other_arr, axis=0) - inputs = [img_arr, other_arr] - return self.invoke(inputs) - - def predict_from_dict(self, input_dict): - for k, v in 
input_dict.items(): - input_dict[k] = np.expand_dims(v, axis=0) - return self.invoke(input_dict) - def load(self, model_path: str) -> None: logger.info(f'Loading model {model_path}') self.model = keras.models.load_model(model_path, compile=False) @@ -170,14 +183,19 @@ def load_weights(self, model_path: str, by_name: bool = True) -> \ def summary(self) -> str: return self.model.summary() + @staticmethod + def expand_and_convert(arr): + """ Helper function. """ + # expand each input to shape from [x, y, z] to [1, x, y, z] and + arr_exp = np.expand_dims(arr, axis=0) + return arr_exp + class FastAIInterpreter(Interpreter): def __init__(self): super().__init__() - self.model: None - from fastai import learner as fastai_learner - from fastai import optimizer as fastai_optimizer + self.model = None def set_model(self, pilot: 'FastAiPilot') -> None: self.model = pilot.create_model() @@ -185,9 +203,9 @@ def set_model(self, pilot: 'FastAiPilot') -> None: def set_optimizer(self, optimizer: 'fastai_optimizer') -> None: self.model.optimizer = optimizer - def get_input_shapes(self): + def get_input_shape(self, input_name): assert self.model, 'Model not set' - return [inp.shape for inp in self.model.inputs] + return self.model.inputs[0].shape def compile(self, **kwargs): pass @@ -209,7 +227,6 @@ def predict(self, img_arr: np.ndarray, other_arr: np.ndarray) \ import torch inputs = torch.unsqueeze(img_arr, 0) if other_arr is not None: - #other_arr = np.expand_dims(other_arr, axis=0) inputs = [img_arr, other_arr] return self.invoke(inputs) @@ -238,65 +255,45 @@ class TfLite(Interpreter): def __init__(self): super().__init__() self.interpreter = None - self.input_shapes = None - self.input_details = None - self.output_details = None + self.runner = None + self.signatures = None def load(self, model_path): assert os.path.splitext(model_path)[1] == '.tflite', \ 'TFlitePilot should load only .tflite files' logger.info(f'Loading model {model_path}') - # Load TFLite model and allocate 
tensors. + # Load TFLite model and extract input and output keys self.interpreter = tf.lite.Interpreter(model_path=model_path) - self.interpreter.allocate_tensors() - - # Get input and output tensors. - self.input_details = self.interpreter.get_input_details() - self.output_details = self.interpreter.get_output_details() - - # Get Input shape - self.input_shapes = [] - logger.info('Load model with tflite input tensor details:') - for detail in self.input_details: - logger.debug(detail) - self.input_shapes.append(detail['shape']) + self.signatures = self.interpreter.get_signature_list() + self.runner = self.interpreter.get_signature_runner() + self.input_keys = self.signatures['serving_default']['inputs'] + self.output_keys = self.signatures['serving_default']['outputs'] def compile(self, **kwargs): pass - def invoke(self) -> Sequence[Union[float, np.ndarray]]: - self.interpreter.invoke() - outputs = [] - for tensor in self.output_details: - output_data = self.interpreter.get_tensor(tensor['index']) - # as we invoke the interpreter with a batch size of one we remove - # the additional dimension here again - outputs.append(output_data[0]) - # don't return list if output is 1d - return outputs if len(outputs) > 1 else outputs[0] - - def predict(self, img_arr, other_arr) \ - -> Sequence[Union[float, np.ndarray]]: - assert self.input_shapes and self.input_details, \ - "Tflite model not loaded" - input_arrays = (img_arr, other_arr) - for arr, shape, detail \ - in zip(input_arrays, self.input_shapes, self.input_details): - in_data = arr.reshape(shape).astype(np.float32) - self.interpreter.set_tensor(detail['index'], in_data) - return self.invoke() - def predict_from_dict(self, input_dict): - for detail in self.input_details: - k = detail['name'] - inp_k = input_dict[k] - inp_k_res = inp_k.reshape(detail['shape']).astype(np.float32) - self.interpreter.set_tensor(detail['index'], inp_k_res) - return self.invoke() + for k, v in input_dict.items(): + input_dict[k] = 
self.expand_and_convert(v) + outputs = self.runner(**input_dict) + ret = list(outputs[k][0] for k in self.output_keys) + return ret if len(ret) > 1 else ret[0] + + def get_input_shape(self, input_name): + assert self.interpreter is not None, "Need to load tflite model first" + details = self.interpreter.get_input_details() + for detail in details: + if detail['name'] == f"serving_default_{input_name}:0": + return detail['shape'] + raise RuntimeError(f'{input_name} not found in TFlite model') - def get_input_shapes(self): - assert self.input_shapes is not None, "Need to load model first" - return self.input_shapes + @staticmethod + def expand_and_convert(arr): + """ Helper function. """ + # expand each input to shape from [x, y, z] to [1, x, y, z] and + # convert to float32 for expression: + arr_exp = np.expand_dims(arr, axis=0).astype(np.float32) + return arr_exp class TensorRT(Interpreter): @@ -304,57 +301,65 @@ class TensorRT(Interpreter): Uses TensorRT to do the inference. """ def __init__(self): - self.frozen_func = None - self.input_shapes = None + super().__init__() + self.graph_func = None + self.pilot = None - def get_input_shapes(self) -> List[tf.TensorShape]: - return self.input_shapes + def set_model(self, pilot: 'KerasPilot') -> None: + # We can't determine the output shape neither here, nor in the + # constructor, because calling output_shapes() on the model will call + # get_input_shape() from the interpreter which will fail at that + # state as the trt model hasn't been loaded yet + self.pilot = pilot + + def get_input_shape(self, input_name) -> tf.TensorShape: + assert self.graph_func, "Requires loadin the tensorrt model first" + return self.graph_func.structured_input_signature[1][input_name].shape def compile(self, **kwargs): pass def load(self, model_path: str) -> None: - saved_model_loaded = tf.saved_model.load(model_path, - tags=[tag_constants.SERVING]) - graph_func = saved_model_loaded.signatures[ - 
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] - self.frozen_func = convert_var_to_const(graph_func) - self.input_shapes = [inp.shape for inp in graph_func.inputs] - - def predict(self, img_arr: np.ndarray, other_arr: np.ndarray) \ - -> Sequence[Union[float, np.ndarray]]: - # first reshape as usual - img_arr = np.expand_dims(img_arr, axis=0).astype(np.float32) - img_tensor = self.convert(img_arr) - if other_arr is not None: - other_arr = np.expand_dims(other_arr, axis=0).astype(np.float32) - other_tensor = self.convert(other_arr) - output_tensors = self.frozen_func(img_tensor, other_tensor) - else: - output_tensors = self.frozen_func(img_tensor) - - # because we send a batch of size one, pick first element - outputs = [out.numpy().squeeze(axis=0) for out in output_tensors] - # don't return list if output is 1d - return outputs if len(outputs) > 1 else outputs[0] + logger.info(f'Loading TensorRT model {model_path}') + assert self.pilot, "Need to set pilot first" + try: + ext = os.path.splitext(model_path)[1] + if ext == '.savedmodel': + # first load tf model format to extract input and output keys + model = tf.keras.models.load_model(model_path, compile=False) + self.input_keys = model.input_names + self.output_keys = model.output_names + converter \ + = trt.TrtGraphConverterV2(input_saved_model_dir=model_path) + self.graph_func = converter.convert() + else: + trt_model_loaded = tf.saved_model.load( + model_path, tags=[tag_constants.SERVING]) + self.graph_func = trt_model_loaded.signatures[ + signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] + inputs, outputs = self.pilot.output_shapes() + self.input_keys = list(inputs.keys()) + self.output_keys = list(outputs.keys()) + logger.info(f'Finished loading TensorRT model.') + except Exception as e: + logger.error(f'Could not load TensorRT model because: {e}') def predict_from_dict(self, input_dict): - args = [] - for inp in self.frozen_func.inputs: - name = inp.name.split(':')[0] - val = input_dict[name] - 
val_res = np.expand_dims(val, axis=0).astype(np.float32) - val_conv = self.convert(val_res) - args.append(val_conv) - output_tensors = self.frozen_func(*args) - # because we send a batch of size one, pick first element - outputs = [out.numpy().squeeze(axis=0) for out in output_tensors] + for k, v in input_dict.items(): + input_dict[k] = self.expand_and_convert(v) + out_dict = self.graph_func(**input_dict) + # Squeeze here because we send a batch of size one, so pick first + # element. To return the order of outputs as defined in the model we + # need to iterate through the model's output shapes here + outputs = [out_dict[k].numpy().squeeze(axis=0) for k in + self.output_keys] # don't return list if output is 1d return outputs if len(outputs) > 1 else outputs[0] @staticmethod - def convert(arr): + def expand_and_convert(arr): """ Helper function. """ - value = tf.compat.v1.get_variable("features", dtype=tf.float32, - initializer=tf.constant(arr)) - return tf.convert_to_tensor(value=value) + # expand each input to shape from [x, y, z] to [1, x, y, z] and + # convert to float32 for expression: + arr_exp = np.expand_dims(arr, axis=0) + return tf.convert_to_tensor(value=arr_exp, dtype=tf.float32) diff --git a/donkeycar/parts/keras.py b/donkeycar/parts/keras.py index 7e8ffe487..9c89f81a5 100644 --- a/donkeycar/parts/keras.py +++ b/donkeycar/parts/keras.py @@ -7,12 +7,12 @@ include one or more models to help direct the vehicles motion. 
""" - +import datetime from abc import ABC, abstractmethod from collections import deque import numpy as np -from typing import Dict, Tuple, Optional, Union, List, Sequence, Callable +from typing import Dict, Tuple, Optional, Union, List, Sequence, Callable, Any from logging import getLogger from tensorflow.python.data.ops.dataset_ops import DatasetV1, DatasetV2 @@ -24,16 +24,14 @@ import tensorflow as tf from tensorflow import keras -from tensorflow.keras.layers import Input, Dense -from tensorflow.keras.layers import Convolution2D, MaxPooling2D, \ - BatchNormalization -from tensorflow.keras.layers import Activation, Dropout, Flatten -from tensorflow.keras.layers import LSTM +from tensorflow.keras.layers import (Dense, Input,Convolution2D, + MaxPooling2D, Activation, Dropout, Flatten, LSTM, BatchNormalization, + Conv3D, MaxPooling3D, Conv2DTranspose) + from tensorflow.keras.layers import TimeDistributed as TD -from tensorflow.keras.layers import Conv3D, MaxPooling3D, Conv2DTranspose from tensorflow.keras.backend import concatenate from tensorflow.keras.models import Model -from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint +from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint ONE_BYTE_SCALE = 1.0 / 255.0 @@ -88,13 +86,13 @@ def set_optimizer(self, optimizer_type: str, raise Exception(f"Unknown optimizer type: {optimizer_type}") self.interpreter.set_optimizer(optimizer) - def get_input_shapes(self) -> List[tf.TensorShape]: - return self.interpreter.get_input_shapes() + def get_input_shape(self, input_name) -> tf.TensorShape: + return self.interpreter.get_input_shape(input_name) def seq_size(self) -> int: return 0 - def run(self, img_arr: np.ndarray, other_arr: List[float] = None) \ + def run(self, img_arr: np.ndarray, *other_arr: List[float]) \ -> Tuple[Union[float, np.ndarray], ...]: """ Donkeycar parts interface to run the part in the loop. 
@@ -105,22 +103,17 @@ def run(self, img_arr: np.ndarray, other_arr: List[float] = None) \ state vector in the Behavioural model :return: tuple of (angle, throttle) """ - norm_arr = normalize_image(img_arr) - np_other_array = np.array(other_arr) if other_arr else None - return self.inference(norm_arr, np_other_array) - - def inference(self, img_arr: np.ndarray, other_arr: Optional[np.ndarray]) \ - -> Tuple[Union[float, np.ndarray], ...]: - """ Inferencing using the interpreter - :param img_arr: float32 [0,1] numpy array with normalized image - data - :param other_arr: numpy array of additional data to be used in the - pilot, like IMU array for the IMU model or a - state vector in the Behavioural model - :return: tuple of (angle, throttle) - """ - out = self.interpreter.predict(img_arr, other_arr) - return self.interpreter_to_output(out) + norm_img_arr = normalize_image(img_arr) + np_other_array = tuple(np.array(arr) for arr in other_arr) + # create dictionary on the fly, we expect the order of the arguments: + # img_arr, *other_arr to exactly match the order of the + # self.output_shape() first dictionary keys, because that's how we + # set up the model + values = (norm_img_arr, ) + np_other_array + # note output_shapes() returns a 2-tuple of dicts for input shapes + # and output shapes(), so we need the first tuple here + input_dict = dict(zip(self.output_shapes()[0].keys(), values)) + return self.inference_from_dict(input_dict) def inference_from_dict(self, input_dict: Dict[str, np.ndarray]) \ -> Tuple[Union[float, np.ndarray], ...]: @@ -170,6 +163,8 @@ def train(self, save_best_only=True, verbose=verbose)] + tic = datetime.datetime.now() + logger.info('////////// Starting training //////////') history: tf.keras.callbacks.History = model.fit( x=train_data, steps_per_epoch=train_steps, @@ -181,7 +176,9 @@ def train(self, verbose=verbose, workers=1, use_multiprocessing=False) - + toc = datetime.datetime.now() + logger.info(f'////////// Finished training in: {toc - 
tic} //////////') + if show_plot: try: import matplotlib.pyplot as plt @@ -271,8 +268,8 @@ def __init__(self, interpreter: Interpreter = KerasInterpreter(), input_shape: Tuple[int, ...] = (120, 160, 3), throttle_range: float = 0.5): - super().__init__(interpreter, input_shape) self.throttle_range = throttle_range + super().__init__(interpreter, input_shape) def create_model(self): return default_categorical(self.input_shape) @@ -304,12 +301,16 @@ def y_transform(self, record: Union[TubRecord, List[TubRecord]]) \ def output_shapes(self): # need to cut off None from [None, 120, 160, 3] tensor shape - img_shape = self.get_input_shapes()[0][1:] + img_shape = self.get_input_shape('img_in')[1:] shapes = ({'img_in': tf.TensorShape(img_shape)}, {'angle_out': tf.TensorShape([15]), 'throttle_out': tf.TensorShape([20])}) return shapes + def __str__(self) -> str: + """ For printing model initialisation """ + return super().__str__() + f'-R:{self.throttle_range}' + class KerasLinear(KerasPilot): """ @@ -344,7 +345,7 @@ def y_transform(self, record: Union[TubRecord, List[TubRecord]]) \ def output_shapes(self): # need to cut off None from [None, 120, 160, 3] tensor shape - img_shape = self.get_input_shapes()[0][1:] + img_shape = self.get_input_shape('img_in')[1:] shapes = ({'img_in': tf.TensorShape(img_shape)}, {'n_outputs0': tf.TensorShape([]), 'n_outputs1': tf.TensorShape([])}) @@ -362,33 +363,46 @@ def __init__(self, input_shape: Tuple[int, ...] 
= (120, 160, 3), mem_length: int = 3, mem_depth: int = 0, - mem_start_speed: float = 0.0): + mem_start_speed: float = 0.0, + **kwargs): self.mem_length = mem_length self.mem_start_speed = mem_start_speed - self.mem_seq = deque([[0, mem_start_speed]] * mem_length) + # create memory of [angle=0, throttle=mem_start_speed] * mem_length + self.mem_seq = deque([[0.0, mem_start_speed]] * mem_length) self.mem_depth = mem_depth - super().__init__(interpreter, input_shape) + super().__init__(interpreter, input_shape, **kwargs) def seq_size(self) -> int: return self.mem_length + 1 def create_model(self): return default_memory(self.input_shape, - self.mem_length, self.mem_depth, ) + self.mem_length, self.mem_depth) def load(self, model_path: str) -> None: super().load(model_path) - self.mem_length = self.interpreter.get_input_shapes()[1][1] // 2 - self.mem_seq = deque([[0, self.mem_start_speed]] * self.mem_length) - logger.info(f'Loaded memory model with mem length {self.mem_length}') - - def run(self, img_arr: np.ndarray, other_arr: List[float] = None) -> \ + mem_shape = self.interpreter.get_input_shape('mem_in') + # take the mem_shape (index 1), the length (index 1) and divide by 2.
+ self.mem_length = mem_shape[1] // 2 + # create memory of [angle=0, throttle=mem_start_speed] * mem_length + self.mem_seq = deque([[0.0, self.mem_start_speed]] * self.mem_length) + logger.info(f'Loaded {type(self).__name__} model with mem length' + f' {self.mem_length}') + + def run(self, img_arr: np.ndarray, *other_arr: List[float]) -> \ Tuple[Union[float, np.ndarray], ...]: # Only called at start to fill the previous values - np_mem_arr = np.array(self.mem_seq).reshape((2 * self.mem_length,)) - img_arr_norm = normalize_image(img_arr) - angle, throttle = super().inference(img_arr_norm, np_mem_arr) + norm_img_arr = normalize_image(img_arr) + # create dictionary on the fly, we expect the order of the arguments: + # img_arr, *other_arr to exactly match the order of the + # self.output_shapes() first dictionary keys, because that's how we + # set up the model + values = (norm_img_arr, np_mem_arr) + # note output_shapes() returns a 2-tuple of dicts for input shapes + # and output shapes(), so we need the first tuple here + input_dict = dict(zip(self.output_shapes()[0].keys(), values)) + angle, throttle = self.inference_from_dict(input_dict) # fill new values into back of history list for next call self.mem_seq.popleft() self.mem_seq.append([angle, throttle]) @@ -418,7 +432,7 @@ def y_transform(self, records: Union[TubRecord, List[TubRecord]]) \ def output_shapes(self): # need to cut off None from [None, 120, 160, 3] tensor shape - img_shape = self.get_input_shapes()[0][1:] + img_shape = self.get_input_shape('img_in')[1:] shapes = ({'img_in': tf.TensorShape(img_shape), 'mem_in': tf.TensorShape(2 * self.mem_length)}, {'n_outputs0': tf.TensorShape([]), @@ -455,7 +469,7 @@ def y_transform(self, record: Union[TubRecord, List[TubRecord]]) \ def output_shapes(self): # need to cut off None from [None, 120, 160, 3] tensor shape - img_shape = self.get_input_shapes()[0][1:] + img_shape = self.get_input_shape('img_in')[1:] shapes = ({'img_in': tf.TensorShape(img_shape)},
{'n_outputs0': tf.TensorShape([])}) return shapes @@ -511,7 +525,7 @@ def y_transform(self, record: Union[TubRecord, List[TubRecord]]) \ def output_shapes(self): # need to cut off None from [None, 120, 160, 3] tensor shape - img_shape = self.get_input_shapes()[0][1:] + img_shape = self.get_input_shape('img_in')[1:] # the keys need to match the models input/output layers shapes = ({'img_in': tf.TensorShape(img_shape), 'imu_in': tf.TensorShape([self.num_imu_inputs])}, @@ -550,7 +564,7 @@ def x_transform( def output_shapes(self): # need to cut off None from [None, 120, 160, 3] tensor shape - img_shape = self.get_input_shapes()[0][1:] + img_shape = self.get_input_shape('img_in')[1:] # the keys need to match the models input/output layers shapes = ({'img_in': tf.TensorShape(img_shape), 'xbehavior_in': tf.TensorShape([self.num_behavior_inputs])}, @@ -578,7 +592,7 @@ def create_model(self): def compile(self): self.interpreter.compile(optimizer=self.optimizer, metrics=['acc'], loss='mse') - + def interpreter_to_output(self, interpreter_out) \ -> Tuple[Union[float, np.ndarray], ...]: angle, throttle, track_loc = interpreter_out @@ -597,7 +611,7 @@ def y_transform(self, record: Union[TubRecord, List[TubRecord]]) \ def output_shapes(self): # need to cut off None from [None, 120, 160, 3] tensor shape - img_shape = self.get_input_shapes()[0][1:] + img_shape = self.get_input_shape('img_in')[1:] # the keys need to match the models input/output layers shapes = ({'img_in': tf.TensorShape(img_shape)}, {'angle': tf.TensorShape([]), @@ -651,7 +665,7 @@ def y_transform(self, records: Union[TubRecord, List[TubRecord]]) \ throttle = records[-1].underlying['user/throttle'] return {'model_outputs': [angle, throttle]} - def run(self, img_arr, other_arr=None): + def run(self, img_arr, *other_arr): if img_arr.shape[2] == 3 and self.input_shape[2] == 1: img_arr = dk.utils.rgb2gray(img_arr) @@ -663,7 +677,8 @@ def run(self, img_arr, other_arr=None): new_shape = (self.seq_length, 
*self.input_shape) img_arr = np.array(self.img_seq).reshape(new_shape) img_arr_norm = normalize_image(img_arr) - return self.inference(img_arr_norm, other_arr) + input_dict = {'img_in': img_arr_norm} + return self.inference_from_dict(input_dict) def interpreter_to_output(self, interpreter_out) \ -> Tuple[Union[float, np.ndarray], ...]: @@ -673,7 +688,7 @@ def interpreter_to_output(self, interpreter_out) \ def output_shapes(self): # need to cut off None from [None, 120, 160, 3] tensor shape - img_shape = self.get_input_shapes()[0][1:] + img_shape = self.get_input_shape('img_in')[1:] # the keys need to match the models input/output layers shapes = ({'img_in': tf.TensorShape(img_shape)}, {'model_outputs': tf.TensorShape([self.num_outputs])}) @@ -727,7 +742,7 @@ def y_transform(self, records: Union[TubRecord, List[TubRecord]]) \ throttle = records[-1].underlying['user/throttle'] return {'outputs': [angle, throttle]} - def run(self, img_arr, other_arr=None): + def run(self, img_arr, *other_arr): if img_arr.shape[2] == 3 and self.input_shape[2] == 1: img_arr = dk.utils.rgb2gray(img_arr) @@ -739,7 +754,8 @@ def run(self, img_arr, other_arr=None): new_shape = (self.seq_length, *self.input_shape) img_arr = np.array(self.img_seq).reshape(new_shape) img_arr_norm = normalize_image(img_arr) - return self.inference(img_arr_norm, other_arr) + input_dict = {'img_in': img_arr_norm} + return self.inference_from_dict(input_dict) def interpreter_to_output(self, interpreter_out) \ -> Tuple[Union[float, np.ndarray], ...]: @@ -749,7 +765,7 @@ def interpreter_to_output(self, interpreter_out) \ def output_shapes(self): # need to cut off None from [None, 120, 160, 3] tensor shape - img_shape = self.get_input_shapes()[0][1:] + img_shape = self.get_input_shape('img_in')[1:] # the keys need to match the models input/output layers shapes = ({'img_in': tf.TensorShape(img_shape)}, {'outputs': tf.TensorShape([self.num_outputs])}) @@ -941,7 +957,7 @@ def default_bhv(num_bvh_inputs, input_shape): 
angle_out = Dense(15, activation='softmax', name='angle_out')(z) # Categorical output of throttle into 20 bins throttle_out = Dense(20, activation='softmax', name='throttle_out')(z) - + model = Model(inputs=[img_in, bvh_in], outputs=[angle_out, throttle_out], name='behavioral') return model @@ -1088,7 +1104,7 @@ def default_latent(num_outputs, input_shape): x = Convolution2D(64, 3, strides=2, activation='relu', name="conv2d_7")(x) x = Dropout(drop)(x) x = Convolution2D(64, 1, strides=2, activation='relu', name="latent")(x) - + y = Conv2DTranspose(filters=64, kernel_size=3, strides=2, name="deconv2d_1")(x) y = Conv2DTranspose(filters=64, kernel_size=3, strides=2, diff --git a/donkeycar/parts/network.py b/donkeycar/parts/network.py index 181fe2980..9acfae308 100644 --- a/donkeycar/parts/network.py +++ b/donkeycar/parts/network.py @@ -394,6 +394,7 @@ def test_pub_sub(ip): print("got:", res) time.sleep(1) + def test_udp_broadcast(ip): if ip is None: @@ -401,7 +402,7 @@ def test_udp_broadcast(ip): p = UDPValuePub('camera') from donkeycar.parts.camera import PiCamera from donkeycar.parts.image import ImgArrToJpg - cam = PiCamera(160, 120, 3, framerate=4) + cam = PiCamera(160, 120, 3) img_conv = ImgArrToJpg() time.sleep(1) @@ -420,13 +421,14 @@ def test_udp_broadcast(ip): res = s.run() time.sleep(0.1) + def test_tcp_client_server(ip): if ip is None: p = TCPServeValue("camera") from donkeycar.parts.camera import PiCamera from donkeycar.parts.image import ImgArrToJpg - cam = PiCamera(160, 120, 3, framerate=4) + cam = PiCamera(160, 120, 3) img_conv = ImgArrToJpg() while True: cam_img = cam.run() @@ -439,6 +441,7 @@ def test_tcp_client_server(ip): c.run() time.sleep(0.01) + def test_mqtt_pub_sub(ip): if ip is None: @@ -446,7 +449,7 @@ def test_mqtt_pub_sub(ip): p = MQTTValuePub('donkey/camera') from donkeycar.parts.camera import PiCamera from donkeycar.parts.image import ImgArrToJpg - cam = PiCamera(160, 120, 3, framerate=4) + cam = PiCamera(160, 120, 3) img_conv = 
ImgArrToJpg() while True: cam_img = cam.run() @@ -463,6 +466,7 @@ def test_mqtt_pub_sub(ip): print("got:", res) time.sleep(0.1) + if __name__ == "__main__": import time import sys diff --git a/donkeycar/parts/tub_v2.py b/donkeycar/parts/tub_v2.py index 07c2faefc..1a804379b 100644 --- a/donkeycar/parts/tub_v2.py +++ b/donkeycar/parts/tub_v2.py @@ -6,10 +6,14 @@ import numpy as np from PIL import Image +import logging from donkeycar.parts.datastore_v2 import Manifest, ManifestIterator +logger = logging.getLogger(__name__) + + class Tub(object): """ A datastore to store sensor data in a key, value format. \n @@ -92,6 +96,7 @@ def restore_records(self, record_indexes): self.manifest.restore_records(record_indexes) def close(self): + logger.info(f'Closing tub {self.base_path}') self.manifest.close() def __iter__(self): diff --git a/donkeycar/parts/web_controller/web.py b/donkeycar/parts/web_controller/web.py index 7a8eef072..8af4669fc 100644 --- a/donkeycar/parts/web_controller/web.py +++ b/donkeycar/parts/web_controller/web.py @@ -102,12 +102,11 @@ def shutdown(self): class LocalWebController(tornado.web.Application): def __init__(self, port=8887, mode='user'): - ''' + """ Create and publish variables needed on many of the web handlers. - ''' - - print('Starting Donkey Server...', end='') + """ + logger.info('Starting Donkey Server...') this_dir = os.path.dirname(os.path.realpath(__file__)) self.static_file_path = os.path.join(this_dir, 'templates', 'static') @@ -141,11 +140,11 @@ def __init__(self, port=8887, mode='user'): settings = {'debug': True} super().__init__(handlers, **settings) - print("... you can now go to {}.local:{} to drive " - "your car.".format(gethostname(), port)) + logger.info(f"You can now go to {gethostname()}.local:{port} to " + f"drive your car.") def update(self): - ''' Start the tornado webserver. ''' + """ Start the tornado webserver. 
""" asyncio.set_event_loop(asyncio.new_event_loop()) self.listen(self.port) self.loop = IOLoop.instance() @@ -159,7 +158,8 @@ def update_wsclients(self, data): logger.debug(f"Updating web client: {data_str}") wsclient.write_message(data_str) except Exception as e: - logger.warn("Error writing websocket message", exc_info=e) + logger.warning("Error writing websocket message", + exc_info=e) pass def run_threaded(self, img_arr=None, num_records=0, mode=None, recording=None): @@ -280,7 +280,7 @@ def check_origin(self, origin): return True def open(self): - print("New client connected") + logger.info("New client connected") self.application.wsclients.append(self) def on_message(self, message): @@ -297,7 +297,7 @@ def on_message(self, message): latch_buttons(self.application.buttons, data['buttons']) def on_close(self): - # print("Client disconnected") + logger.info("Client disconnected") self.application.wsclients.remove(self) @@ -306,10 +306,10 @@ def check_origin(self, origin): return True def open(self): - print("New client connected") + logger.info("New client connected") def on_message(self, message): - print(f"wsCalibrate {message}") + logger.info(f"wsCalibrate {message}") data = json.loads(message) if 'throttle' in data: print(data['throttle']) @@ -347,13 +347,14 @@ def on_message(self, message): self.application.drive_train.MAX_REVERSE = config['MM1_MAX_REVERSE'] def on_close(self): - print("Client disconnected") + logger.info("Client disconnected") class VideoAPI(RequestHandler): ''' Serves a MJPEG of the images posted from the vehicle. ''' + async def get(self): placeholder_image = utils.load_image_sized( os.path.join(self.application.static_file_path, @@ -366,7 +367,7 @@ async def get(self): my_boundary = "--boundarydonotcross\n" while True: - interval = .01 + interval = .005 if served_image_timestamp + interval < time.time(): # # if we have an image, then use it. 
@@ -420,9 +421,10 @@ def __init__(self, port=8890): ] settings = {'debug': True} + self.img_arr = None super().__init__(handlers, **settings) - print("Started Web FPV server. You can now go to {}.local:{} to " - "view the car camera".format(gethostname(), self.port)) + logger.info(f"Started Web FPV server. You can now go to " + f"{gethostname()}.local:{self.port} to view the car camera") def update(self): """ Start the tornado webserver. """ diff --git a/donkeycar/pipeline/database.py b/donkeycar/pipeline/database.py index ac6e44dc8..4dc5a52fd 100644 --- a/donkeycar/pipeline/database.py +++ b/donkeycar/pipeline/database.py @@ -43,7 +43,10 @@ def generate_model_name(self) -> Tuple[str, int]: else: this_num = 0 date = time.strftime('%y-%m-%d') - name = f'pilot_{date}_{this_num}.h5' + ext = 'h5' if getattr(self.cfg, 'SAVE_MODEL_AS_H5', False) \ + else 'savedmodel' + name = f'pilot_{date}_{this_num}.{ext}' + return os.path.join(self.cfg.MODELS_PATH, name), this_num def to_df(self) -> pd.DataFrame: @@ -118,7 +121,7 @@ def time_fmt(t): return datetime.fromtimestamp(t).strftime(fmt) def transfer_fmt(model_name): - return model_name.replace('.h5', '') + return model_name.replace('.h5', '').replace('.savedmodel', '') return {'Time': time_fmt, 'Transfer': transfer_fmt} diff --git a/donkeycar/pipeline/sequence.py b/donkeycar/pipeline/sequence.py index 3449ebd53..5a37da164 100644 --- a/donkeycar/pipeline/sequence.py +++ b/donkeycar/pipeline/sequence.py @@ -1,5 +1,6 @@ -from typing import (Any, Callable, Generic, Iterable, Iterator, List, Sized, Tuple, TypeVar) +from typing import (Any, Callable, Generic, Iterable, Iterator, List, Sized, + Tuple, TypeVar) from donkeycar.pipeline.types import TubRecord @@ -13,7 +14,7 @@ YOut = TypeVar('YOut', covariant=True) -class SizedIterator(Generic[X], Iterator[X], Sized): +class SizedIterator(Generic[R], Sized): def __init__(self) -> None: # This is a protocol type without explicitly using a `Protocol` # Using `Protocol` requires Python 
3.7 @@ -52,20 +53,25 @@ def __init__(self, self.x_transform = x_transform self.y_transform = y_transform self.iterator = BaseTfmIterator_( - iterable=self.iterable, x_transform=self.x_transform, y_transform=self.y_transform) + iterable=self.iterable, + x_transform=self.x_transform, + y_transform=self.y_transform) def __len__(self): return len(self.iterator) def __iter__(self) -> SizedIterator[Tuple[XOut, YOut]]: return BaseTfmIterator_( - iterable=self.iterable, x_transform=self.x_transform, y_transform=self.y_transform) + iterable=self.iterable, + x_transform=self.x_transform, + y_transform=self.y_transform) def __next__(self): return next(self.iterator) -class TfmTupleIterator(Generic[X, Y, XOut, YOut], SizedIterator[Tuple[XOut, YOut]]): +class TfmTupleIterator(Generic[X, Y, XOut, YOut], + SizedIterator[Tuple[XOut, YOut]]): def __init__(self, iterable: Iterable[Tuple[X, Y]], x_transform: Callable[[X], XOut], @@ -75,14 +81,18 @@ def __init__(self, self.x_transform = x_transform self.y_transform = y_transform self.iterator = BaseTfmIterator_( - iterable=self.iterable, x_transform=self.x_transform, y_transform=self.y_transform) + iterable=self.iterable, + x_transform=self.x_transform, + y_transform=self.y_transform) def __len__(self): return len(self.iterator) def __iter__(self) -> SizedIterator[Tuple[XOut, YOut]]: return BaseTfmIterator_( - iterable=self.iterable, x_transform=self.x_transform, y_transform=self.y_transform) + iterable=self.iterable, + x_transform=self.x_transform, + y_transform=self.y_transform) def __next__(self): return next(self.iterator) @@ -109,7 +119,8 @@ def __len__(self): return len(self.iterator) def __iter__(self) -> SizedIterator[Tuple[XOut, YOut]]: - return BaseTfmIterator_(self.iterable, self.x_transform, self.y_transform) + return BaseTfmIterator_( + self.iterable, self.x_transform, self.y_transform) def __next__(self): record = next(self.iterator) @@ -134,22 +145,30 @@ def build_pipeline(self, x_transform: Callable[[TubRecord], X], 
y_transform: Callable[[TubRecord], Y]) \ -> TfmIterator: - return TfmIterator(self, x_transform=x_transform, y_transform=y_transform) + return TfmIterator(self, + x_transform=x_transform, + y_transform=y_transform) @classmethod def map_pipeline( cls, x_transform: Callable[[X], XOut], y_transform: Callable[[Y], YOut], - pipeline: SizedIterator[Tuple[X, Y]]) -> SizedIterator[Tuple[XOut, YOut]]: - return TfmTupleIterator(pipeline, x_transform=x_transform, y_transform=y_transform) + pipeline: SizedIterator[Tuple[X, Y]]) \ + -> SizedIterator[Tuple[XOut, YOut]]: + return TfmTupleIterator(pipeline, + x_transform=x_transform, + y_transform=y_transform) @classmethod def map_pipeline_factory( cls, x_transform: Callable[[X], XOut], y_transform: Callable[[Y], YOut], - factory: Callable[[], SizedIterator[Tuple[X, Y]]]) -> SizedIterator[Tuple[XOut, YOut]]: + factory: Callable[[], SizedIterator[Tuple[X, Y]]]) \ + -> SizedIterator[Tuple[XOut, YOut]]: pipeline = factory() - return cls.map_pipeline(pipeline=pipeline, x_transform=x_transform, y_transform=y_transform) + return cls.map_pipeline(pipeline=pipeline, + x_transform=x_transform, + y_transform=y_transform) diff --git a/donkeycar/pipeline/training.py b/donkeycar/pipeline/training.py index 5dd2714ea..9f0a8d3dd 100644 --- a/donkeycar/pipeline/training.py +++ b/donkeycar/pipeline/training.py @@ -2,6 +2,7 @@ import os from time import time from typing import List, Dict, Union, Tuple +import logging from tensorflow.python.keras.models import load_model @@ -18,6 +19,8 @@ import tensorflow as tf import numpy as np +logger = logging.getLogger(__name__) + class BatchSequence(object): """ @@ -109,12 +112,12 @@ def train(cfg: Config, tub_paths: str, model: str = None, model_path, model_num = \ get_model_train_details(database, model) - base_path = os.path.splitext(model_path)[0] + base_path, ext = tuple(os.path.splitext(model_path)) kl = get_model_by_type(model_type, cfg) if transfer: kl.load(transfer) if cfg.PRINT_MODEL_SUMMARY: - 
print(kl.interpreter.summary()) + kl.interpreter.summary() tubs = tub_paths.split(',') all_tub_paths = [os.path.expanduser(tub) for tub in tubs] @@ -123,8 +126,9 @@ def train(cfg: Config, tub_paths: str, model: str = None, training_records, validation_records \ = train_test_split(dataset.get_records(), shuffle=True, test_size=(1. - cfg.TRAIN_TEST_SPLIT)) - print(f'Records # Training {len(training_records)}') - print(f'Records # Validation {len(validation_records)}') + logger.info(f'Records # Training {len(training_records)}') + logger.info(f'Records # Validation {len(validation_records)}') + dataset.close() # We need augmentation in validation when using crop / trapeze @@ -148,7 +152,8 @@ def train(cfg: Config, tub_paths: str, model: str = None, assert val_size > 0, "Not enough validation data, decrease the batch " \ "size or add more data." - + logger.info(f'Train with image caching: ' + f'{getattr(cfg, "CACHE_IMAGES", True)}') history = kl.train(model_path=model_path, train_data=dataset_train, train_steps=train_size, @@ -161,15 +166,21 @@ def train(cfg: Config, tub_paths: str, model: str = None, patience=cfg.EARLY_STOP_PATIENCE, show_plot=cfg.SHOW_PLOT) + # We are doing the tflite/trt conversion here on a previously saved model + # and not on the kl.interpreter.model object directly. The reason is that + # we want to convert the best model which is not the model in its current + # state, but in the state it was saved the last time during training. if getattr(cfg, 'CREATE_TF_LITE', True): tf_lite_model_path = f'{base_path}.tflite' keras_model_to_tflite(model_path, tf_lite_model_path) if getattr(cfg, 'CREATE_TENSOR_RT', False): - # load h5 (ie. keras) model - model_rt = load_model(model_path) - # save in tensorflow savedmodel format (i.e. 
directory) - model_rt.save(f'{base_path}.savedmodel') + # convert .h5 model to .savedmodel, only if we are using h5 format + if ext == '.h5': + logger.info(f"Converting from .h5 to .savedmodel first") + model_tmp = load_model(model_path, compile=False) + # save in tensorflow savedmodel format (i.e. directory) + model_tmp.save(f'{base_path}.savedmodel') # pass savedmodel to the rt converter saved_model_to_tensor_rt(f'{base_path}.savedmodel', f'{base_path}.trt') @@ -182,7 +193,7 @@ def train(cfg: Config, tub_paths: str, model: str = None, 'History': history, 'Transfer': os.path.basename(transfer) if transfer else None, 'Comment': comment, - 'Config': str(cfg) + 'Config': cfg.__dict__ } database.add_entry(database_entry) database.write() diff --git a/donkeycar/pipeline/types.py b/donkeycar/pipeline/types.py index 6885918b1..3cc61cfd7 100644 --- a/donkeycar/pipeline/types.py +++ b/donkeycar/pipeline/types.py @@ -17,6 +17,7 @@ 'TubRecordDict', { '_index': int, + '_session_id': str, 'cam/image_array': str, 'user/angle': float, 'user/throttle': float, @@ -103,6 +104,10 @@ def get_records(self): self.records = list(seq) return self.records + def close(self): + for tub in self.tubs: + tub.close() + class Collator(Iterable[List[TubRecord]]): """" Builds a sequence of continuous records for RNN and similar models. 
""" diff --git a/donkeycar/templates/basic.py b/donkeycar/templates/basic.py index 3c0bdc9cd..7d2b71aeb 100755 --- a/donkeycar/templates/basic.py +++ b/donkeycar/templates/basic.py @@ -73,7 +73,7 @@ def drive(cfg, model_path=None, model_type=None): elif cfg.CAMERA_TYPE == "PICAM": from donkeycar.parts.camera import PiCamera cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, - image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, + image_d=cfg.IMAGE_DEPTH, vflip=cfg.CAMERA_VFLIP, hflip=cfg.CAMERA_HFLIP) elif cfg.CAMERA_TYPE == "WEBCAM": from donkeycar.parts.camera import Webcam @@ -191,11 +191,12 @@ def drive(cfg, model_path=None, model_type=None): types = ['image_array', 'float', 'float', 'str'] # do we want to store new records into own dir or append to existing - tub_path = TubHandler(path=cfg.DATA_PATH).create_tub_path() if \ - cfg.AUTO_CREATE_NEW_TUB else cfg.DATA_PATH - tub_writer = TubWriter(base_path=tub_path, inputs=inputs, types=types) - car.add(tub_writer, inputs=inputs, outputs=["tub/num_records"], - run_condition='recording') + if model_path is None or cfg.RECORD_DURING_AI: + tub_path = TubHandler(path=cfg.DATA_PATH).create_tub_path() if \ + cfg.AUTO_CREATE_NEW_TUB else cfg.DATA_PATH + tub_writer = TubWriter(base_path=tub_path, inputs=inputs, types=types) + car.add(tub_writer, inputs=inputs, outputs=["tub/num_records"], + run_condition='recording') if not model_path and cfg.USE_RC: tub_wiper = TubWiper(tub_writer.tub, num_records=cfg.DRIVE_LOOP_HZ) car.add(tub_wiper, inputs=['user/wiper_on']) diff --git a/donkeycar/templates/cfg_basic.py b/donkeycar/templates/cfg_basic.py index 90549af18..f718e8ae4 100755 --- a/donkeycar/templates/cfg_basic.py +++ b/donkeycar/templates/cfg_basic.py @@ -75,6 +75,7 @@ DEFAULT_MODEL_TYPE = 'linear' #(linear|categorical|rnn|imu|behavior|3d|localizer|latent) CREATE_TF_LITE = True # automatically create tflite model in training CREATE_TENSOR_RT = False # automatically create tensorrt model in training 
+SAVE_MODEL_AS_H5 = False # if old keras format should be used instead of savedmodel BATCH_SIZE = 128 TRAIN_TEST_SPLIT = 0.8 MAX_EPOCHS = 100 @@ -87,6 +88,7 @@ OPTIMIZER = None #adam, sgd, rmsprop, etc.. None accepts default LEARNING_RATE = 0.001 #only used when OPTIMIZER specified LEARNING_RATE_DECAY = 0.0 #only used when OPTIMIZER specified +CACHE_IMAGES = True # if images are cached in training for speed up PRUNE_CNN = False PRUNE_PERCENT_TARGET = 75 # The desired percentage of pruning. PRUNE_PERCENT_PER_ITERATION = 20 # Percenge of pruning that is perform per iteration. diff --git a/donkeycar/templates/cfg_complete.py b/donkeycar/templates/cfg_complete.py index 108cecaec..ac790f202 100644 --- a/donkeycar/templates/cfg_complete.py +++ b/donkeycar/templates/cfg_complete.py @@ -383,6 +383,8 @@ SEND_BEST_MODEL_TO_PI = False #change to true to automatically send best model during training CREATE_TF_LITE = True # automatically create tflite model in training CREATE_TENSOR_RT = False # automatically create tensorrt model in training +SAVE_MODEL_AS_H5 = False # if old keras format should be used instead of savedmodel +CACHE_IMAGES = True # if images are cached in training for speed up PRUNE_CNN = False #This will remove weights from your model. The primary goal is to increase performance. PRUNE_PERCENT_TARGET = 75 # The desired percentage of pruning. 
diff --git a/donkeycar/templates/complete.py b/donkeycar/templates/complete.py index e2c807a78..32c4a7168 100644 --- a/donkeycar/templates/complete.py +++ b/donkeycar/templates/complete.py @@ -315,7 +315,7 @@ def load_model_json(kl, json_fnm): # model_reload_cb = None if '.h5' in model_path or '.trt' in model_path or '.tflite' in \ - model_path or '.savedmodel' in model_path or '.pth': + model_path or '.savedmodel' in model_path or '.pth' in model_path: # load the whole model with weigths, etc load_model(kl, model_path) @@ -796,7 +796,6 @@ def get_camera(cfg): if cfg.CAMERA_TYPE == "PICAM": from donkeycar.parts.camera import PiCamera cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, - framerate=cfg.CAMERA_FRAMERATE, vflip=cfg.CAMERA_VFLIP, hflip=cfg.CAMERA_HFLIP) elif cfg.CAMERA_TYPE == "WEBCAM": from donkeycar.parts.camera import Webcam diff --git a/donkeycar/templates/simulator.py b/donkeycar/templates/simulator.py index 9effcf8c1..8330c025b 100644 --- a/donkeycar/templates/simulator.py +++ b/donkeycar/templates/simulator.py @@ -273,7 +273,8 @@ def load_model_json(kl, json_fnm): model_reload_cb = None - if '.h5' in model_path or '.uff' in model_path or 'tflite' in model_path or '.pkl' in model_path: + if '.h5' in model_path or '.savedmodel' in model_path or '.uff' in \ + model_path or 'tflite' in model_path or '.pkl' in model_path: #when we have a .h5 extension #load everything from the model file load_model(kl, model_path) diff --git a/donkeycar/tests/test_keras.py b/donkeycar/tests/test_keras.py index 2c71bff0e..366bba788 100644 --- a/donkeycar/tests/test_keras.py +++ b/donkeycar/tests/test_keras.py @@ -1,12 +1,11 @@ import shutil import tempfile -import numpy as np from pytest import approx import pytest import os from donkeycar.parts.interpreter import keras_to_tflite, \ - saved_model_to_tensor_rt, TfLite, TensorRT + saved_model_to_tensor_rt, TfLite, TensorRT, has_trt_support from donkeycar.parts.keras import * from 
donkeycar.utils import get_test_img @@ -29,24 +28,19 @@ def create_models(keras_pilot, dir): # build with keras interpreter interpreter = KerasInterpreter() km = keras_pilot(interpreter=interpreter) - - kl = krt = None # build tflite model from TfLite interpreter tflite_model_path = os.path.join(dir, 'model.tflite') - if keras_pilot is not Keras3D_CNN: - keras_to_tflite(interpreter.model, tflite_model_path) - kl = keras_pilot(interpreter=TfLite()) - kl.load(tflite_model_path) + keras_to_tflite(interpreter.model, tflite_model_path) + kl = keras_pilot(interpreter=TfLite()) + kl.load(tflite_model_path) # save model in savedmodel format savedmodel_path = os.path.join(dir, 'model.savedmodel') interpreter.model.save(savedmodel_path) - - if keras_pilot is not KerasLSTM: - # convert to tensorrt and load - tensorrt_path = os.path.join(dir, 'model.trt') - saved_model_to_tensor_rt(savedmodel_path, tensorrt_path) + krt = None + # load tensorrt only if supported + if has_trt_support(): krt = keras_pilot(interpreter=TensorRT()) - krt.load(tensorrt_path) + krt.load(savedmodel_path) return km, kl, krt @@ -55,10 +49,10 @@ def create_models(keras_pilot, dir): def test_keras_vs_tflite_and_tensorrt(keras_pilot, tmp_dir): """ This test cannot run for the 3D CNN model in tflite and the LSTM model in """ - km, kl, krt = create_models(keras_pilot, tmp_dir) + k_keras, k_tflite, k_trt = create_models(keras_pilot, tmp_dir) # prepare data - img = get_test_img(km) + img = get_test_img(k_keras) if keras_pilot is KerasIMU: # simulate 6 imu data in [0, 1] imu = np.random.rand(6).tolist() @@ -76,16 +70,15 @@ def test_keras_vs_tflite_and_tensorrt(keras_pilot, tmp_dir): # run all three interpreters and check results are numerically close out2 = out3 = None - out1 = km.run(*args) - if keras_pilot is not Keras3D_CNN: - # conv3d in tflite requires TF > 2.3.0 - out2 = kl.run(*args) + out1 = k_keras.run(*args) + if k_tflite: + out2 = k_tflite.run(*args) assert out2 == approx(out1, rel=TOLERANCE, 
abs=TOLERANCE) - if keras_pilot is not KerasLSTM: + if k_trt: # lstm cells are not yet supported in tensor RT - out3 = krt.run(*args) + out3 = k_trt.run(*args) assert out3 == approx(out1, rel=TOLERANCE, abs=TOLERANCE) - print(out1, out2, out3) + print("\n", out1, out2, out3) diff --git a/donkeycar/tests/test_kinematics.py b/donkeycar/tests/test_kinematics.py index 4875748c4..8baab0bd8 100644 --- a/donkeycar/tests/test_kinematics.py +++ b/donkeycar/tests/test_kinematics.py @@ -725,7 +725,7 @@ def do_drive(self, cfg, forward_velocity, steering_angle): if steering_angle != 0.0: self.assertLessEqual(abs((ending_x - pose_x) / loop_distance), 0.005) # final error less than 0.5% self.assertLessEqual(abs((ending_y - pose_y) / loop_distance), 0.005) # final error less than 0.5% - self.assertLessEqual(abs((ending_angle - pose_angle) / (2 * math.pi)), 0.03) # final error less than 3% + self.assertLessEqual(abs((ending_angle - pose_angle) / (2 * math.pi)), 0.04) # final error less than 4% def test_drive_straight(self): # 12.5 degrees steering angle diff --git a/donkeycar/tests/test_scripts.py b/donkeycar/tests/test_scripts.py index afc0ef9a3..8d69f9261 100755 --- a/donkeycar/tests/test_scripts.py +++ b/donkeycar/tests/test_scripts.py @@ -56,7 +56,7 @@ def test_tubplot(cardir): # create empy KerasLinear model in car directory model_dir = os.path.join(cardir, 'models') os.mkdir(model_dir) - model_path = os.path.join(model_dir, 'model.h5') + model_path = os.path.join(model_dir, 'model.savedmodel') from donkeycar.parts.keras import KerasLinear KerasLinear().interpreter.model.save(model_path) diff --git a/donkeycar/tests/test_torch.py b/donkeycar/tests/test_torch.py index 3d6bb9fd6..5f9cb38ad 100644 --- a/donkeycar/tests/test_torch.py +++ b/donkeycar/tests/test_torch.py @@ -1,7 +1,7 @@ import pytest import tarfile import os -import numpy as np +import platform from collections import defaultdict, namedtuple import torch @@ -15,6 +15,11 @@ Data = namedtuple('Data', ['type', 'name', 
'convergence', 'pretrained']) +is_jetson = pytest.mark.skipif( + platform.machine() == 'aarch64', + reason="pytorch not yet supported on jetson") + + @pytest.fixture def config() -> Config: """ Config for the test with relevant parameters""" @@ -50,6 +55,7 @@ def car_dir(tmpdir_factory): test_data = [d1] +@is_jetson @pytest.mark.skipif("GITHUB_ACTIONS" in os.environ, reason='Suppress training test in CI') @pytest.mark.parametrize('data', test_data) @@ -74,6 +80,7 @@ def pilot_path(name): assert loss[-1] < loss[0] * data.convergence +@is_jetson @pytest.mark.skipif("GITHUB_ACTIONS" in os.environ, reason='Suppress training test in CI') @pytest.mark.parametrize('model_type', ['resnet18']) diff --git a/donkeycar/tests/test_train.py b/donkeycar/tests/test_train.py index 0abe0dc6f..6f08828fb 100644 --- a/donkeycar/tests/test_train.py +++ b/donkeycar/tests/test_train.py @@ -135,13 +135,13 @@ def car_dir(tmpdir_factory, base_config, imu_fields) -> str: @pytest.mark.parametrize('data', test_data) def test_train(config: Config, data: Data) -> None: """ - Testing convergence of the linear an categorical models + Testing convergence of the above models :param config: donkey config :param data: test case data :return: None """ def pilot_path(name): - pilot_name = f'pilot_{name}.h5' + pilot_name = f'pilot_{name}.savedmodel' return os.path.join(config.MODELS_PATH, pilot_name) cfg = copy(config) diff --git a/donkeycar/utils.py b/donkeycar/utils.py index bd2e892c2..3418cc2ac 100644 --- a/donkeycar/utils.py +++ b/donkeycar/utils.py @@ -560,10 +560,10 @@ def get_test_img(keras_pilot): :return np.ndarry(np.uint8): numpy random img array """ try: - count, h, w, ch = keras_pilot.get_input_shapes()[0] + count, h, w, ch = keras_pilot.get_input_shape('img_in') seq_len = 0 except Exception as e: - count, seq_len, h, w, ch = keras_pilot.get_input_shapes()[0] + count, seq_len, h, w, ch = keras_pilot.get_input_shape('img_in') # generate random array in the right shape img = np.random.randint(0, 
255, size=(h, w, ch)) diff --git a/install/envs/jetson.yml b/install/envs/jetson.yml new file mode 100644 index 000000000..c27b4db2d --- /dev/null +++ b/install/envs/jetson.yml @@ -0,0 +1,41 @@ +name: donkey + +channels: + - defaults + - conda-forge + - fastai + +dependencies: + - python=3.8 + - h5py + - pillow + - opencv + - matplotlib + - tornado + - docopt + - pandas + - pylint + - pytest + - pytest-cov + - codecov + - pip + - progress + - moviepy + - paho-mqtt + - PrettyTable + - pyfiglet + - mypy + - numpy + - psutil + - plotly + - pyyaml + - fastai + - pynmea2 + - pyserial + - utm + - pip: + - --extra-index-url https://developer.download.nvidia.com/compute/redist/jp/v502 + - tensorflow==2.9.1+nv22.09 + - git+https://github.com/autorope/keras-vis.git + - simple-pid + - kivy diff --git a/install/envs/jetson46.yml b/install/envs/jetson46.yml new file mode 100644 index 000000000..1a1740ea9 --- /dev/null +++ b/install/envs/jetson46.yml @@ -0,0 +1,34 @@ +name: donkey + +channels: + - defaults + - conda-forge + - fastai + +dependencies: + - python=3.9 + - h5py + - pillow + - matplotlib + - tornado + - docopt + - pandas + - pylint + - pytest + - pytest-cov + - codecov + - progress + - moviepy + - paho-mqtt + - PrettyTable + - pyfiglet + - mypy + - numpy + - psutil + - plotly + - pyyaml + - fastai + - pynmea2 + - pyserial + - utm + diff --git a/install/envs/mac.yml b/install/envs/mac.yml index 4b64681e1..be4f9844b 100644 --- a/install/envs/mac.yml +++ b/install/envs/mac.yml @@ -7,9 +7,8 @@ channels: - fastai dependencies: - - python=3.7 - - numpy=1.19 - - protobuf=3.20 + - python=3.9 + - numpy - h5py - pillow - opencv @@ -32,7 +31,7 @@ dependencies: - torchaudio - pytorch-lightning>=1.9,<2.0 - psutil - - kivy=2.0.0 + - kivy=2.1 - plotly - pyyaml - fastai @@ -41,7 +40,7 @@ dependencies: - utm - albumentations - pip: - - tensorflow==2.2.0 + - tensorflow==2.9 - git+https://github.com/autorope/keras-vis.git - simple-pid - moviepy diff --git a/install/envs/ubuntu.yml 
b/install/envs/ubuntu.yml index c64a71642..a43798092 100644 --- a/install/envs/ubuntu.yml +++ b/install/envs/ubuntu.yml @@ -7,8 +7,8 @@ channels: - fastai dependencies: - - python=3.7 - - numpy=1.19 + - python=3.9 + - numpy - h5py - pillow - opencv @@ -22,7 +22,6 @@ dependencies: - codecov - pip - progress - - moviepy - paho-mqtt - PrettyTable - pyfiglet @@ -32,10 +31,9 @@ dependencies: - torchaudio - pytorch-lightning>=1.9,<2.0 - psutil - - kivy=2.0.0 - plotly - pyyaml - - tensorflow=2.2.0 + - tensorflow=2.9 - fastai - pynmea2 - pyserial @@ -44,3 +42,5 @@ dependencies: - pip: - git+https://github.com/autorope/keras-vis.git - simple-pid + - kivy==2.1 + - moviepy diff --git a/install/envs/windows.yml b/install/envs/windows.yml index 2bdf15a0a..4a0d89d2a 100644 --- a/install/envs/windows.yml +++ b/install/envs/windows.yml @@ -7,8 +7,7 @@ channels: - fastai dependencies: - - python=3.7 - - numpy=1.19 + - python=3.9 - h5py - pillow - opencv @@ -31,7 +30,8 @@ dependencies: - torchvision=0.12 - torchaudio - pytorch-lightning - - kivy=2.0.0 + - numpy + - kivy=2.1 - plotly - pyyaml - psutil diff --git a/install/pi/install.sh b/install/pi/install.sh index 30470ac2e..bdf69f237 100644 --- a/install/pi/install.sh +++ b/install/pi/install.sh @@ -42,6 +42,5 @@ source ~/env/bin/activate # install pandas and numpy and Adafruit CircuitPython pip install pandas #also installs numpy -pip install adafruit-circuitpython-lis3dh pip install tensorflow==1.9 \ No newline at end of file diff --git a/scripts/convert_to_tflite.py b/scripts/convert_to_tflite.py new file mode 100755 index 000000000..19b43d29d --- /dev/null +++ b/scripts/convert_to_tflite.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 +""" +Usage: + convert_to_tflite.py [-o | --overwrite] <model_path> ... + +Options: + -h --help Show this screen. + -o --overwrite Force overwriting existing TFLite files + +Note: + This script converts a keras (.h5) or tensorflow (.savedmodel) into + TFlite. Supports multiple input files.
+ +""" + +from docopt import docopt +from donkeycar.parts.interpreter import keras_model_to_tflite +from os.path import splitext, exists + +if __name__ == '__main__': + args = docopt(__doc__) + model_path_list = args['<model_path>'] + overwrite = args['-o'] or args['--overwrite'] + print(f"Found {len(model_path_list)} models to process.") + print(f"Overwrite set to: {overwrite}.") + count = 0 + for model_path in model_path_list: + base_path, ext = splitext(model_path) + if ext not in ['.h5', '.savedmodel']: + print(f"Can only convert '.h5' or '.savedmodel' but not {ext}") + continue + tflite_filename = base_path + '.tflite' + if exists(tflite_filename) and not overwrite: + print(f"Found existing tflite model {tflite_filename}, will skip. " + f"If you want to overwrite existing files, please specify " + f"the option --overwrite or -o ") + continue + keras_model_to_tflite(model_path, tflite_filename) + count += 1 + print(f"Finished converting {count} models") diff --git a/setup.py b/setup.py index 52c6d0c1a..2bd0bc21c 100644 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ def package_files(directory, strip_leading): long_description = fh.read() setup(name='donkeycar', - version="5.0.dev0", + version="5.0.dev1", long_description=long_description, description='Self driving library for python.', url='https://github.com/autorope/donkeycar', @@ -37,7 +37,7 @@ def package_files(directory, strip_leading): ], }, install_requires=[ - 'numpy==1.19', + 'numpy', 'pillow', 'docopt', 'tornado', @@ -53,29 +53,43 @@ def package_files(directory, strip_leading): "pynmea2", 'pyserial', "utm", + 'pandas' ], extras_require={ + # if installing into a conda (i.e. miniforge) env on Pi we have to + # run 'sudo apt-get install libcap-dev' first.
'pi': [ - 'picamera', + 'picamera2', 'Adafruit_PCA9685', - 'adafruit-circuitpython-lis3dh', 'adafruit-circuitpython-ssd1306', 'adafruit-circuitpython-rplidar', 'RPi.GPIO', - 'imgaug' + 'tensorflow @ https://github.com/PINTO0309/Tensorflow-bin/releases/download/v2.9.0/tensorflow-2.9.0-cp39-none-linux_aarch64.whl' + ], + 'nano45': [ + 'Adafruit_PCA9685', + 'adafruit-circuitpython-ssd1306', + 'adafruit-circuitpython-rplidar', + 'Jetson.GPIO', + 'albumentations', + 'matplotlib', + 'kivy-jetson', + 'pyyaml', + 'plotly' ], 'nano': [ 'Adafruit_PCA9685', - 'adafruit-circuitpython-lis3dh', 'adafruit-circuitpython-ssd1306', 'adafruit-circuitpython-rplidar', 'Jetson.GPIO', - 'albumentations' + 'matplotlib', + 'kivy', + 'pyyaml', + 'plotly' ], 'pc': [ 'matplotlib', 'kivy', - 'protobuf', 'pandas', 'pyyaml', 'plotly', @@ -88,7 +102,7 @@ def package_files(directory, strip_leading): 'mypy' ], 'ci': ['codecov'], - 'tf': ['tensorflow==2.2.0'], + 'tf': ['tensorflow==2.9'], 'torch': [ 'pytorch', 'torchvision==0.12', @@ -119,4 +133,4 @@ def package_files(directory, strip_leading): ], keywords='selfdriving cars donkeycar diyrobocars', packages=find_packages(exclude=(['tests', 'docs', 'site', 'env'])), - ) + )