diff --git a/repro.sh b/repro.sh
new file mode 100755
index 00000000000000..17b7a207350934
--- /dev/null
+++ b/repro.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+while :
+do
+  rm selfdrive/modeld/models/supercombo_tinygrad.pkl
+  ./system/manager/build.py
+done
+
diff --git a/selfdrive/modeld/SConscript b/selfdrive/modeld/SConscript
index d572915c721a48..74757e3351babe 100644
--- a/selfdrive/modeld/SConscript
+++ b/selfdrive/modeld/SConscript
@@ -13,15 +13,6 @@ common_src = [
   "transforms/transform.cc",
 ]
 
-thneed_src_common = [
-  "thneed/thneed_common.cc",
-  "thneed/serialize.cc",
-]
-
-thneed_src_qcom = thneed_src_common + ["thneed/thneed_qcom2.cc"]
-thneed_src_pc = thneed_src_common + ["thneed/thneed_pc.cc"]
-thneed_src = thneed_src_qcom if arch == "larch64" else thneed_src_pc
-
 # SNPE except on Mac and ARM Linux
 snpe_lib = []
 if arch != "Darwin" and arch != "aarch64":
@@ -59,16 +50,13 @@ fn = File("models/supercombo").abspath
 cmd = f'python3 {Dir("#selfdrive/modeld").abspath}/get_model_metadata.py {fn}.onnx'
 lenv.Command(fn + "_metadata.pkl", [fn + ".onnx"] + tinygrad_files, cmd)
 
-# Build thneed model
-if arch == "larch64" or GetOption('pc_thneed'):
-  tinygrad_opts = []
-  if not GetOption('pc_thneed'):
-    # use FLOAT16 on device for speed + don't cache the CL kernels for space
-    tinygrad_opts += ["FLOAT16=1", "PYOPENCL_NO_CACHE=1"]
-  cmd = f"cd {Dir('#').abspath}/tinygrad_repo && " + ' '.join(tinygrad_opts) + f" python3 openpilot/compile2.py {fn}.onnx {fn}.thneed"
-
-  lenv.Command(fn + ".thneed", [fn + ".onnx"] + tinygrad_files, cmd)
-
-  thneed_lib = env.SharedLibrary('thneed', thneed_src, LIBS=[gpucommon, common, 'OpenCL', 'dl'])
-  thneedmodel_lib = env.Library('thneedmodel', ['runners/thneedmodel.cc'])
-  lenvCython.Program('runners/thneedmodel_pyx.so', 'runners/thneedmodel_pyx.pyx', LIBS=envCython["LIBS"]+[thneedmodel_lib, thneed_lib, gpucommon, common, 'dl', 'OpenCL'])
+# Compile tinygrad model
+# TODO: this is all super hacky
+pythonpath_string = 'PYTHONPATH="${PYTHONPATH}:' + env.Dir("#tinygrad_repo").abspath + '"'
+if arch == 'larch64':
+  device_string = 'QCOM=1'
+else:
+  device_string = 'GPU=1'
+fn = File("models/supercombo").abspath
+cmd = f'{pythonpath_string} {device_string} python3 {Dir("#tinygrad_repo").abspath}/examples/openpilot/compile3.py {fn}.onnx && mv /tmp/openpilot.pkl {fn}_tinygrad.pkl'
+lenv.Command(fn + "_tinygrad.pkl", [fn + ".onnx"] + tinygrad_files, cmd)
diff --git a/selfdrive/modeld/dmonitoringmodeld b/selfdrive/modeld/dmonitoringmodeld
new file mode 100755
index 00000000000000..90b43800fedf14
--- /dev/null
+++ b/selfdrive/modeld/dmonitoringmodeld
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null && pwd)"
+exec "$DIR/dmonitoringmodeld.py" "$@"
diff --git a/selfdrive/modeld/modeld.py b/selfdrive/modeld/modeld.py
index 86813fcb8ecd66..e0956861158549 100755
--- a/selfdrive/modeld/modeld.py
+++ b/selfdrive/modeld/modeld.py
@@ -1,5 +1,7 @@
 #!/usr/bin/env python3
 import os
+## TODO: this is a hack
+os.environ['QCOM'] = '1'
 import time
 import pickle
 import numpy as np
@@ -19,19 +21,21 @@ from openpilot.system import sentry
 from openpilot.selfdrive.car.card import convert_to_capnp
 from openpilot.selfdrive.controls.lib.desire_helper import DesireHelper
-from openpilot.selfdrive.modeld.runners import ModelRunner, Runtime
 from openpilot.selfdrive.modeld.parse_model_outputs import Parser
 from openpilot.selfdrive.modeld.fill_model_msg import fill_model_msg, fill_pose_msg, PublishState
 from openpilot.selfdrive.modeld.constants import ModelConstants
 from openpilot.selfdrive.modeld.models.commonmodel_pyx import ModelFrame, CLContext
+from tinygrad.tensor import Tensor
+Tensor.manual_seed(1337)
+Tensor.no_grad = True
+
 
 PROCESS_NAME = "selfdrive.modeld.modeld"
 SEND_RAW_PRED = os.getenv('SEND_RAW_PRED')
 
-MODEL_PATHS = {
-  ModelRunner.THNEED: Path(__file__).parent / 'models/supercombo.thneed',
-  ModelRunner.ONNX: Path(__file__).parent / 'models/supercombo.onnx'}
+MODEL_PATH = Path(__file__).parent / 'models/supercombo.onnx'
+MODEL_PKL_PATH = Path(__file__).parent / 'models/supercombo_tinygrad.pkl'
 METADATA_PATH = Path(__file__).parent / 'models/supercombo_metadata.pkl'
 
 class FrameMeta:
@@ -49,18 +53,19 @@ class ModelState:
   inputs: dict[str, np.ndarray]
   output: np.ndarray
   prev_desire: np.ndarray  # for tracking the rising edge of the pulse
-  model: ModelRunner
 
   def __init__(self, context: CLContext):
     self.frame = ModelFrame(context)
     self.wide_frame = ModelFrame(context)
     self.prev_desire = np.zeros(ModelConstants.DESIRE_LEN, dtype=np.float32)
     self.inputs = {
-      'desire': np.zeros(ModelConstants.DESIRE_LEN * (ModelConstants.HISTORY_BUFFER_LEN+1), dtype=np.float32),
-      'traffic_convention': np.zeros(ModelConstants.TRAFFIC_CONVENTION_LEN, dtype=np.float32),
-      'lateral_control_params': np.zeros(ModelConstants.LATERAL_CONTROL_PARAMS_LEN, dtype=np.float32),
-      'prev_desired_curv': np.zeros(ModelConstants.PREV_DESIRED_CURV_LEN * (ModelConstants.HISTORY_BUFFER_LEN+1), dtype=np.float32),
-      'features_buffer': np.zeros(ModelConstants.HISTORY_BUFFER_LEN * ModelConstants.FEATURE_LEN, dtype=np.float32),
+      'input_imgs': np.zeros((1, 12, 128, 256), dtype=np.float16),
+      'big_input_imgs': np.zeros((1, 12, 128, 256), dtype=np.float16),
+      'desire': np.zeros((1, (ModelConstants.HISTORY_BUFFER_LEN+1), ModelConstants.DESIRE_LEN), dtype=np.float16),
+      'traffic_convention': np.zeros((1, ModelConstants.TRAFFIC_CONVENTION_LEN), dtype=np.float16),
+      'lateral_control_params': np.zeros((1, ModelConstants.LATERAL_CONTROL_PARAMS_LEN), dtype=np.float16),
+      'prev_desired_curv': np.zeros((1, (ModelConstants.HISTORY_BUFFER_LEN+1), ModelConstants.PREV_DESIRED_CURV_LEN), dtype=np.float16),
+      'features_buffer': np.zeros((1, ModelConstants.HISTORY_BUFFER_LEN, ModelConstants.FEATURE_LEN), dtype=np.float16),
     }
 
     with open(METADATA_PATH, 'rb') as f:
@@ -71,11 +76,8 @@ def __init__(self, context: CLContext):
     self.output = np.zeros(net_output_size, dtype=np.float32)
     self.parser = Parser()
 
-    self.model = ModelRunner(MODEL_PATHS, self.output, Runtime.GPU, False, context)
-    self.model.addInput("input_imgs", None)
-    self.model.addInput("big_input_imgs", None)
-    for k,v in self.inputs.items():
-      self.model.addInput(k, v)
+    with open(MODEL_PKL_PATH, "rb") as f:
+      self.model_run = pickle.load(f)
 
   def slice_outputs(self, model_outputs: np.ndarray) -> dict[str, np.ndarray]:
     parsed_model_outputs = {k: model_outputs[np.newaxis, v] for k,v in self.output_slices.items()}
@@ -93,16 +95,15 @@ def run(self, buf: VisionBuf, wbuf: VisionBuf, transform: np.ndarray, transform_
     self.inputs['traffic_convention'][:] = inputs['traffic_convention']
     self.inputs['lateral_control_params'][:] = inputs['lateral_control_params']
-
-    # if getCLBuffer is not None, frame will be None
-    self.model.setInputBuffer("input_imgs", self.frame.prepare(buf, transform.flatten(), self.model.getCLBuffer("input_imgs")))
+    self.inputs['input_imgs'] = self.frame.prepare(buf, transform.flatten(), None).astype(np.float16).reshape(self.inputs['input_imgs'].shape)
 
     if wbuf is not None:
-      self.model.setInputBuffer("big_input_imgs", self.wide_frame.prepare(wbuf, transform_wide.flatten(), self.model.getCLBuffer("big_input_imgs")))
+      self.inputs['big_input_imgs'] = self.wide_frame.prepare(wbuf, transform_wide.flatten(), None).astype(np.float16).reshape(self.inputs['big_input_imgs'].shape)
 
     if prepare_only:
       return None
 
-    self.model.execute()
+    self.tensor_inputs = {k: Tensor(v) for k, v in self.inputs.items()}
+    self.output = self.model_run(**self.tensor_inputs)['outputs'].numpy().flatten()
 
     outputs = self.parser.parse_outputs(self.slice_outputs(self.output))
 
     self.inputs['features_buffer'][:-ModelConstants.FEATURE_LEN] = self.inputs['features_buffer'][ModelConstants.FEATURE_LEN:]
diff --git a/selfdrive/modeld/models/commonmodel.cc b/selfdrive/modeld/models/commonmodel.cc
index 57c14dfa881189..5dab3f6d94f231 100644
--- a/selfdrive/modeld/models/commonmodel.cc
+++ b/selfdrive/modeld/models/commonmodel.cc
@@ -24,19 +24,12 @@ float* ModelFrame::prepare(cl_mem yuv_cl, int frame_width, int frame_height, int
                     yuv_cl, frame_width, frame_height, frame_stride, frame_uv_offset,
                     y_cl, u_cl, v_cl, MODEL_WIDTH, MODEL_HEIGHT, projection);
 
-  if (output == NULL) {
-    loadyuv_queue(&loadyuv, q, y_cl, u_cl, v_cl, net_input_cl);
-
-    std::memmove(&input_frames[0], &input_frames[MODEL_FRAME_SIZE], sizeof(float) * MODEL_FRAME_SIZE);
-    CL_CHECK(clEnqueueReadBuffer(q, net_input_cl, CL_TRUE, 0, MODEL_FRAME_SIZE * sizeof(float), &input_frames[MODEL_FRAME_SIZE], 0, nullptr, nullptr));
-    clFinish(q);
-    return &input_frames[0];
-  } else {
-    loadyuv_queue(&loadyuv, q, y_cl, u_cl, v_cl, *output, true);
-    // NOTE: Since thneed is using a different command queue, this clFinish is needed to ensure the image is ready.
-    clFinish(q);
-    return NULL;
-  }
+  loadyuv_queue(&loadyuv, q, y_cl, u_cl, v_cl, net_input_cl);
+
+  std::memmove(&input_frames[0], &input_frames[MODEL_FRAME_SIZE], sizeof(float) * MODEL_FRAME_SIZE);
+  CL_CHECK(clEnqueueReadBuffer(q, net_input_cl, CL_TRUE, 0, MODEL_FRAME_SIZE * sizeof(float), &input_frames[MODEL_FRAME_SIZE], 0, nullptr, nullptr));
+  clFinish(q);
+  return &input_frames[0];
 }
 
 ModelFrame::~ModelFrame() {
diff --git a/selfdrive/modeld/runners/__init__.py b/selfdrive/modeld/runners/__init__.py
index 4c29bf3f1cfbf7..afcb97df5203c9 100644
--- a/selfdrive/modeld/runners/__init__.py
+++ b/selfdrive/modeld/runners/__init__.py
@@ -3,18 +3,18 @@ from openpilot.selfdrive.modeld.runners.runmodel_pyx import RunModel, Runtime
 assert Runtime
 
-USE_THNEED = int(os.getenv('USE_THNEED', str(int(TICI))))
+USE_TINYGRAD = int(os.getenv('USE_TINYGRAD', str(int(TICI))))
 USE_SNPE = int(os.getenv('USE_SNPE', str(int(TICI))))
 
 class ModelRunner(RunModel):
-  THNEED = 'THNEED'
+  TINYGRAD = 'TINYGRAD'
   SNPE = 'SNPE'
   ONNX = 'ONNX'
 
   def __new__(cls, paths, *args, **kwargs):
-    if ModelRunner.THNEED in paths and USE_THNEED:
-      from openpilot.selfdrive.modeld.runners.thneedmodel_pyx import ThneedModel as Runner
-      runner_type = ModelRunner.THNEED
+    if ModelRunner.TINYGRAD in paths and USE_TINYGRAD:
+      from openpilot.selfdrive.modeld.runners.tinygradmodel import TinygradModel as Runner
+      runner_type = ModelRunner.TINYGRAD
     elif ModelRunner.SNPE in paths and USE_SNPE:
       from openpilot.selfdrive.modeld.runners.snpemodel_pyx import SNPEModel as Runner
       runner_type = ModelRunner.SNPE
diff --git a/selfdrive/modeld/runners/thneedmodel.cc b/selfdrive/modeld/runners/thneedmodel.cc
deleted file mode 100644
index a16d8b42aab223..00000000000000
--- a/selfdrive/modeld/runners/thneedmodel.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-#include "selfdrive/modeld/runners/thneedmodel.h"
-
-#include
-
-#include "common/swaglog.h" - -ThneedModel::ThneedModel(const std::string path, float *_output, size_t _output_size, int runtime, bool luse_tf8, cl_context context) { - thneed = new Thneed(true, context); - thneed->load(path.c_str()); - thneed->clexec(); - - recorded = false; - output = _output; -} - -void* ThneedModel::getCLBuffer(const std::string name) { - int index = -1; - for (int i = 0; i < inputs.size(); i++) { - if (name == inputs[i]->name) { - index = i; - break; - } - } - - if (index == -1) { - LOGE("Tried to get CL buffer for input `%s` but no input with this name exists", name.c_str()); - assert(false); - } - - if (thneed->input_clmem.size() >= inputs.size()) { - return &thneed->input_clmem[inputs.size() - index - 1]; - } else { - return nullptr; - } -} - -void ThneedModel::execute() { - if (!recorded) { - thneed->record = true; - float *input_buffers[inputs.size()]; - for (int i = 0; i < inputs.size(); i++) { - input_buffers[inputs.size() - i - 1] = inputs[i]->buffer; - } - - thneed->copy_inputs(input_buffers); - thneed->clexec(); - thneed->copy_output(output); - thneed->stop(); - - recorded = true; - } else { - float *input_buffers[inputs.size()]; - for (int i = 0; i < inputs.size(); i++) { - input_buffers[inputs.size() - i - 1] = inputs[i]->buffer; - } - thneed->execute(input_buffers, output); - } -} diff --git a/selfdrive/modeld/runners/thneedmodel.h b/selfdrive/modeld/runners/thneedmodel.h deleted file mode 100644 index 6ed479c081634d..00000000000000 --- a/selfdrive/modeld/runners/thneedmodel.h +++ /dev/null @@ -1,17 +0,0 @@ -#pragma once - -#include - -#include "selfdrive/modeld/runners/runmodel.h" -#include "selfdrive/modeld/thneed/thneed.h" - -class ThneedModel : public RunModel { -public: - ThneedModel(const std::string path, float *_output, size_t _output_size, int runtime, bool use_tf8 = false, cl_context context = NULL); - void *getCLBuffer(const std::string name); - void execute(); -private: - Thneed *thneed = NULL; - bool recorded; - float *output; -}; diff --git a/selfdrive/modeld/runners/thneedmodel.pxd b/selfdrive/modeld/runners/thneedmodel.pxd deleted file mode 100644 index 79e24dbdd62518..00000000000000 --- a/selfdrive/modeld/runners/thneedmodel.pxd +++ /dev/null @@ -1,9 +0,0 @@ -# distutils: language = c++ - -from libcpp.string cimport string - -from msgq.visionipc.visionipc cimport cl_context - -cdef extern from "selfdrive/modeld/runners/thneedmodel.h": - cdef cppclass ThneedModel: - ThneedModel(string, float*, size_t, int, bool, cl_context) diff --git a/selfdrive/modeld/runners/thneedmodel_pyx.pyx b/selfdrive/modeld/runners/thneedmodel_pyx.pyx deleted file mode 100644 index 6f8fdd255fa5bb..00000000000000 --- a/selfdrive/modeld/runners/thneedmodel_pyx.pyx +++ /dev/null @@ -1,14 +0,0 @@ -# distutils: language = c++ -# cython: c_string_encoding=ascii, language_level=3 - -from libcpp cimport bool -from libcpp.string cimport string - -from .thneedmodel cimport ThneedModel as cppThneedModel -from selfdrive.modeld.models.commonmodel_pyx cimport CLContext -from selfdrive.modeld.runners.runmodel_pyx cimport RunModel -from selfdrive.modeld.runners.runmodel cimport RunModel as cppRunModel - -cdef class ThneedModel(RunModel): - def __cinit__(self, string path, float[:] output, int runtime, bool use_tf8, CLContext context): - self.model = new cppThneedModel(path, &output[0], len(output), runtime, use_tf8, context.context) diff --git a/selfdrive/modeld/thneed/README b/selfdrive/modeld/thneed/README deleted file mode 100644 index f3bc66d8fc26ff..00000000000000 --- 
a/selfdrive/modeld/thneed/README +++ /dev/null @@ -1,8 +0,0 @@ -thneed is an SNPE accelerator. I know SNPE is already an accelerator, but sometimes things need to go even faster.. - -It runs on the local device, and caches a single model run. Then it replays it, but fast. - -thneed slices through abstraction layers like a fish. - -You need a thneed. - diff --git a/selfdrive/modeld/thneed/__init__.py b/selfdrive/modeld/thneed/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/selfdrive/modeld/thneed/serialize.cc b/selfdrive/modeld/thneed/serialize.cc deleted file mode 100644 index 3dc2bef41448f8..00000000000000 --- a/selfdrive/modeld/thneed/serialize.cc +++ /dev/null @@ -1,154 +0,0 @@ -#include -#include - -#include "third_party/json11/json11.hpp" -#include "common/util.h" -#include "common/clutil.h" -#include "common/swaglog.h" -#include "selfdrive/modeld/thneed/thneed.h" -using namespace json11; - -extern map g_program_source; - -void Thneed::load(const char *filename) { - LOGD("Thneed::load: loading from %s\n", filename); - - string buf = util::read_file(filename); - int jsz = *(int *)buf.data(); - string jsonerr; - string jj(buf.data() + sizeof(int), jsz); - Json jdat = Json::parse(jj, jsonerr); - - map real_mem; - real_mem[NULL] = NULL; - - int ptr = sizeof(int)+jsz; - for (auto &obj : jdat["objects"].array_items()) { - auto mobj = obj.object_items(); - int sz = mobj["size"].int_value(); - cl_mem clbuf = NULL; - - if (mobj["buffer_id"].string_value().size() > 0) { - // image buffer must already be allocated - clbuf = real_mem[*(cl_mem*)(mobj["buffer_id"].string_value().data())]; - assert(mobj["needs_load"].bool_value() == false); - } else { - if (mobj["needs_load"].bool_value()) { - clbuf = clCreateBuffer(context, CL_MEM_COPY_HOST_PTR | CL_MEM_READ_WRITE, sz, &buf[ptr], NULL); - if (debug >= 1) printf("loading %p %d @ 0x%X\n", clbuf, sz, ptr); - ptr += sz; - } else { - // TODO: is there a faster way to init zeroed out buffers? - void *host_zeros = calloc(sz, 1); - clbuf = clCreateBuffer(context, CL_MEM_COPY_HOST_PTR | CL_MEM_READ_WRITE, sz, host_zeros, NULL); - free(host_zeros); - } - } - assert(clbuf != NULL); - - if (mobj["arg_type"] == "image2d_t" || mobj["arg_type"] == "image1d_t") { - cl_image_desc desc = {0}; - desc.image_type = (mobj["arg_type"] == "image2d_t") ? CL_MEM_OBJECT_IMAGE2D : CL_MEM_OBJECT_IMAGE1D_BUFFER; - desc.image_width = mobj["width"].int_value(); - desc.image_height = mobj["height"].int_value(); - desc.image_row_pitch = mobj["row_pitch"].int_value(); - assert(sz == desc.image_height*desc.image_row_pitch); -#ifdef QCOM2 - desc.buffer = clbuf; -#else - // TODO: we are creating unused buffers on PC - clReleaseMemObject(clbuf); -#endif - cl_image_format format = {0}; - format.image_channel_order = CL_RGBA; - format.image_channel_data_type = mobj["float32"].bool_value() ? 
CL_FLOAT : CL_HALF_FLOAT; - - cl_int errcode; - -#ifndef QCOM2 - if (mobj["needs_load"].bool_value()) { - clbuf = clCreateImage(context, CL_MEM_COPY_HOST_PTR | CL_MEM_READ_WRITE, &format, &desc, &buf[ptr-sz], &errcode); - } else { - clbuf = clCreateImage(context, CL_MEM_READ_WRITE, &format, &desc, NULL, &errcode); - } -#else - clbuf = clCreateImage(context, CL_MEM_READ_WRITE, &format, &desc, NULL, &errcode); -#endif - if (clbuf == NULL) { - LOGE("clError: %s create image %zux%zu rp %zu with buffer %p\n", cl_get_error_string(errcode), - desc.image_width, desc.image_height, desc.image_row_pitch, desc.buffer); - } - assert(clbuf != NULL); - } - - real_mem[*(cl_mem*)(mobj["id"].string_value().data())] = clbuf; - } - - map g_programs; - for (const auto &[name, source] : jdat["programs"].object_items()) { - if (debug >= 1) printf("building %s with size %zu\n", name.c_str(), source.string_value().size()); - g_programs[name] = cl_program_from_source(context, device_id, source.string_value()); - } - - for (auto &obj : jdat["inputs"].array_items()) { - auto mobj = obj.object_items(); - int sz = mobj["size"].int_value(); - cl_mem aa = real_mem[*(cl_mem*)(mobj["buffer_id"].string_value().data())]; - input_clmem.push_back(aa); - input_sizes.push_back(sz); - LOGD("Thneed::load: adding input %s with size %d\n", mobj["name"].string_value().data(), sz); - - cl_int cl_err; - void *ret = clEnqueueMapBuffer(command_queue, aa, CL_TRUE, CL_MAP_WRITE, 0, sz, 0, NULL, NULL, &cl_err); - if (cl_err != CL_SUCCESS) LOGE("clError: %s map %p %d\n", cl_get_error_string(cl_err), aa, sz); - assert(cl_err == CL_SUCCESS); - inputs.push_back(ret); - } - - for (auto &obj : jdat["outputs"].array_items()) { - auto mobj = obj.object_items(); - int sz = mobj["size"].int_value(); - LOGD("Thneed::save: adding output with size %d\n", sz); - // TODO: support multiple outputs - output = real_mem[*(cl_mem*)(mobj["buffer_id"].string_value().data())]; - assert(output != NULL); - } - - for (auto &obj : jdat["binaries"].array_items()) { - string name = obj["name"].string_value(); - size_t length = obj["length"].int_value(); - if (debug >= 1) printf("binary %s with size %zu\n", name.c_str(), length); - g_programs[name] = cl_program_from_binary(context, device_id, (const uint8_t*)&buf[ptr], length); - ptr += length; - } - - for (auto &obj : jdat["kernels"].array_items()) { - auto gws = obj["global_work_size"]; - auto lws = obj["local_work_size"]; - auto kk = shared_ptr(new CLQueuedKernel(this)); - - kk->name = obj["name"].string_value(); - kk->program = g_programs[kk->name]; - kk->work_dim = obj["work_dim"].int_value(); - for (int i = 0; i < kk->work_dim; i++) { - kk->global_work_size[i] = gws[i].int_value(); - kk->local_work_size[i] = lws[i].int_value(); - } - kk->num_args = obj["num_args"].int_value(); - for (int i = 0; i < kk->num_args; i++) { - string arg = obj["args"].array_items()[i].string_value(); - int arg_size = obj["args_size"].array_items()[i].int_value(); - kk->args_size.push_back(arg_size); - if (arg_size == 8) { - cl_mem val = *(cl_mem*)(arg.data()); - val = real_mem[val]; - kk->args.push_back(string((char*)&val, sizeof(val))); - } else { - kk->args.push_back(arg); - } - } - kq.push_back(kk); - } - - clFinish(command_queue); -} diff --git a/selfdrive/modeld/thneed/thneed.h b/selfdrive/modeld/thneed/thneed.h deleted file mode 100644 index 47e18e0be3bb30..00000000000000 --- a/selfdrive/modeld/thneed/thneed.h +++ /dev/null @@ -1,133 +0,0 @@ -#pragma once - -#ifndef __user -#define __user __attribute__(()) -#endif - -#include 
-#include -#include -#include -#include - -#include - -#include "third_party/linux/include/msm_kgsl.h" - -using namespace std; - -cl_int thneed_clSetKernelArg(cl_kernel kernel, cl_uint arg_index, size_t arg_size, const void *arg_value); - -namespace json11 { - class Json; -} -class Thneed; - -class GPUMalloc { - public: - GPUMalloc(int size, int fd); - ~GPUMalloc(); - void *alloc(int size); - private: - uint64_t base; - int remaining; -}; - -class CLQueuedKernel { - public: - CLQueuedKernel(Thneed *lthneed) { thneed = lthneed; } - CLQueuedKernel(Thneed *lthneed, - cl_kernel _kernel, - cl_uint _work_dim, - const size_t *_global_work_size, - const size_t *_local_work_size); - cl_int exec(); - void debug_print(bool verbose); - int get_arg_num(const char *search_arg_name); - cl_program program; - string name; - cl_uint num_args; - vector arg_names; - vector arg_types; - vector args; - vector args_size; - cl_kernel kernel = NULL; - json11::Json to_json() const; - - cl_uint work_dim; - size_t global_work_size[3] = {0}; - size_t local_work_size[3] = {0}; - private: - Thneed *thneed; -}; - -class CachedIoctl { - public: - virtual void exec() {} -}; - -class CachedSync: public CachedIoctl { - public: - CachedSync(Thneed *lthneed, string ldata) { thneed = lthneed; data = ldata; } - void exec(); - private: - Thneed *thneed; - string data; -}; - -class CachedCommand: public CachedIoctl { - public: - CachedCommand(Thneed *lthneed, struct kgsl_gpu_command *cmd); - void exec(); - private: - void disassemble(int cmd_index); - struct kgsl_gpu_command cache; - unique_ptr cmds; - unique_ptr objs; - Thneed *thneed; - vector > kq; -}; - -class Thneed { - public: - Thneed(bool do_clinit=false, cl_context _context = NULL); - void stop(); - void execute(float **finputs, float *foutput, bool slow=false); - void wait(); - - vector input_clmem; - vector inputs; - vector input_sizes; - cl_mem output = NULL; - - cl_context context = NULL; - cl_command_queue command_queue; - cl_device_id device_id; - int context_id; - - // protected? 
- bool record = false; - int debug; - int timestamp; - -#ifdef QCOM2 - unique_ptr ram; - vector > cmds; - int fd; -#endif - - // all CL kernels - void copy_inputs(float **finputs, bool internal=false); - void copy_output(float *foutput); - cl_int clexec(); - vector > kq; - - // pending CL kernels - vector > ckq; - - // loading - void load(const char *filename); - private: - void clinit(); -}; - diff --git a/selfdrive/modeld/thneed/thneed_common.cc b/selfdrive/modeld/thneed/thneed_common.cc deleted file mode 100644 index ecdf1237e384ff..00000000000000 --- a/selfdrive/modeld/thneed/thneed_common.cc +++ /dev/null @@ -1,216 +0,0 @@ -#include "selfdrive/modeld/thneed/thneed.h" - -#include -#include -#include - -#include "common/clutil.h" -#include "common/timing.h" - -map, string> g_args; -map, int> g_args_size; -map g_program_source; - -void Thneed::stop() { - //printf("Thneed::stop: recorded %lu commands\n", cmds.size()); - record = false; -} - -void Thneed::clinit() { - device_id = cl_get_device_id(CL_DEVICE_TYPE_DEFAULT); - if (context == NULL) context = CL_CHECK_ERR(clCreateContext(NULL, 1, &device_id, NULL, NULL, &err)); - //cl_command_queue_properties props[3] = {CL_QUEUE_PROPERTIES, CL_QUEUE_PROFILING_ENABLE, 0}; - cl_command_queue_properties props[3] = {CL_QUEUE_PROPERTIES, 0, 0}; - command_queue = CL_CHECK_ERR(clCreateCommandQueueWithProperties(context, device_id, props, &err)); - printf("Thneed::clinit done\n"); -} - -cl_int Thneed::clexec() { - if (debug >= 1) printf("Thneed::clexec: running %lu queued kernels\n", kq.size()); - for (auto &k : kq) { - if (record) ckq.push_back(k); - cl_int ret = k->exec(); - assert(ret == CL_SUCCESS); - } - return clFinish(command_queue); -} - -void Thneed::copy_inputs(float **finputs, bool internal) { - for (int idx = 0; idx < inputs.size(); ++idx) { - if (debug >= 1) printf("copying %lu -- %p -> %p (cl %p)\n", input_sizes[idx], finputs[idx], inputs[idx], input_clmem[idx]); - - if (internal) { - // if it's internal, using memcpy is fine since the buffer sync is cached in the ioctl layer - if (finputs[idx] != NULL) memcpy(inputs[idx], finputs[idx], input_sizes[idx]); - } else { - if (finputs[idx] != NULL) CL_CHECK(clEnqueueWriteBuffer(command_queue, input_clmem[idx], CL_TRUE, 0, input_sizes[idx], finputs[idx], 0, NULL, NULL)); - } - } -} - -void Thneed::copy_output(float *foutput) { - if (output != NULL) { - size_t sz; - clGetMemObjectInfo(output, CL_MEM_SIZE, sizeof(sz), &sz, NULL); - if (debug >= 1) printf("copying %lu for output %p -> %p\n", sz, output, foutput); - CL_CHECK(clEnqueueReadBuffer(command_queue, output, CL_TRUE, 0, sz, foutput, 0, NULL, NULL)); - } else { - printf("CAUTION: model output is NULL, does it have no outputs?\n"); - } -} - -// *********** CLQueuedKernel *********** - -CLQueuedKernel::CLQueuedKernel(Thneed *lthneed, - cl_kernel _kernel, - cl_uint _work_dim, - const size_t *_global_work_size, - const size_t *_local_work_size) { - thneed = lthneed; - kernel = _kernel; - work_dim = _work_dim; - assert(work_dim <= 3); - for (int i = 0; i < work_dim; i++) { - global_work_size[i] = _global_work_size[i]; - local_work_size[i] = _local_work_size[i]; - } - - char _name[0x100]; - clGetKernelInfo(kernel, CL_KERNEL_FUNCTION_NAME, sizeof(_name), _name, NULL); - name = string(_name); - clGetKernelInfo(kernel, CL_KERNEL_NUM_ARGS, sizeof(num_args), &num_args, NULL); - - // get args - for (int i = 0; i < num_args; i++) { - char arg_name[0x100] = {0}; - clGetKernelArgInfo(kernel, i, CL_KERNEL_ARG_NAME, sizeof(arg_name), arg_name, NULL); - 
arg_names.push_back(string(arg_name)); - clGetKernelArgInfo(kernel, i, CL_KERNEL_ARG_TYPE_NAME, sizeof(arg_name), arg_name, NULL); - arg_types.push_back(string(arg_name)); - - args.push_back(g_args[make_pair(kernel, i)]); - args_size.push_back(g_args_size[make_pair(kernel, i)]); - } - - // get program - clGetKernelInfo(kernel, CL_KERNEL_PROGRAM, sizeof(program), &program, NULL); -} - -int CLQueuedKernel::get_arg_num(const char *search_arg_name) { - for (int i = 0; i < num_args; i++) { - if (arg_names[i] == search_arg_name) return i; - } - printf("failed to find %s in %s\n", search_arg_name, name.c_str()); - assert(false); -} - -cl_int CLQueuedKernel::exec() { - if (kernel == NULL) { - kernel = clCreateKernel(program, name.c_str(), NULL); - arg_names.clear(); - arg_types.clear(); - - for (int j = 0; j < num_args; j++) { - char arg_name[0x100] = {0}; - clGetKernelArgInfo(kernel, j, CL_KERNEL_ARG_NAME, sizeof(arg_name), arg_name, NULL); - arg_names.push_back(string(arg_name)); - clGetKernelArgInfo(kernel, j, CL_KERNEL_ARG_TYPE_NAME, sizeof(arg_name), arg_name, NULL); - arg_types.push_back(string(arg_name)); - - cl_int ret; - if (args[j].size() != 0) { - assert(args[j].size() == args_size[j]); - ret = thneed_clSetKernelArg(kernel, j, args[j].size(), args[j].data()); - } else { - ret = thneed_clSetKernelArg(kernel, j, args_size[j], NULL); - } - assert(ret == CL_SUCCESS); - } - } - - if (thneed->debug >= 1) { - debug_print(thneed->debug >= 2); - } - - return clEnqueueNDRangeKernel(thneed->command_queue, - kernel, work_dim, NULL, global_work_size, local_work_size, 0, NULL, NULL); -} - -void CLQueuedKernel::debug_print(bool verbose) { - printf("%p %56s -- ", kernel, name.c_str()); - for (int i = 0; i < work_dim; i++) { - printf("%4zu ", global_work_size[i]); - } - printf(" -- "); - for (int i = 0; i < work_dim; i++) { - printf("%4zu ", local_work_size[i]); - } - printf("\n"); - - if (verbose) { - for (int i = 0; i < num_args; i++) { - string arg = args[i]; - printf(" %s %s", arg_types[i].c_str(), arg_names[i].c_str()); - void *arg_value = (void*)arg.data(); - int arg_size = arg.size(); - if (arg_size == 0) { - printf(" (size) %d", args_size[i]); - } else if (arg_size == 1) { - printf(" = %d", *((char*)arg_value)); - } else if (arg_size == 2) { - printf(" = %d", *((short*)arg_value)); - } else if (arg_size == 4) { - if (arg_types[i] == "float") { - printf(" = %f", *((float*)arg_value)); - } else { - printf(" = %d", *((int*)arg_value)); - } - } else if (arg_size == 8) { - cl_mem val = (cl_mem)(*((uintptr_t*)arg_value)); - printf(" = %p", val); - if (val != NULL) { - cl_mem_object_type obj_type; - clGetMemObjectInfo(val, CL_MEM_TYPE, sizeof(obj_type), &obj_type, NULL); - if (arg_types[i] == "image2d_t" || arg_types[i] == "image1d_t" || obj_type == CL_MEM_OBJECT_IMAGE2D) { - cl_image_format format; - size_t width, height, depth, array_size, row_pitch, slice_pitch; - cl_mem buf; - clGetImageInfo(val, CL_IMAGE_FORMAT, sizeof(format), &format, NULL); - assert(format.image_channel_order == CL_RGBA); - assert(format.image_channel_data_type == CL_HALF_FLOAT || format.image_channel_data_type == CL_FLOAT); - clGetImageInfo(val, CL_IMAGE_WIDTH, sizeof(width), &width, NULL); - clGetImageInfo(val, CL_IMAGE_HEIGHT, sizeof(height), &height, NULL); - clGetImageInfo(val, CL_IMAGE_ROW_PITCH, sizeof(row_pitch), &row_pitch, NULL); - clGetImageInfo(val, CL_IMAGE_DEPTH, sizeof(depth), &depth, NULL); - clGetImageInfo(val, CL_IMAGE_ARRAY_SIZE, sizeof(array_size), &array_size, NULL); - clGetImageInfo(val, 
CL_IMAGE_SLICE_PITCH, sizeof(slice_pitch), &slice_pitch, NULL); - assert(depth == 0); - assert(array_size == 0); - assert(slice_pitch == 0); - - clGetImageInfo(val, CL_IMAGE_BUFFER, sizeof(buf), &buf, NULL); - size_t sz = 0; - if (buf != NULL) clGetMemObjectInfo(buf, CL_MEM_SIZE, sizeof(sz), &sz, NULL); - printf(" image %zu x %zu rp %zu @ %p buffer %zu", width, height, row_pitch, buf, sz); - } else { - size_t sz; - clGetMemObjectInfo(val, CL_MEM_SIZE, sizeof(sz), &sz, NULL); - printf(" buffer %zu", sz); - } - } - } - printf("\n"); - } - } -} - -cl_int thneed_clSetKernelArg(cl_kernel kernel, cl_uint arg_index, size_t arg_size, const void *arg_value) { - g_args_size[make_pair(kernel, arg_index)] = arg_size; - if (arg_value != NULL) { - g_args[make_pair(kernel, arg_index)] = string((char*)arg_value, arg_size); - } else { - g_args[make_pair(kernel, arg_index)] = string(""); - } - cl_int ret = clSetKernelArg(kernel, arg_index, arg_size, arg_value); - return ret; -} diff --git a/selfdrive/modeld/thneed/thneed_pc.cc b/selfdrive/modeld/thneed/thneed_pc.cc deleted file mode 100644 index 8d0037628e2f3d..00000000000000 --- a/selfdrive/modeld/thneed/thneed_pc.cc +++ /dev/null @@ -1,32 +0,0 @@ -#include "selfdrive/modeld/thneed/thneed.h" - -#include - -#include "common/clutil.h" -#include "common/timing.h" - -Thneed::Thneed(bool do_clinit, cl_context _context) { - context = _context; - if (do_clinit) clinit(); - char *thneed_debug_env = getenv("THNEED_DEBUG"); - debug = (thneed_debug_env != NULL) ? atoi(thneed_debug_env) : 0; -} - -void Thneed::execute(float **finputs, float *foutput, bool slow) { - uint64_t tb, te; - if (debug >= 1) tb = nanos_since_boot(); - - // ****** copy inputs - copy_inputs(finputs); - - // ****** run commands - clexec(); - - // ****** copy outputs - copy_output(foutput); - - if (debug >= 1) { - te = nanos_since_boot(); - printf("model exec in %lu us\n", (te-tb)/1000); - } -} diff --git a/selfdrive/modeld/thneed/thneed_qcom2.cc b/selfdrive/modeld/thneed/thneed_qcom2.cc deleted file mode 100644 index 21de15d17c9cfc..00000000000000 --- a/selfdrive/modeld/thneed/thneed_qcom2.cc +++ /dev/null @@ -1,258 +0,0 @@ -#include "selfdrive/modeld/thneed/thneed.h" - -#include -#include - -#include -#include -#include -#include -#include - -#include "common/clutil.h" -#include "common/timing.h" - -Thneed *g_thneed = NULL; -int g_fd = -1; - -void hexdump(uint8_t *d, int len) { - assert((len%4) == 0); - printf(" dumping %p len 0x%x\n", d, len); - for (int i = 0; i < len/4; i++) { - if (i != 0 && (i%0x10) == 0) printf("\n"); - printf("%8x ", d[i]); - } - printf("\n"); -} - -// *********** ioctl interceptor *********** - -extern "C" { - -int (*my_ioctl)(int filedes, unsigned long request, void *argp) = NULL; -#undef ioctl -int ioctl(int filedes, unsigned long request, void *argp) { - request &= 0xFFFFFFFF; // needed on QCOM2 - if (my_ioctl == NULL) my_ioctl = reinterpret_cast(dlsym(RTLD_NEXT, "ioctl")); - Thneed *thneed = g_thneed; - - // save the fd - if (request == IOCTL_KGSL_GPUOBJ_ALLOC) g_fd = filedes; - - // note that this runs always, even without a thneed object - if (request == IOCTL_KGSL_DRAWCTXT_CREATE) { - struct kgsl_drawctxt_create *create = (struct kgsl_drawctxt_create *)argp; - create->flags &= ~KGSL_CONTEXT_PRIORITY_MASK; - create->flags |= 6 << KGSL_CONTEXT_PRIORITY_SHIFT; // priority from 1-15, 1 is max priority - printf("IOCTL_KGSL_DRAWCTXT_CREATE: creating context with flags 0x%x\n", create->flags); - } - - if (thneed != NULL) { - if (request == IOCTL_KGSL_GPU_COMMAND) { - 
struct kgsl_gpu_command *cmd = (struct kgsl_gpu_command *)argp; - if (thneed->record) { - thneed->timestamp = cmd->timestamp; - thneed->context_id = cmd->context_id; - thneed->cmds.push_back(unique_ptr(new CachedCommand(thneed, cmd))); - } - if (thneed->debug >= 1) { - printf("IOCTL_KGSL_GPU_COMMAND(%2zu): flags: 0x%lx context_id: %u timestamp: %u numcmds: %d numobjs: %d\n", - thneed->cmds.size(), - cmd->flags, - cmd->context_id, cmd->timestamp, cmd->numcmds, cmd->numobjs); - } - } else if (request == IOCTL_KGSL_GPUOBJ_SYNC) { - struct kgsl_gpuobj_sync *cmd = (struct kgsl_gpuobj_sync *)argp; - struct kgsl_gpuobj_sync_obj *objs = (struct kgsl_gpuobj_sync_obj *)(cmd->objs); - - if (thneed->debug >= 2) { - printf("IOCTL_KGSL_GPUOBJ_SYNC count:%d ", cmd->count); - for (int i = 0; i < cmd->count; i++) { - printf(" -- offset:0x%lx len:0x%lx id:%d op:%d ", objs[i].offset, objs[i].length, objs[i].id, objs[i].op); - } - printf("\n"); - } - - if (thneed->record) { - thneed->cmds.push_back(unique_ptr(new - CachedSync(thneed, string((char *)objs, sizeof(struct kgsl_gpuobj_sync_obj)*cmd->count)))); - } - } else if (request == IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID) { - struct kgsl_device_waittimestamp_ctxtid *cmd = (struct kgsl_device_waittimestamp_ctxtid *)argp; - if (thneed->debug >= 1) { - printf("IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID: context_id: %d timestamp: %d timeout: %d\n", - cmd->context_id, cmd->timestamp, cmd->timeout); - } - } else if (request == IOCTL_KGSL_SETPROPERTY) { - if (thneed->debug >= 1) { - struct kgsl_device_getproperty *prop = (struct kgsl_device_getproperty *)argp; - printf("IOCTL_KGSL_SETPROPERTY: 0x%x sizebytes:%zu\n", prop->type, prop->sizebytes); - if (thneed->debug >= 2) { - hexdump((uint8_t *)prop->value, prop->sizebytes); - if (prop->type == KGSL_PROP_PWR_CONSTRAINT) { - struct kgsl_device_constraint *constraint = (struct kgsl_device_constraint *)prop->value; - hexdump((uint8_t *)constraint->data, constraint->size); - } - } - } - } else if (request == IOCTL_KGSL_DRAWCTXT_CREATE || request == IOCTL_KGSL_DRAWCTXT_DESTROY) { - // this happens - } else if (request == IOCTL_KGSL_GPUOBJ_ALLOC || request == IOCTL_KGSL_GPUOBJ_FREE) { - // this happens - } else { - if (thneed->debug >= 1) { - printf("other ioctl %lx\n", request); - } - } - } - - int ret = my_ioctl(filedes, request, argp); - // NOTE: This error message goes into stdout and messes up pyenv - // if (ret != 0) printf("ioctl returned %d with errno %d\n", ret, errno); - return ret; -} - -} - -// *********** GPUMalloc *********** - -GPUMalloc::GPUMalloc(int size, int fd) { - struct kgsl_gpuobj_alloc alloc; - memset(&alloc, 0, sizeof(alloc)); - alloc.size = size; - alloc.flags = 0x10000a00; - ioctl(fd, IOCTL_KGSL_GPUOBJ_ALLOC, &alloc); - void *addr = mmap64(NULL, alloc.mmapsize, 0x3, 0x1, fd, alloc.id*0x1000); - assert(addr != MAP_FAILED); - - base = (uint64_t)addr; - remaining = size; -} - -GPUMalloc::~GPUMalloc() { - // TODO: free the GPU malloced area -} - -void *GPUMalloc::alloc(int size) { - void *ret = (void*)base; - size = (size+0xff) & (~0xFF); - assert(size <= remaining); - remaining -= size; - base += size; - return ret; -} - -// *********** CachedSync, at the ioctl layer *********** - -void CachedSync::exec() { - struct kgsl_gpuobj_sync cmd; - - cmd.objs = (uint64_t)data.data(); - cmd.obj_len = data.length(); - cmd.count = data.length() / sizeof(struct kgsl_gpuobj_sync_obj); - - int ret = ioctl(thneed->fd, IOCTL_KGSL_GPUOBJ_SYNC, &cmd); - assert(ret == 0); -} - -// *********** CachedCommand, at the ioctl 
layer *********** - -CachedCommand::CachedCommand(Thneed *lthneed, struct kgsl_gpu_command *cmd) { - thneed = lthneed; - assert(cmd->numsyncs == 0); - - memcpy(&cache, cmd, sizeof(cache)); - - if (cmd->numcmds > 0) { - cmds = make_unique(cmd->numcmds); - memcpy(cmds.get(), (void *)cmd->cmdlist, sizeof(struct kgsl_command_object)*cmd->numcmds); - cache.cmdlist = (uint64_t)cmds.get(); - for (int i = 0; i < cmd->numcmds; i++) { - void *nn = thneed->ram->alloc(cmds[i].size); - memcpy(nn, (void*)cmds[i].gpuaddr, cmds[i].size); - cmds[i].gpuaddr = (uint64_t)nn; - } - } - - if (cmd->numobjs > 0) { - objs = make_unique(cmd->numobjs); - memcpy(objs.get(), (void *)cmd->objlist, sizeof(struct kgsl_command_object)*cmd->numobjs); - cache.objlist = (uint64_t)objs.get(); - for (int i = 0; i < cmd->numobjs; i++) { - void *nn = thneed->ram->alloc(objs[i].size); - memset(nn, 0, objs[i].size); - objs[i].gpuaddr = (uint64_t)nn; - } - } - - kq = thneed->ckq; - thneed->ckq.clear(); -} - -void CachedCommand::exec() { - cache.timestamp = ++thneed->timestamp; - int ret = ioctl(thneed->fd, IOCTL_KGSL_GPU_COMMAND, &cache); - - if (thneed->debug >= 1) printf("CachedCommand::exec got %d\n", ret); - - if (thneed->debug >= 2) { - for (auto &it : kq) { - it->debug_print(false); - } - } - - assert(ret == 0); -} - -// *********** Thneed *********** - -Thneed::Thneed(bool do_clinit, cl_context _context) { - // TODO: QCOM2 actually requires a different context - //context = _context; - if (do_clinit) clinit(); - assert(g_fd != -1); - fd = g_fd; - ram = make_unique(0x80000, fd); - timestamp = -1; - g_thneed = this; - char *thneed_debug_env = getenv("THNEED_DEBUG"); - debug = (thneed_debug_env != NULL) ? atoi(thneed_debug_env) : 0; -} - -void Thneed::wait() { - struct kgsl_device_waittimestamp_ctxtid wait; - wait.context_id = context_id; - wait.timestamp = timestamp; - wait.timeout = -1; - - uint64_t tb = nanos_since_boot(); - int wret = ioctl(fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID, &wait); - uint64_t te = nanos_since_boot(); - - if (debug >= 1) printf("wait %d after %lu us\n", wret, (te-tb)/1000); -} - -void Thneed::execute(float **finputs, float *foutput, bool slow) { - uint64_t tb, te; - if (debug >= 1) tb = nanos_since_boot(); - - // ****** copy inputs - copy_inputs(finputs, true); - - // ****** run commands - int i = 0; - for (auto &it : cmds) { - ++i; - if (debug >= 1) printf("run %2d @ %7lu us: ", i, (nanos_since_boot()-tb)/1000); - it->exec(); - if ((i == cmds.size()) || slow) wait(); - } - - // ****** copy outputs - copy_output(foutput); - - if (debug >= 1) { - te = nanos_since_boot(); - printf("model exec in %lu us\n", (te-tb)/1000); - } -} diff --git a/system/manager/build.py b/system/manager/build.py index 956336a604fdff..39be6e6abdfc14 100755 --- a/system/manager/build.py +++ b/system/manager/build.py @@ -31,7 +31,7 @@ def build(spinner: Spinner, dirty: bool = False, minimal: bool = False) -> None: compile_output: list[bytes] = [] for n in (nproc, nproc/2, 1): compile_output.clear() - scons: subprocess.Popen = subprocess.Popen(["scons", f"-j{int(n)}", "--cache-populate", *extra_args], cwd=BASEDIR, env=env, stderr=subprocess.PIPE) + scons: subprocess.Popen = subprocess.Popen(["scons", f"-j{int(n)}", "--cache-disable", "--cache-populate", *extra_args], cwd=BASEDIR, env=env, stderr=subprocess.PIPE) assert scons.stderr is not None # Read progress from stderr and update spinner diff --git a/system/manager/process_config.py b/system/manager/process_config.py index bdb549fa4136e9..eca5184c9243fe 100644 --- 
a/system/manager/process_config.py
+++ b/system/manager/process_config.py
@@ -70,7 +70,7 @@ def and_(*fns):
   PythonProcess("micd", "system.micd", iscar),
   PythonProcess("timed", "system.timed", always_run, enabled=not PC),
-  PythonProcess("dmonitoringmodeld", "selfdrive.modeld.dmonitoringmodeld", driverview, enabled=(not PC or WEBCAM)),
+  NativeProcess("dmonitoringmodeld", "selfdrive/modeld", ["./dmonitoringmodeld"], driverview, enabled=(not PC or WEBCAM)),
   NativeProcess("encoderd", "system/loggerd", ["./encoderd"], only_onroad),
   NativeProcess("stream_encoderd", "system/loggerd", ["./encoderd", "--stream"], notcar),
   NativeProcess("loggerd", "system/loggerd", ["./loggerd"], logging),
diff --git a/tinygrad_repo b/tinygrad_repo
index f51aa0fc7cdbac..f45d178a55a21a 160000
--- a/tinygrad_repo
+++ b/tinygrad_repo
@@ -1 +1 @@
-Subproject commit f51aa0fc7cdbac710e640172db280cfb747d2718
+Subproject commit f45d178a55a21a1a5c890b628ff6b40f9056c1b5
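For reviewers who want to poke at the compiled artifact outside of modeld, here is a minimal sketch of loading and running the pickled tinygrad model. It assumes the SConscript rule above has already produced `models/supercombo_tinygrad.pkl`, that the pickled object is callable with keyword `Tensor` arguments and returns a dict with an `'outputs'` tensor (that is how the new `ModelState` code uses it), and that the backend env var (`QCOM=1` on device, `GPU=1` on PC) matches what `compile3.py` targeted. The path and zero-filled inputs are illustrative only; this script is not part of the diff.

```python
#!/usr/bin/env python3
# Hypothetical smoke test for the pickled tinygrad model; not part of this PR.
import os
import pickle
from pathlib import Path

import numpy as np

os.environ.setdefault('GPU', '1')  # use QCOM=1 on device; must match the compile3.py backend
from tinygrad.tensor import Tensor
from openpilot.selfdrive.modeld.constants import ModelConstants

MODEL_PKL_PATH = Path('selfdrive/modeld/models/supercombo_tinygrad.pkl')

with open(MODEL_PKL_PATH, 'rb') as f:
  model_run = pickle.load(f)

# Zero-filled inputs with the shapes/dtypes from the new ModelState.inputs dict.
inputs = {
  'input_imgs': np.zeros((1, 12, 128, 256), dtype=np.float16),
  'big_input_imgs': np.zeros((1, 12, 128, 256), dtype=np.float16),
  'desire': np.zeros((1, ModelConstants.HISTORY_BUFFER_LEN+1, ModelConstants.DESIRE_LEN), dtype=np.float16),
  'traffic_convention': np.zeros((1, ModelConstants.TRAFFIC_CONVENTION_LEN), dtype=np.float16),
  'lateral_control_params': np.zeros((1, ModelConstants.LATERAL_CONTROL_PARAMS_LEN), dtype=np.float16),
  'prev_desired_curv': np.zeros((1, ModelConstants.HISTORY_BUFFER_LEN+1, ModelConstants.PREV_DESIRED_CURV_LEN), dtype=np.float16),
  'features_buffer': np.zeros((1, ModelConstants.HISTORY_BUFFER_LEN, ModelConstants.FEATURE_LEN), dtype=np.float16),
}

# Call the compiled model the same way modeld.py does and flatten the output vector.
output = model_run(**{k: Tensor(v) for k, v in inputs.items()})['outputs'].numpy().flatten()
print(output.shape, output.dtype)
```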