diff --git a/open-amp/README.md b/open-amp/README.md index 35ce243..74b9b71 100644 --- a/open-amp/README.md +++ b/open-amp/README.md @@ -96,6 +96,9 @@ library for it project: enabled on buffers. * **WITH_DCACHE_RSC_TABLE** (default OFF): Build with data cache operations enabled on resource table. +* **WITH_DCACHE** (default OFF): Build with all cache operations + enabled. When set to ON, cache operations for vrings, buffers and resource + table are enabled. * **RPMSG_BUFFER_SIZE** (default 512): adjust the size of the RPMsg buffers. The default value of the RPMsg size is compatible with the Linux Kernel hard coded value. If you AMP configuration is Linux kernel host/ OpenAMP remote, diff --git a/open-amp/cmake/options.cmake b/open-amp/cmake/options.cmake index 4f89589..35a3333 100644 --- a/open-amp/cmake/options.cmake +++ b/open-amp/cmake/options.cmake @@ -78,22 +78,37 @@ if (NOT WITH_VIRTIO_DEVICE AND NOT WITH_VIRTIO_SLAVE) add_definitions(-DVIRTIO_DRIVER_ONLY) endif (NOT WITH_VIRTIO_DEVICE AND NOT WITH_VIRTIO_SLAVE) +option (WITH_VIRTIO_MMIO_DRV "Build with virtio mmio driver support enabled" OFF) + +if (WITH_VIRTIO_MMIO_DRV) + add_definitions(-DWITH_VIRTIO_MMIO_DRV) +endif (WITH_VIRTIO_MMIO_DRV) + +option (WITH_DCACHE "Build with all cache operations enabled" OFF) + +if (WITH_DCACHE) + add_definitions(-DVIRTIO_USE_DCACHE) +endif (WITH_DCACHE) + option (WITH_DCACHE_VRINGS "Build with vrings cache operations enabled" OFF) if (WITH_DCACHE_VRINGS) add_definitions(-DVIRTIO_CACHED_VRINGS) + message(DEPRECATION "deprecated cmake option replaced by WITH_DCACHE") endif (WITH_DCACHE_VRINGS) -option (WITH_DCACHE_BUFFERS "Build with vrings cache operations enabled" OFF) +option (WITH_DCACHE_BUFFERS "Build with buffers cache operations enabled" OFF) if (WITH_DCACHE_BUFFERS) add_definitions(-DVIRTIO_CACHED_BUFFERS) + message(DEPRECATION "deprecated cmake option replaced by WITH_DCACHE")
endif (WITH_DCACHE_BUFFERS) option (WITH_DCACHE_RSC_TABLE "Build with resource table cache operations enabled" OFF) if (WITH_DCACHE_RSC_TABLE) add_definitions(-DVIRTIO_CACHED_RSC_TABLE) + message(DEPRECATION "deprecated cmake option replaced by WITH_DCACHE") endif (WITH_DCACHE_RSC_TABLE) # Set the complication flags diff --git a/open-amp/lib/CMakeLists.txt b/open-amp/lib/CMakeLists.txt index a363256..ff2e399 100644 --- a/open-amp/lib/CMakeLists.txt +++ b/open-amp/lib/CMakeLists.txt @@ -10,6 +10,9 @@ collect (PROJECT_LIB_SOURCES version.c) add_subdirectory (virtio) add_subdirectory (rpmsg) add_subdirectory (remoteproc) +if (WITH_VIRTIO_MMIO_DRV) +add_subdirectory (virtio_mmio) +endif (WITH_VIRTIO_MMIO_DRV) if (WITH_PROXY) add_subdirectory (proxy) diff --git a/open-amp/lib/include/openamp/remoteproc.h b/open-amp/lib/include/openamp/remoteproc.h index 1c5d240..23f9178 100644 --- a/open-amp/lib/include/openamp/remoteproc.h +++ b/open-amp/lib/include/openamp/remoteproc.h @@ -25,19 +25,15 @@ extern "C" { #define RPROC_MAX_NAME_LEN 32 /** - * struct resource_table - firmware resource table header - * @ver: version number - * @num: number of resource entries - * @reserved: reserved (must be zero) - * @offset: array of offsets pointing at the various resource entries + * @brief Resource table header * * A resource table is essentially a list of system resources required * by the remote remoteproc. It may also include configuration entries. * If needed, the remote remoteproc firmware should contain this table * as a dedicated ".resource_table" ELF section. * - * Some resources entries are mere announcements, where the host is informed - * of specific remoteproc configuration. Other entries require the host to + * Some resource entries are mere announcements, where the host is informed + * of specific remoteproc configurations. Other entries require the host to * do something (e.g. allocate a system resource).
Sometimes a negotiation * is expected, where the firmware requests a resource, and once allocated, * the host should provide back its details (e.g. address of an allocated @@ -49,28 +45,36 @@ extern "C" { * in the table. * * Immediately following this header are the resource entries themselves, - * each of which begins with a resource entry header (as described below). + * each of which begins with a resource entry header. */ METAL_PACKED_BEGIN struct resource_table { + /** Version number */ uint32_t ver; + + /** Number of resource entries */ uint32_t num; + + /** Reserved (must be zero) */ uint32_t reserved[2]; + + /** Array of offsets pointing at the various resource entries */ uint32_t offset[0]; } METAL_PACKED_END; /** - * struct fw_rsc_hdr - firmware resource entry header - * @type: resource type - * @data: resource data + * @brief Resource table entry header * - * Every resource entry begins with a 'struct fw_rsc_hdr' header providing - * its @type. The content of the entry itself will immediately follow + * Every resource entry begins with this firmware resource header providing + * its \ref type. The content of the entry itself will immediately follow * this header, and it should be parsed according to the resource type. */ METAL_PACKED_BEGIN struct fw_rsc_hdr { + /** Resource type matching the type field of the structure in \ref data */ uint32_t type; + + /** Resource data */ uint8_t data[0]; } METAL_PACKED_END; @@ -110,13 +114,7 @@ enum fw_resource_type { #define FW_RSC_U32_ADDR_ANY 0xFFFFFFFFUL /** - * struct fw_rsc_carveout - physically contiguous memory request - * @da: device address - * @pa: physical address - * @len: length (in bytes) - * @flags: iommu protection flags - * @reserved: reserved (must be zero) - * @name: human-readable name of the requested memory region + * @brief Resource table physically contiguous memory request entry * * This resource entry requests the host to allocate a physically contiguous * memory region. 
@@ -127,64 +125,61 @@ enum fw_resource_type { * * Allocating memory this way helps utilizing the reserved physical memory * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries - * needed to map it (in case @rproc is using an IOMMU). Reducing the TLB + * needed to map it (in case rproc is using an IOMMU). Reducing the TLB * pressure is important; it may have a substantial impact on performance. * - * If the firmware is compiled with static addresses, then @da should specify - * the expected device address of this memory region. If @da is set to + * If the firmware is compiled with static addresses, then \ref da should specify + * the expected device address of this memory region. If \ref da is set to * FW_RSC_ADDR_ANY, then the host will dynamically allocate it, and then - * overwrite @da with the dynamically allocated address. + * overwrite \ref da with the dynamically allocated address. * - * We will always use @da to negotiate the device addresses, even if it - * isn't using an iommu. In that case, though, it will obviously contain + * We will always use \ref da to negotiate the device addresses, even if it + * isn't using an IOMMU. In that case, though, it will obviously contain * physical addresses. * - * Some remote remoteprocs needs to know the allocated physical address - * even if they do use an iommu. This is needed, e.g., if they control + * Some remote remoteprocs need to know the allocated physical address + * even if they do use an IOMMU. This is needed, e.g., if they control * hardware accelerators which access the physical memory directly (this * is the case with OMAP4 for instance). In that case, the host will - * overwrite @pa with the dynamically allocated physical address. + * overwrite \ref pa with the dynamically allocated physical address. 
* Generally we don't want to expose physical addresses if we don't have to * (remote remoteprocs are generally _not_ trusted), so we might want to * change this to happen _only_ when explicitly required by the hardware. - * - * @flags is used to provide IOMMU protection flags, and @name should - * (optionally) contain a human readable name of this carveout region - * (mainly for debugging purposes). */ METAL_PACKED_BEGIN struct fw_rsc_carveout { + /** Resource carveout has type 0 */ uint32_t type; + + /** Device address */ uint32_t da; + + /** Physical address */ uint32_t pa; + + /** Length in bytes */ uint32_t len; + + /** IOMMU protection flags */ uint32_t flags; + + /** Reserved (must be zero) */ uint32_t reserved; + + /** Optional human-readable name of the requested memory region used for debugging */ uint8_t name[RPROC_MAX_NAME_LEN]; } METAL_PACKED_END; /** - * struct fw_rsc_devmem - iommu mapping request - * @da: device address - * @pa: physical address - * @len: length (in bytes) - * @flags: iommu protection flags - * @reserved: reserved (must be zero) - * @name: human-readable name of the requested region to be mapped + * @brief Resource table IOMMU mapping request entry * - * This resource entry requests the host to iommu map a physically contiguous + * This resource entry requests the host to IOMMU map a physically contiguous * memory region. This is needed in case the remote remoteproc requires * access to certain memory-based peripherals; _never_ use it to access * regular memory. * * This is obviously only needed if the remote remoteproc is accessing memory - * via an iommu. - * - * @da should specify the required device address, @pa should specify - * the physical address we want to map, @len should specify the size of - * the mapping and @flags is the IOMMU protection flags. As always, @name may - * (optionally) contain a human readable name of this mapping (mainly for - * debugging purposes). + * via an IOMMU. 
* * Note: at this point we just "trust" those devmem entries to contain valid * physical addresses, but this isn't safe and will be changed: eventually we @@ -194,81 +189,89 @@ struct fw_rsc_carveout { */ METAL_PACKED_BEGIN struct fw_rsc_devmem { + /** IOMMU mapping request has type 1 */ uint32_t type; + + /** Device address */ uint32_t da; + + /** Physical address to map */ uint32_t pa; + + /** Length of the mapping in bytes */ uint32_t len; + + /** IOMMU protection flags */ uint32_t flags; + + /** Reserved (must be zero) */ uint32_t reserved; + + /** Optional human-readable name of the requested memory region used for debugging */ uint8_t name[RPROC_MAX_NAME_LEN]; } METAL_PACKED_END; /** - * struct fw_rsc_trace - trace buffer declaration - * @da: device address - * @len: length (in bytes) - * @reserved: reserved (must be zero) - * @name: human-readable name of the trace buffer + * @brief Resource table trace buffer declaration entry * * This resource entry provides the host information about a trace buffer * into which the remote remoteproc will write log messages. * - * @da specifies the device address of the buffer, @len specifies - * its size, and @name may contain a human readable name of the trace buffer. - * * After booting the remote remoteproc, the trace buffers are exposed to the * user via debugfs entries (called trace0, trace1, etc..). 
*/ METAL_PACKED_BEGIN struct fw_rsc_trace { + /** Trace buffer entry has type 2 */ uint32_t type; + + /** Device address of the buffer */ uint32_t da; + + /** Length of the buffer in bytes */ uint32_t len; + + /** Reserved (must be zero) */ uint32_t reserved; + + /** Optional human-readable name of the requested memory region used for debugging */ uint8_t name[RPROC_MAX_NAME_LEN]; } METAL_PACKED_END; /** - * struct fw_rsc_vdev_vring - vring descriptor entry - * @da: device address - * @align: the alignment between the consumer and producer parts of the vring - * @num: num of buffers supported by this vring (must be power of two) - * @notifyid is a unique rproc-wide notify index for this vring. This notify - * index is used when kicking a remote remoteproc, to let it know that this - * vring is triggered. - * @reserved: reserved (must be zero) + * @brief Resource table vring descriptor entry * * This descriptor is not a resource entry by itself; it is part of the - * vdev resource type (see below). - * - * Note that @da should either contain the device address where - * the remote remoteproc is expecting the vring, or indicate that - * dynamically allocation of the vring's device address is supported. + * \ref fw_rsc_vdev resource type. */ METAL_PACKED_BEGIN struct fw_rsc_vdev_vring { + /** + * The device address where the remoteproc is expecting the vring, or + * FW_RSC_U32_ADDR_ANY/FW_RSC_U64_ADDR_ANY to indicate that dynamic + * allocation of the vring's device address is supported + */ uint32_t da; + + /** The alignment between the consumer and producer parts of the vring */ uint32_t align; + + /** Number of buffers supported by this vring (must be power of two) */ uint32_t num; + + /** + * A unique rproc-wide notify index for this vring. 
This notify index is + * used when kicking a remote remoteproc, to let it know that this vring + * is triggered + */ uint32_t notifyid; + + /** Reserved (must be zero) */ uint32_t reserved; } METAL_PACKED_END; /** - * struct fw_rsc_vdev - virtio device header - * @id: virtio device id (as in virtio_ids.h) - * @notifyid is a unique rproc-wide notify index for this vdev. This notify - * index is used when kicking a remote remoteproc, to let it know that the - * status/features of this vdev have changes. - * @dfeatures specifies the virtio device features supported by the firmware - * @gfeatures is a place holder used by the host to write back the - * negotiated features that are supported by both sides. - * @config_len is the size of the virtio config space of this vdev. The config - * space lies in the resource table immediate after this vdev header. - * @status is a place holder where the host will indicate its virtio progress. - * @num_of_vrings indicates how many vrings are described in this vdev header - * @reserved: reserved (must be zero) - * @vring is an array of @num_of_vrings entries of 'struct fw_rsc_vdev_vring'. + * @brief Resource table virtio device entry * * This resource is a virtio device header: it provides information about * the vdev, and is then used by the host and its peer remote remoteprocs @@ -283,29 +286,56 @@ struct fw_rsc_vdev_vring { * remoteprocs. We use the name 'gfeatures' to comply with virtio's terms, * though there isn't really any virtualized guest OS here: it's the host * which is responsible for negotiating the final features. - * Yeah, it's a bit confusing. * * Note: immediately following this structure is the virtio config space for * this vdev (which is specific to the vdev; for more info, read the virtio - * spec). the size of the config space is specified by @config_len. + * spec). 
*/ METAL_PACKED_BEGIN struct fw_rsc_vdev { + /** Virtio device header has type 3 */ uint32_t type; + + /** Virtio device id (as in virtio_ids.h) */ uint32_t id; + + /** + * A unique rproc-wide notify index for this vdev. This notify index is + * used when kicking a remote remoteproc, to let it know that the + * status/features of this vdev have changed. + */ uint32_t notifyid; + + /** The virtio device features supported by the firmware */ uint32_t dfeatures; + + /** + * A place holder used by the host to write back the negotiated features + * that are supported by both sides + */ uint32_t gfeatures; + + /** + * The size of the virtio config space of this vdev. The config space lies + * in the resource table immediately after this vdev header + */ uint32_t config_len; + + /** A place holder where the host will indicate its virtio progress */ uint8_t status; + + /** Number of vrings described in this vdev header */ uint8_t num_of_vrings; + + /** Reserved (must be zero) */ uint8_t reserved[2]; + + /** An array of \ref num_of_vrings entries of \ref fw_rsc_vdev_vring */ struct fw_rsc_vdev_vring vring[0]; } METAL_PACKED_END; /** - * struct fw_rsc_vendor - remote processor vendor specific resource - * @len: length of the resource + * @brief Resource table remote processor vendor specific entry * * This resource entry tells the host the vendor specific resource * required by the remote.
@@ -315,7 +345,10 @@ struct fw_rsc_vdev { */ METAL_PACKED_BEGIN struct fw_rsc_vendor { + /** Vendor specific resource type can be values 128-512 */ uint32_t type; + + /** Length of the resource */ uint32_t len; } METAL_PACKED_END; @@ -323,109 +356,125 @@ struct loader_ops; struct image_store_ops; struct remoteproc_ops; -/** - * struct remoteproc_mem - * - * This structure presents the memory used by the remote processor - * - * @da: device memory - * @pa: physical memory - * @size: size of the memory - * @io: pointer to the I/O region - * @node: list node - */ +/** @brief Memory used by the remote processor */ struct remoteproc_mem { + /** Device memory */ metal_phys_addr_t da; + + /** Physical memory */ metal_phys_addr_t pa; + + /** Size of the memory */ size_t size; + + /** Optional human-readable name of the memory region */ char name[RPROC_MAX_NAME_LEN]; + + /** Pointer to the I/O region */ struct metal_io_region *io; + + /** List node */ struct metal_list node; }; /** - * struct remoteproc + * @brief A remote processor instance * * This structure is maintained by the remoteproc to represent the remote * processor instance. This structure acts as a prime parameter to use * the remoteproc APIs. 
- * - * @bootaddr: boot address - * @loader: executable loader - * @lock: mutex lock - * @ops: remoteproc operations - * @rsc_table: pointer to resource table - * @rsc_len: length of resource table - * @rsc_io: metal I/O region of resource table - * @mems: remoteproc memories - * @vdevs: remoteproc virtio devices - * @bitmap: bitmap for notify IDs for remoteproc subdevices - * @state: remote processor state - * @priv: private data */ struct remoteproc { + /** Mutex lock */ metal_mutex_t lock; + + /** Pointer to the resource table */ void *rsc_table; + + /** Length of the resource table */ size_t rsc_len; + + /** Metal I/O region of the resource table */ struct metal_io_region *rsc_io; + + /** Remoteproc memories */ struct metal_list mems; + + /** Remoteproc virtio devices */ struct metal_list vdevs; + + /** Bitmap for notify IDs for remoteproc subdevices */ unsigned long bitmap; + + /** Remoteproc operations */ const struct remoteproc_ops *ops; + + /** Boot address */ metal_phys_addr_t bootaddr; + + /** Executable loader */ const struct loader_ops *loader; + + /** Remote processor state */ unsigned int state; + + /** Private data */ void *priv; }; /** - * struct remoteproc_ops - * - * remoteproc operations needs to be implemented by each remoteproc driver + * @brief Remoteproc operations to manage a remoteproc instance * - * @init: initialize the remoteproc instance - * @remove: remove the remoteproc instance - * @mmap: memory mapped the memory with physical address or destination - * address as input. - * @handle_rsc: handle the vendor specific resource - * @config: configure the remoteproc to make it ready to load and run - * executable - * @start: kick the remoteproc to run application - * @stop: stop the remoteproc from running application, the resource such as - * memory may not be off. - * @shutdown: shutdown the remoteproc and release its resources. - * @notify: notify the remote - * @get_mem: get remoteproc memory I/O region. 
+ * Remoteproc operations need to be implemented by each remoteproc driver */ struct remoteproc_ops { + /** Initialize the remoteproc instance */ struct remoteproc *(*init)(struct remoteproc *rproc, const struct remoteproc_ops *ops, void *arg); + + /** Remove the remoteproc instance */ void (*remove)(struct remoteproc *rproc); + + /** Memory map the memory with physical address or destination address as input */ void *(*mmap)(struct remoteproc *rproc, metal_phys_addr_t *pa, metal_phys_addr_t *da, size_t size, unsigned int attribute, struct metal_io_region **io); + + /** Handle the vendor specific resource */ int (*handle_rsc)(struct remoteproc *rproc, void *rsc, size_t len); + + /** Configure the remoteproc to make it ready to load and run the executable */ int (*config)(struct remoteproc *rproc, void *data); + + /** Kick the remoteproc to run the application */ int (*start)(struct remoteproc *rproc); + + /** + * Stop the remoteproc from running the application, the resource such as + * memory may not be off + */ int (*stop)(struct remoteproc *rproc); + + /** Shutdown the remoteproc and release its resources */ int (*shutdown)(struct remoteproc *rproc); + + /** Notify the remote */ int (*notify)(struct remoteproc *rproc, uint32_t id); + /** - * get_mem - * - * get remoteproc memory I/O region by either name, virtual + * @brief Get remoteproc memory I/O region by either name, virtual * address, physical address or device address. 
* - * @rproc - pointer to remoteproc instance - * @name - memory name - * @pa - physical address - * @da - device address - * @va - virtual address - * @size - memory size - * @buf - pointer to remoteproc_mem struct object to store result + * @param rproc Pointer to remoteproc instance + * @param name Memory name + * @param pa Physical address + * @param da Device address + * @param va Virtual address + * @param size Memory size + * @param buf Pointer to remoteproc_mem struct object to store result * - * @returns remoteproc memory pointed by buf if success, otherwise NULL + * @return remoteproc memory pointed by buf if success, otherwise NULL */ struct remoteproc_mem *(*get_mem)(struct remoteproc *rproc, const char *name, diff --git a/open-amp/lib/include/openamp/remoteproc_loader.h b/open-amp/lib/include/openamp/remoteproc_loader.h index 14d3716..d928fcf 100644 --- a/open-amp/lib/include/openamp/remoteproc_loader.h +++ b/open-amp/lib/include/openamp/remoteproc_loader.h @@ -54,50 +54,53 @@ extern "C" { /* Remoteproc loader reserved mask */ #define RPROC_LOADER_RESERVED_MASK 0x0F000000L -/** - * struct image_store_ops - user defined image store operations - * @open: user defined callback to open the "firmware" to prepare loading - * @close: user defined callback to close the "firmware" to clean up - * after loading - * @load: user defined callback to load the firmware contents to target - * memory or local memory - * @features: loader supported features. e.g. 
seek - */ +/** @brief User-defined image store operations */ struct image_store_ops { + /** User-defined callback to open the "firmware" to prepare loading */ int (*open)(void *store, const char *path, const void **img_data); + + /** User-defined callback to close the "firmware" to clean up after loading */ void (*close)(void *store); + + /** User-defined callback to load the firmware contents to target memory or local memory */ int (*load)(void *store, size_t offset, size_t size, const void **data, metal_phys_addr_t pa, struct metal_io_region *io, char is_blocking); + + /** Loader supported features. e.g. seek */ unsigned int features; }; -/** - * struct loader_ops - loader operations - * @load_header: define how to get the executable headers - * @load_data: define how to load the target data - * @locate_rsc_table: define how to get the resource table target address, - * offset to the ELF image file and size of the resource - * table. - * @release: define how to release the loader - * @get_entry: get entry address - * @get_load_state: get load state from the image information - */ +/** @brief Loader operations */ struct loader_ops { + /** Define how to get the executable headers */ int (*load_header)(const void *img_data, size_t offset, size_t len, void **img_info, int last_state, size_t *noffset, size_t *nlen); + + /** Define how to load the target data */ int (*load_data)(struct remoteproc *rproc, const void *img_data, size_t offset, size_t len, void **img_info, int last_load_state, metal_phys_addr_t *da, size_t *noffset, size_t *nlen, unsigned char *padding, size_t *nmemsize); + + /** + * Define how to get the resource table target address, offset to the ELF + * image file and size of the resource table + */ int (*locate_rsc_table)(void *img_info, metal_phys_addr_t *da, size_t *offset, size_t *size); + + /** Define how to release the loader */ void (*release)(void *img_info); + + /** Get entry address */ metal_phys_addr_t (*get_entry)(void *img_info); + + /** 
Get load state from the image information */ int (*get_load_state)(void *img_info); }; diff --git a/open-amp/lib/include/openamp/remoteproc_virtio.h b/open-amp/lib/include/openamp/remoteproc_virtio.h index 31e575e..0b747ca 100644 --- a/open-amp/lib/include/openamp/remoteproc_virtio.h +++ b/open-amp/lib/include/openamp/remoteproc_virtio.h @@ -26,31 +26,37 @@ extern "C" { /* cache invalidation helpers for resource table */ #ifdef VIRTIO_CACHED_RSC_TABLE -#define RSC_TABLE_FLUSH(x, s) metal_cache_flush(x, s) -#define RSC_TABLE_INVALIDATE(x, s) metal_cache_invalidate(x, s) +#warning "VIRTIO_CACHED_RSC_TABLE is deprecated, please use VIRTIO_USE_DCACHE" +#endif +#if defined(VIRTIO_CACHED_RSC_TABLE) || defined(VIRTIO_USE_DCACHE) +#define RSC_TABLE_FLUSH(x, s) CACHE_FLUSH(x, s) +#define RSC_TABLE_INVALIDATE(x, s) CACHE_INVALIDATE(x, s) #else #define RSC_TABLE_FLUSH(x, s) do { } while (0) #define RSC_TABLE_INVALIDATE(x, s) do { } while (0) -#endif /* VIRTIO_CACHED_RSC_TABLE */ +#endif /* VIRTIO_CACHED_RSC_TABLE || VIRTIO_USE_DCACHE */ /* define vdev notification function user should implement */ typedef int (*rpvdev_notify_func)(void *priv, uint32_t id); -/** - * struct remoteproc_virtio - * @priv pointer to private data - * @vdev_rsc address of vdev resource - * @vdev_rsc_io metal I/O region of vdev_info, can be NULL - * @notify notification function - * @vdev virtio device - * @node list node - */ +/** @brief Virtio structure for remoteproc instance */ struct remoteproc_virtio { + /** Pointer to private data */ void *priv; + + /** Address of vdev resource */ void *vdev_rsc; + + /** Metal I/O region of vdev_info, can be NULL */ struct metal_io_region *vdev_rsc_io; + + /** Notification function */ rpvdev_notify_func notify; + + /** Virtio device */ struct virtio_device vdev; + + /** List node */ struct metal_list node; }; diff --git a/open-amp/lib/include/openamp/rpmsg.h b/open-amp/lib/include/openamp/rpmsg.h index a39b208..9cf1e74 100644 --- 
a/open-amp/lib/include/openamp/rpmsg.h +++ b/open-amp/lib/include/openamp/rpmsg.h @@ -55,76 +55,90 @@ typedef void (*rpmsg_ns_bind_cb)(struct rpmsg_device *rdev, const char *name, uint32_t dest); /** - * struct rpmsg_endpoint - binds a local rpmsg address to its user - * @name: name of the service supported - * @rdev: pointer to the rpmsg device - * @addr: local address of the endpoint - * @dest_addr: address of the default remote endpoint binded. - * @cb: user rx callback, return value of this callback is reserved - * for future use, for now, only allow RPMSG_SUCCESS as return value. - * @ns_unbind_cb: end point service unbind callback, called when remote - * ept is destroyed. - * @node: end point node. - * @priv: private data for the driver's use + * @brief Structure that binds a local RPMsg address to its user * - * In essence, an rpmsg endpoint represents a listener on the rpmsg bus, as - * it binds an rpmsg address with an rx callback handler. + * In essence, an RPMsg endpoint represents a listener on the RPMsg bus, as + * it binds an RPMsg address with an rx callback handler. 
*/ struct rpmsg_endpoint { + /** Name of the service supported */ char name[RPMSG_NAME_SIZE]; + + /** Pointer to the RPMsg device */ struct rpmsg_device *rdev; + + /** Local address of the endpoint */ uint32_t addr; + + /** Address of the default remote endpoint binded */ uint32_t dest_addr; + + /** + * User rx callback, return value of this callback is reserved for future + * use, for now, only allow RPMSG_SUCCESS as return value + */ rpmsg_ept_cb cb; + + /** Endpoint service unbind callback, called when remote ept is destroyed */ rpmsg_ns_unbind_cb ns_unbind_cb; + + /** Endpoint node */ struct metal_list node; + + /** Private data for the driver's use */ void *priv; }; -/** - * struct rpmsg_device_ops - RPMsg device operations - * @send_offchannel_raw: send RPMsg data - * @hold_rx_buffer: hold RPMsg RX buffer - * @release_rx_buffer: release RPMsg RX buffer - * @get_tx_payload_buffer: get RPMsg TX buffer - * @send_offchannel_nocopy: send RPMsg data without copy - * @release_tx_buffer: release RPMsg TX buffer - */ +/** @brief RPMsg device operations */ struct rpmsg_device_ops { + /** Send RPMsg data */ int (*send_offchannel_raw)(struct rpmsg_device *rdev, uint32_t src, uint32_t dst, const void *data, int len, int wait); + + /** Hold RPMsg RX buffer */ void (*hold_rx_buffer)(struct rpmsg_device *rdev, void *rxbuf); + + /** Release RPMsg RX buffer */ void (*release_rx_buffer)(struct rpmsg_device *rdev, void *rxbuf); + + /** Get RPMsg TX buffer */ void *(*get_tx_payload_buffer)(struct rpmsg_device *rdev, uint32_t *len, int wait); + + /** Send RPMsg data without copy */ int (*send_offchannel_nocopy)(struct rpmsg_device *rdev, uint32_t src, uint32_t dst, - const void *data, int len); + const void *data, int len); + + /** Release RPMsg TX buffer */ int (*release_tx_buffer)(struct rpmsg_device *rdev, void *txbuf); }; -/** - * struct rpmsg_device - representation of a RPMsg device - * @endpoints: list of endpoints - * @ns_ept: name service endpoint - * @bitmap: table 
endpoint address allocation. - * @lock: mutex lock for rpmsg management - * @ns_bind_cb: callback handler for name service announcement without local - * endpoints waiting to bind. - * @ns_unbind_cb: callback handler for name service announcement, called when - * remote ept is destroyed. - * @ops: RPMsg device operations - * @support_ns: create/destroy namespace message - */ +/** @brief Representation of a RPMsg device */ struct rpmsg_device { + /** List of endpoints */ struct metal_list endpoints; + + /** Name service endpoint */ struct rpmsg_endpoint ns_ept; + + /** Table endpoint address allocation */ unsigned long bitmap[metal_bitmap_longs(RPMSG_ADDR_BMP_SIZE)]; + + /** Mutex lock for RPMsg management */ metal_mutex_t lock; + + /** Callback handler for name service announcement without local epts waiting to bind */ rpmsg_ns_bind_cb ns_bind_cb; + + /** Callback handler for name service announcement, called when remote ept is destroyed */ rpmsg_ns_bind_cb ns_unbind_cb; + + /** RPMsg device operations */ struct rpmsg_device_ops ops; + + /** Create/destroy namespace message */ bool support_ns; }; diff --git a/open-amp/lib/include/openamp/rpmsg_rpc_client_server.h b/open-amp/lib/include/openamp/rpmsg_rpc_client_server.h index dfbc6c3..f940784 100644 --- a/open-amp/lib/include/openamp/rpmsg_rpc_client_server.h +++ b/open-amp/lib/include/openamp/rpmsg_rpc_client_server.h @@ -47,77 +47,70 @@ struct rpmsg_rpc_request { unsigned char params[MAX_BUF_LEN]; }; -/** - * struct rpmsg_rpc_answer - rpc request message - * - * @id: service id - * @status: status of rpc - * @params: answer params - * - */ +/** @brief RPC request message */ METAL_PACKED_BEGIN struct rpmsg_rpc_answer { + /** Service ID */ uint32_t id; + + /** Status of RPC */ int32_t status; + + /** Answer params */ unsigned char params[MAX_BUF_LEN]; } METAL_PACKED_END; -/** - * struct rpmsg_rpc_services - table for services - * - * @id: service id - * @cb_function: id callback - * - */ +/** @brief Table for 
services */ struct rpmsg_rpc_services { + /** Service ID */ uint32_t id; + + /** ID callback */ rpmsg_rpc_syscall_cb cb_function; }; -/** - * struct rpmsg_rpc_client_services - table for client services - * - * @id: service id - * @app_cb: id callback - * - */ +/** @brief Table for client services */ struct rpmsg_rpc_client_services { + /** Service ID */ uint32_t id; + + /** ID callback */ app_cb cb; }; /** - * struct rpmsg_rpc_svr - server remote procedure call data + * @brief Server remote procedure call data * * RPMsg RPC will send request to endpoint - * - * @ept: rpmsg_endpoint structure - * @services: service table - * @n_services: number of services - * */ struct rpmsg_rpc_svr { + /** RPMsg destination endpoint structure */ struct rpmsg_endpoint ept; + + /** Service table */ const struct rpmsg_rpc_services *services; + + /** Number of services */ unsigned int n_services; }; /** - * struct rpmsg_rpc_clt - client remote procedure call data + * @brief Client remote procedure call data * * RPMsg RPC will send request to remote and * wait for callback. 
- * - * @ept: rpmsg_endpoint structure - * @shutdown_cb: shutdown callback function - * @services: service table - * @n_services: number of services - * */ struct rpmsg_rpc_clt { + /** RPMsg endpoint associated with the call */ struct rpmsg_endpoint ept; + + /** Shutdown callback function */ rpmsg_rpc_shutdown_cb shutdown_cb; + + /** Service table */ const struct rpmsg_rpc_client_services *services; + + /** Number of services */ unsigned int n_services; }; diff --git a/open-amp/lib/include/openamp/rpmsg_virtio.h b/open-amp/lib/include/openamp/rpmsg_virtio.h index 3d03e8d..aea2edf 100644 --- a/open-amp/lib/include/openamp/rpmsg_virtio.h +++ b/open-amp/lib/include/openamp/rpmsg_virtio.h @@ -14,6 +14,7 @@ #include #include +#include #include #include @@ -29,54 +30,73 @@ extern "C" { /* The feature bitmap for virtio rpmsg */ #define VIRTIO_RPMSG_F_NS 0 /* RP supports name service notifications */ -/** - * struct rpmsg_virtio_shm_pool - shared memory pool used for rpmsg buffers - * @base: base address of the memory pool - * @avail: available memory size - * @size: total pool size - */ +#ifdef VIRTIO_CACHED_BUFFERS +#warning "VIRTIO_CACHED_BUFFERS is deprecated, please use VIRTIO_USE_DCACHE" +#endif +#if defined(VIRTIO_CACHED_BUFFERS) || defined(VIRTIO_USE_DCACHE) +#define BUFFER_FLUSH(x, s) CACHE_FLUSH(x, s) +#define BUFFER_INVALIDATE(x, s) CACHE_INVALIDATE(x, s) +#else +#define BUFFER_FLUSH(x, s) do { } while (0) +#define BUFFER_INVALIDATE(x, s) do { } while (0) +#endif /* VIRTIO_CACHED_BUFFERS || VIRTIO_USE_DCACHE */ + +/** @brief Shared memory pool used for RPMsg buffers */ struct rpmsg_virtio_shm_pool { + /** Base address of the memory pool */ void *base; + + /** Available memory size */ size_t avail; + + /** Total pool size */ size_t size; }; /** - * struct rpmsg_virtio_config - configuration of rpmsg device based on virtio + * @brief Configuration of RPMsg device based on virtio * - * This structure is used by the rpmsg virtio host to configure the virtiio + * 
This structure is used by the RPMsg virtio host to configure the virtio * layer. - * - * @h2r_buf_size: the size of the buffer used to send data from host to remote - * @r2h_buf_size: the size of the buffer used to send data from remote to host - * @split_shpool: the flag that splitting share memory pool to TX and RX */ struct rpmsg_virtio_config { + /** The size of the buffer used to send data from host to remote */ uint32_t h2r_buf_size; + + /** The size of the buffer used to send data from remote to host */ uint32_t r2h_buf_size; + + /** The flag for splitting shared memory pool to TX and RX */ bool split_shpool; }; -/** - * struct rpmsg_virtio_device - representation of a rpmsg device based on virtio - * @rdev: rpmsg device, first property in the struct - * @config: structure containing virtio configuration - * @vdev: pointer to the virtio device - * @rvq: pointer to receive virtqueue - * @svq: pointer to send virtqueue - * @shbuf_io: pointer to the shared buffer I/O region - * @shpool: pointer to the shared buffers pool - * @reclaimer: Rpmsg buffer reclaimer that contains buffers released by - * the rpmsg_virtio_release_tx_buffer function. 
- */ +/** @brief Representation of a RPMsg device based on virtio */ struct rpmsg_virtio_device { + /** RPMsg device */ struct rpmsg_device rdev; + + /** Structure containing virtio configuration */ struct rpmsg_virtio_config config; + + /** Pointer to the virtio device */ struct virtio_device *vdev; + + /** Pointer to receive virtqueue */ struct virtqueue *rvq; + + /** Pointer to send virtqueue */ struct virtqueue *svq; + + /** Pointer to the shared buffer I/O region */ struct metal_io_region *shbuf_io; + + /** Pointer to the shared buffers pool */ struct rpmsg_virtio_shm_pool *shpool; + + /** + * RPMsg buffer reclaimer that contains buffers released by the + * \ref rpmsg_virtio_release_tx_buffer function + */ struct metal_list reclaimer; }; @@ -142,7 +162,7 @@ rpmsg_virtio_create_virtqueues(struct rpmsg_virtio_device *rvdev, vq_callback *callbacks) { return virtio_create_virtqueues(rvdev->vdev, flags, nvqs, names, - callbacks); + callbacks, NULL); } /** diff --git a/open-amp/lib/include/openamp/virtio.h b/open-amp/lib/include/openamp/virtio.h index 5e2f642..f677fef 100644 --- a/open-amp/lib/include/openamp/virtio.h +++ b/open-amp/lib/include/openamp/virtio.h @@ -95,9 +95,16 @@ __deprecated static inline int deprecated_virtio_dev_slave(void) #warning "VIRTIO_SLAVE_ONLY is deprecated, please use VIRTIO_DEVICE_ONLY" #endif +/** @brief Virtio device identifier. */ struct virtio_device_id { + /** Virtio subsystem device ID. */ uint32_t device; + + /** Virtio subsystem vendor ID. */ uint32_t vendor; + + /** Virtio subsystem device version. 
*/ + uint32_t version; }; /* @@ -120,102 +127,170 @@ struct virtio_device_id { #define VIRTIO_TRANSPORT_F_START 28 #define VIRTIO_TRANSPORT_F_END 32 +#ifdef VIRTIO_DEBUG +#include + +#define VIRTIO_ASSERT(_exp, _msg) do { \ + int exp = (_exp); \ + if (!(exp)) { \ + metal_log(METAL_LOG_EMERGENCY, \ + "FATAL: %s - " _msg, __func__); \ + metal_assert(exp); \ + } \ + } while (0) +#else +#define VIRTIO_ASSERT(_exp, _msg) metal_assert(_exp) +#endif /* VIRTIO_DEBUG */ + +#define VIRTIO_MMIO_VRING_ALIGNMENT 4096 + typedef void (*virtio_dev_reset_cb)(struct virtio_device *vdev); struct virtio_dispatch; +/** @brief Device features. */ struct virtio_feature_desc { + /** Unique feature ID, defined in the virtio specification. */ uint32_t vfd_val; + + /** Name of the feature (for debug). */ const char *vfd_str; }; -/** - * struct virtio_vring_info - * @vq virtio queue - * @info vring alloc info - * @notifyid vring notify id - * @io metal I/O region of the vring memory, can be NULL - */ +/** @brief Virtio vring data structure */ struct virtio_vring_info { + /** Virtio queue */ struct virtqueue *vq; + + /** Vring alloc info */ struct vring_alloc_info info; + + /** Vring notify id */ uint32_t notifyid; + + /** Metal I/O region of the vring memory, can be NULL */ struct metal_io_region *io; }; -/* - * Structure definition for virtio devices for use by the - * applications/drivers - */ - +/** @brief Structure definition for virtio devices for use by the applications/drivers */ struct virtio_device { - uint32_t notifyid; /**< unique position on the virtio bus */ - struct virtio_device_id id; /**< the device type identification - * (used to match it with a driver - */ - uint64_t features; /**< the features supported by both ends. */ - unsigned int role; /**< if it is virtio backend or front end. 
*/ - virtio_dev_reset_cb reset_cb; /**< user registered device callback */ - const struct virtio_dispatch *func; /**< Virtio dispatch table */ - void *priv; /**< TODO: remove pointer to virtio_device private data */ - unsigned int vrings_num; /**< number of vrings */ + /** Unique position on the virtio bus */ + uint32_t notifyid; + + /** The device type identification used to match it with a driver */ + struct virtio_device_id id; + + /** The features supported by both ends. */ + uint64_t features; + + /** If it is virtio backend or front end. */ + unsigned int role; + + /** User-registered device callback */ + virtio_dev_reset_cb reset_cb; + + /** Virtio dispatch table */ + const struct virtio_dispatch *func; + + /** Private data */ + void *priv; + + /** Number of vrings */ + unsigned int vrings_num; + + /** Pointer to the virtio vring structure */ struct virtio_vring_info *vrings_info; }; /* * Helper functions. */ + +/** + * @brief Get the name of a virtio device. + * + * @param devid Id of the device. + * + * @return pointer to the device name string if found, otherwise null. + */ const char *virtio_dev_name(uint16_t devid); void virtio_describe(struct virtio_device *dev, const char *msg, uint32_t features, struct virtio_feature_desc *feature_desc); -/* - * Functions for virtio device configuration as defined in Rusty Russell's - * paper. - * Drivers are expected to implement these functions in their respective codes. +/** + * @brief Virtio device dispatcher functions. + * + * Functions for virtio device configuration as defined in Rusty Russell's paper. + * The virtio transport layers are expected to implement these functions in their respective codes. */ struct virtio_dispatch { + /** Create virtio queue instances. */ int (*create_virtqueues)(struct virtio_device *vdev, unsigned int flags, unsigned int nvqs, const char *names[], - vq_callback callbacks[]); + vq_callback callbacks[], + void *callback_args[]); + + /** Delete virtio queue instances. 
*/ void (*delete_virtqueues)(struct virtio_device *vdev); + + /** Get the status of the virtio device. */ uint8_t (*get_status)(struct virtio_device *dev); + + /** Set the status of the virtio device. */ void (*set_status)(struct virtio_device *dev, uint8_t status); + + /** Get the feature exposed by the virtio device. */ uint32_t (*get_features)(struct virtio_device *dev); + + /** Set the supported feature (virtio driver only). */ void (*set_features)(struct virtio_device *dev, uint32_t feature); + + /** + * Negotiate the supported features between the \ref features parameter and the features + * supported by the device (virtio driver only). + */ uint32_t (*negotiate_features)(struct virtio_device *dev, uint32_t features); - /* - * Read/write a variable amount from the device specific (ie, network) - * configuration region. This region is encoded in the same endian as - * the guest. + /** + * Read a variable amount from the device specific (ie, network) + * configuration region. */ void (*read_config)(struct virtio_device *dev, uint32_t offset, void *dst, int length); + + /** + * Write a variable amount from the device specific (ie, network) + * configuration region. + */ void (*write_config)(struct virtio_device *dev, uint32_t offset, void *src, int length); + + /** Request a reset of the virtio device. */ void (*reset_device)(struct virtio_device *dev); + + /** Notify the other side that a virtio vring has been updated. */ void (*notify)(struct virtqueue *vq); }; /** * @brief Create the virtio device virtqueue. * - * @param vdev Pointer to virtio device structure. - * @param flags Create flag. - * @param nvqs The virtqueue number. - * @param names Virtqueue names. - * @param callbacks Virtqueue callback functions. + * @param vdev Pointer to virtio device structure. + * @param flags Create flag. + * @param nvqs The virtqueue number. + * @param names Virtqueue names. + * @param callbacks Virtqueue callback functions. 
+ * @param callback_args Virtqueue callback function arguments. * * @return 0 on success, otherwise error code. */ int virtio_create_virtqueues(struct virtio_device *vdev, unsigned int flags, unsigned int nvqs, const char *names[], - vq_callback callbacks[]); + vq_callback callbacks[], void *callback_args[]); /** * @brief Delete the virtio device virtqueue. @@ -236,6 +311,20 @@ static inline int virtio_delete_virtqueues(struct virtio_device *vdev) return 0; } +/** + * @brief Get device ID. + * + * @param vdev Pointer to virtio device structure. + * + * @return Device ID value. + */ +static inline uint32_t virtio_get_devid(const struct virtio_device *vdev) +{ + if (!vdev) + return 0; + return vdev->id.device; +} + /** * @brief Retrieve device status. * diff --git a/open-amp/lib/include/openamp/virtio_mmio.h b/open-amp/lib/include/openamp/virtio_mmio.h new file mode 100644 index 0000000..db678f6 --- /dev/null +++ b/open-amp/lib/include/openamp/virtio_mmio.h @@ -0,0 +1,221 @@ +/* + * Copyright (c) 2022 Wind River Systems, Inc. + * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 
2007 + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef OPENAMP_VIRTIO_MMIO_H +#define OPENAMP_VIRTIO_MMIO_H + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* Enable support for legacy devices */ +#define VIRTIO_MMIO_LEGACY + +/* Control registers */ + +/* Magic value ("virt" string) - Read Only */ +#define VIRTIO_MMIO_MAGIC_VALUE 0x000 + +#define VIRTIO_MMIO_MAGIC_VALUE_STRING ('v' | ('i' << 8) | ('r' << 16) | ('t' << 24)) + +/* Virtio device version - Read Only */ +#define VIRTIO_MMIO_VERSION 0x004 + +/* Virtio device ID - Read Only */ +#define VIRTIO_MMIO_DEVICE_ID 0x008 + +/* Virtio vendor ID - Read Only */ +#define VIRTIO_MMIO_VENDOR_ID 0x00c + +/* + * Bitmask of the features supported by the device (host) + * (32 bits per set) - Read Only + */ +#define VIRTIO_MMIO_DEVICE_FEATURES 0x010 + +/* Device (host) features set selector - Write Only */ +#define VIRTIO_MMIO_DEVICE_FEATURES_SEL 0x014 + +/* + * Bitmask of features activated by the driver (guest) + * (32 bits per set) - Write Only + */ +#define VIRTIO_MMIO_DRIVER_FEATURES 0x020 + +/* Activated features set selector - Write Only */ +#define VIRTIO_MMIO_DRIVER_FEATURES_SEL 0x024 + +#ifndef VIRTIO_MMIO_NO_LEGACY /* LEGACY DEVICES ONLY! 
*/ +/* Guest's memory page size in bytes - Write Only */ +#define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028 +#endif + +/* Queue selector - Write Only */ +#define VIRTIO_MMIO_QUEUE_SEL 0x030 + +/* Maximum size of the currently selected queue - Read Only */ +#define VIRTIO_MMIO_QUEUE_NUM_MAX 0x034 + +/* Queue size for the currently selected queue - Write Only */ +#define VIRTIO_MMIO_QUEUE_NUM 0x038 + +#ifdef VIRTIO_MMIO_LEGACY +/* Used Ring alignment for the currently selected queue - Write Only */ +#define VIRTIO_MMIO_QUEUE_ALIGN 0x03c +/* Guest's PFN for the currently selected queue - Read Write */ +#define VIRTIO_MMIO_QUEUE_PFN 0x040 +#endif + +/* Ready bit for the currently selected queue - Read Write */ +#define VIRTIO_MMIO_QUEUE_READY 0x044 + +/* Queue notifier - Write Only */ +#define VIRTIO_MMIO_QUEUE_NOTIFY 0x050 + +/* Interrupt status - Read Only */ +#define VIRTIO_MMIO_INTERRUPT_STATUS 0x060 + +/* Interrupt acknowledge - Write Only */ +#define VIRTIO_MMIO_INTERRUPT_ACK 0x064 + +/* Device status register - Read Write */ +#define VIRTIO_MMIO_STATUS 0x070 + +/* Selected queue's Descriptor Table address, 64 bits in two halves */ +#define VIRTIO_MMIO_QUEUE_DESC_LOW 0x080 +#define VIRTIO_MMIO_QUEUE_DESC_HIGH 0x084 + +/* Selected queue's Available Ring address, 64 bits in two halves */ +#define VIRTIO_MMIO_QUEUE_AVAIL_LOW 0x090 +#define VIRTIO_MMIO_QUEUE_AVAIL_HIGH 0x094 + +/* Selected queue's Used Ring address, 64 bits in two halves */ +#define VIRTIO_MMIO_QUEUE_USED_LOW 0x0a0 +#define VIRTIO_MMIO_QUEUE_USED_HIGH 0x0a4 + +/* Shared memory region id */ +#define VIRTIO_MMIO_SHM_SEL 0x0ac + +/* Shared memory region length, 64 bits in two halves */ +#define VIRTIO_MMIO_SHM_LEN_LOW 0x0b0 +#define VIRTIO_MMIO_SHM_LEN_HIGH 0x0b4 + +/* Shared memory region base address, 64 bits in two halves */ +#define VIRTIO_MMIO_SHM_BASE_LOW 0x0b8 +#define VIRTIO_MMIO_SHM_BASE_HIGH 0x0bc + +/* Configuration atomicity value */ +#define VIRTIO_MMIO_CONFIG_GENERATION 0x0fc + +/* + * The config 
space is defined by each driver as + * the per-driver configuration space - Read Write + */ +#define VIRTIO_MMIO_CONFIG 0x100 + +/* Interrupt flags (re: interrupt status & acknowledge registers) */ +#define VIRTIO_MMIO_INT_VRING (1 << 0) +#define VIRTIO_MMIO_INT_CONFIG (1 << 1) + +/* Data buffer size for preallocated buffers before vring */ +#define VIRTIO_MMIO_MAX_DATA_SIZE 128 + +/** @brief VIRTIO MMIO memory area */ +struct virtio_mmio_dev_mem { + /** Memory region physical address */ + void *base; + + /** Memory region size */ + size_t size; +}; + +/** @brief A VIRTIO MMIO device */ +struct virtio_mmio_device { + /** Base virtio device structure */ + struct virtio_device vdev; + + /** Device configuration space metal_io_region */ + struct metal_io_region *cfg_io; + + /** Pre-shared memory space metal_io_region */ + struct metal_io_region *shm_io; + + /** Shared memory device */ + struct metal_device shm_device; + + /** VIRTIO device configuration space */ + struct virtio_mmio_dev_mem cfg_mem; + + /** VIRTIO device pre-shared memory */ + struct virtio_mmio_dev_mem shm_mem; + + /** VIRTIO_DEV_DRIVER or VIRTIO_DEV_DEVICE */ + unsigned int device_mode; + + /** Interrupt number */ + unsigned int irq; + + /** Custom user data */ + void *user_data; +}; + +/** + * @brief Register a VIRTIO device with the VIRTIO stack. + * + * @param vdev Pointer to virtio device structure. + * @param vq_num Number of virtqueues the device uses. + * @param vqs Array of pointers to the virtqueues used by the device. + */ +void virtio_mmio_register_device(struct virtio_device *vdev, int vq_num, struct virtqueue **vqs); + +/** + * @brief Setup a virtqueue structure. + * + * @param vdev Pointer to virtio device structure. + * @param idx Index of the virtqueue. + * @param vq Pointer to virtqueue structure. + * @param cb Pointer to virtqueue callback. Can be NULL. + * @param cb_arg Argument for the virtqueue callback. + * + * @return pointer to virtqueue structure. 
+ */ +struct virtqueue *virtio_mmio_setup_virtqueue(struct virtio_device *vdev, + unsigned int idx, + struct virtqueue *vq, + void (*cb)(void *), + void *cb_arg, + const char *vq_name); + +/** + * @brief VIRTIO MMIO device initialization. + * + * @param vmdev Pointer to virtio_mmio_device structure. + * @param virt_mem_ptr Guest virtio (shared) memory base address (virtual). + * @param cfg_mem_ptr Virtio device configuration memory base address (virtual). + * @param user_data Pointer to custom user data. + * + * @return int 0 for success. + */ +int virtio_mmio_device_init(struct virtio_mmio_device *vmdev, uintptr_t virt_mem_ptr, + uintptr_t cfg_mem_ptr, void *user_data); + +/** + * @brief VIRTIO MMIO interrupt service routine. + * + * @param vdev Pointer to virtio_device structure. + */ +void virtio_mmio_isr(struct virtio_device *vdev); + +#ifdef __cplusplus +} +#endif + +#endif /* OPENAMP_VIRTIO_MMIO_H */ diff --git a/open-amp/lib/include/openamp/virtio_ring.h b/open-amp/lib/include/openamp/virtio_ring.h index af2dd43..c9c061a 100644 --- a/open-amp/lib/include/openamp/virtio_ring.h +++ b/open-amp/lib/include/openamp/virtio_ring.h @@ -33,25 +33,49 @@ extern "C" { */ #define VRING_AVAIL_F_NO_INTERRUPT 1 -/* VirtIO ring descriptors: 16 bytes. - * These can chain together via "next". +/** + * @brief VirtIO ring descriptors. + * + * The descriptor table refers to the buffers the driver is using for the + * device. addr is a physical address, and the buffers can be chained via \ref next. + * Each descriptor describes a buffer which is read-only for the device + * (“device-readable”) or write-only for the device (“device-writable”), but a + * chain of descriptors can contain both device-readable and device-writable + * buffers. */ METAL_PACKED_BEGIN struct vring_desc { - /* Address (guest-physical). */ + /** Address (guest-physical) */ uint64_t addr; - /* Length. */ + + /** Length */ uint32_t len; - /* The flags as indicated above. 
*/ + + /** Flags relevant to the descriptors */ uint16_t flags; - /* We chain unused descriptors via this, too. */ + + /** We chain unused descriptors via this, too */ uint16_t next; } METAL_PACKED_END; +/** + * @brief Used to offer buffers to the device. + * + * Each ring entry refers to the head of a descriptor chain. It is only + * written by the driver and read by the device. + */ METAL_PACKED_BEGIN struct vring_avail { + /** Flag which determines whether device notifications are required */ uint16_t flags; + + /** + * Indicates where the driver puts the next descriptor entry in the + * ring (modulo the queue size) + */ uint16_t idx; + + /** The ring of descriptors */ uint16_t ring[0]; } METAL_PACKED_END; @@ -67,22 +91,39 @@ struct vring_used_elem { uint32_t len; } METAL_PACKED_END; +/** + * @brief The device returns buffers to this structure when done with them + * + * The structure is only written to by the device, and read by the driver. + */ METAL_PACKED_BEGIN struct vring_used { + /** Flag which determines whether device notifications are required */ uint16_t flags; + + /** + * Indicates where the device puts the next used descriptor entry in the + * ring (modulo the queue size) + */ uint16_t idx; + + /** The ring of used elements */ struct vring_used_elem ring[0]; } METAL_PACKED_END; -struct vring { - unsigned int num; - - struct vring_desc *desc; - struct vring_avail *avail; - struct vring_used *used; -}; - -/* The standard layout for the ring is a continuous chunk of memory which +/** + * @brief The virtqueue layout structure + * + * Each virtqueue consists of: descriptor table, available ring, used ring, + * where each part is physically contiguous in guest memory. + * + * When the driver wants to send a buffer to the device, it fills in a slot in + * the descriptor table (or chains several together), and writes the descriptor + * index into the available ring. It then notifies the device. 
When the device + * has finished a buffer, it writes the descriptor index into the used ring, + * and sends an interrupt. + * + * The standard layout for the ring is a continuous chunk of memory which * looks like this. We assume num is a power of 2. * * struct vring { @@ -107,6 +148,22 @@ struct vring { * * NOTE: for VirtIO PCI, align is 4096. */ +struct vring { + /** + * The maximum number of buffer descriptors in the virtqueue. + * The value is always a power of 2. + */ + unsigned int num; + + /** The actual buffer descriptors, 16 bytes each */ + struct vring_desc *desc; + + /** A ring of available descriptor heads with free-running index */ + struct vring_avail *avail; + + /** A ring of used descriptor heads with free-running index */ + struct vring_used *used; +}; /* * We publish the used event index at the end of the available ring, and vice diff --git a/open-amp/lib/include/openamp/virtqueue.h b/open-amp/lib/include/openamp/virtqueue.h index 237f9be..1a9d2e8 100644 --- a/open-amp/lib/include/openamp/virtqueue.h +++ b/open-amp/lib/include/openamp/virtqueue.h @@ -20,6 +20,7 @@ extern "C" { #include #include #include +#include /* Error Codes */ #define VQ_ERROR_BASE -3000 @@ -47,65 +48,113 @@ extern "C" { /* Support to suppress interrupt until specific index is reached. */ #define VIRTIO_RING_F_EVENT_IDX (1 << 29) +/* cache invalidation helpers */ +#define CACHE_FLUSH(x, s) metal_cache_flush(x, s) +#define CACHE_INVALIDATE(x, s) metal_cache_invalidate(x, s) + +#ifdef VIRTIO_CACHED_VRINGS +#warning "VIRTIO_CACHED_VRINGS is deprecated, please use VIRTIO_USE_DCACHE" +#endif +#if defined(VIRTIO_CACHED_VRINGS) || defined(VIRTIO_USE_DCACHE) +#define VRING_FLUSH(x, s) CACHE_FLUSH(x, s) +#define VRING_INVALIDATE(x, s) CACHE_INVALIDATE(x, s) +#else +#define VRING_FLUSH(x, s) do { } while (0) +#define VRING_INVALIDATE(x, s) do { } while (0) +#endif /* VIRTIO_CACHED_VRINGS || VIRTIO_USE_DCACHE */ + +/** @brief Buffer descriptor. 
*/ struct virtqueue_buf { + /** Address of the buffer. */ void *buf; + + /** Size of the buffer. */ int len; }; +/** @brief Vring descriptor extra information for buffer list management. */ struct vq_desc_extra { + /** Pointer to first descriptor. */ void *cookie; + + /** Number of chained descriptors. */ uint16_t ndescs; }; +/** @brief Local virtio queue to manage a virtio ring for sending or receiving. */ struct virtqueue { + /** Associated virtio device. */ struct virtio_device *vq_dev; + + /** Name of the virtio queue. */ const char *vq_name; + + /** Index of the virtio queue. */ uint16_t vq_queue_index; + + /** Max number of buffers in the virtio queue. */ uint16_t vq_nentries; + + /** Function to invoke, when message is available on the virtio queue. */ void (*callback)(struct virtqueue *vq); + + /** Private data associated to the virtio queue. */ + void *priv; + + /** Function to invoke, to inform the other side about an update in the virtio queue. */ void (*notify)(struct virtqueue *vq); + + /** Associated virtio ring. */ struct vring vq_ring; + + /** Number of free descriptors in the virtio ring. */ uint16_t vq_free_cnt; + + /** Number of queued buffers in the virtio ring. */ uint16_t vq_queued_cnt; - void *shm_io; /* opaque pointer to data needed to allow v2p & p2v */ - /* - * Head of the free chain in the descriptor table. If - * there are no free descriptors, this will be set to - * VQ_RING_DESC_CHAIN_END. + /** + * Metal I/O region of the vrings and buffers. + * This structure is used for conversion between virtual and physical addresses. */ - uint16_t vq_desc_head_idx; + void *shm_io; - /* - * Last consumed descriptor in the used table, - * trails vq_ring.used->idx. + /** + * Head of the free chain in the descriptor table. If there are no free descriptors, + * this will be set to VQ_RING_DESC_CHAIN_END. */ + uint16_t vq_desc_head_idx; + + /** Last consumed descriptor in the used table, trails vq_ring.used->idx. 
*/ uint16_t vq_used_cons_idx; - /* - * Last consumed descriptor in the available table - - * used by the consumer side. - */ + /** Last consumed descriptor in the available table, used by the consumer side. */ uint16_t vq_available_idx; #ifdef VQUEUE_DEBUG + /** Debug counter for virtqueue reentrance check. */ bool vq_inuse; #endif - /* - * Used by the host side during callback. Cookie - * holds the address of buffer received from other side. - * Other fields in this structure are not used currently. + /** + * Used by the host side during callback. Cookie holds the address of buffer received from + * other side. Other fields in this structure are not used currently. */ - struct vq_desc_extra vq_descx[0]; }; -/* struct to hold vring specific information */ +/** @brief Virtio ring specific information. */ struct vring_alloc_info { + /** Vring address. */ void *vaddr; + + /** Vring alignment. */ uint32_t align; + + /** Number of descriptors in the vring. */ uint16_t num_descs; + + /** Padding */ uint16_t pad; }; @@ -334,6 +383,30 @@ uint32_t virtqueue_get_desc_size(struct virtqueue *vq); uint32_t virtqueue_get_buffer_length(struct virtqueue *vq, uint16_t idx); void *virtqueue_get_buffer_addr(struct virtqueue *vq, uint16_t idx); +/** + * @brief Test if virtqueue is empty + * + * @param vq Pointer to VirtIO queue control block + * + * @return 1 if virtqueue is empty, 0 otherwise + */ +static inline int virtqueue_empty(struct virtqueue *vq) +{ + return (vq->vq_nentries == vq->vq_free_cnt); +} + +/** + * @brief Test if virtqueue is full + * + * @param vq Pointer to VirtIO queue control block + * + * @return 1 if virtqueue is full, 0 otherwise + */ +static inline int virtqueue_full(struct virtqueue *vq) +{ + return (vq->vq_free_cnt == 0); +} + #if defined __cplusplus } #endif diff --git a/open-amp/lib/rpmsg/rpmsg_internal.h b/open-amp/lib/rpmsg/rpmsg_internal.h index ab6e0f2..6721ecf 100644 --- a/open-amp/lib/rpmsg/rpmsg_internal.h +++ b/open-amp/lib/rpmsg/rpmsg_internal.h 
@@ -48,40 +48,46 @@ enum rpmsg_ns_flags { }; /** - * struct rpmsg_hdr - common header for all rpmsg messages - * @src: source address - * @dst: destination address - * @reserved: reserved for future use - * @len: length of payload (in bytes) - * @flags: message flags + * @brief Common header for all RPMsg messages * - * Every message sent(/received) on the rpmsg bus begins with this header. + * Every message sent(/received) on the RPMsg bus begins with this header. */ METAL_PACKED_BEGIN struct rpmsg_hdr { + /** Source address */ uint32_t src; + + /** Destination address */ uint32_t dst; + + /** Reserved for future use */ uint32_t reserved; + + /** Length of payload (in bytes) */ uint16_t len; + + /** Message flags */ uint16_t flags; } METAL_PACKED_END; /** - * struct rpmsg_ns_msg - dynamic name service announcement message - * @name: name of remote service that is published - * @addr: address of remote service that is published - * @flags: indicates whether service is created or destroyed + * @brief Dynamic name service announcement message * * This message is sent across to publish a new service, or announce * about its removal. When we receive these messages, an appropriate - * rpmsg channel (i.e device) is created/destroyed. In turn, the ->probe() - * or ->remove() handler of the appropriate rpmsg driver will be invoked + * RPMsg channel (i.e device) is created/destroyed. In turn, the ->probe() + * or ->remove() handler of the appropriate RPMsg driver will be invoked * (if/as-soon-as one is registered). 
*/ METAL_PACKED_BEGIN struct rpmsg_ns_msg { + /** Name of the remote service that is being published */ char name[RPMSG_NAME_SIZE]; + + /** Endpoint address of the remote service that is being published */ uint32_t addr; + + /** Indicates whether service is created or destroyed */ uint32_t flags; } METAL_PACKED_END; diff --git a/open-amp/lib/rpmsg/rpmsg_virtio.c b/open-amp/lib/rpmsg/rpmsg_virtio.c index 0ec5193..ea4cc0d 100644 --- a/open-amp/lib/rpmsg/rpmsg_virtio.c +++ b/open-amp/lib/rpmsg/rpmsg_virtio.c @@ -9,7 +9,6 @@ */ #include -#include #include #include #include @@ -93,9 +92,7 @@ static void rpmsg_virtio_return_buffer(struct rpmsg_virtio_device *rvdev, { unsigned int role = rpmsg_virtio_get_role(rvdev); -#ifdef VIRTIO_CACHED_BUFFERS - metal_cache_invalidate(buffer, len); -#endif + BUFFER_INVALIDATE(buffer, len); #ifndef VIRTIO_DEVICE_ONLY if (role == RPMSG_HOST) { @@ -135,9 +132,7 @@ static int rpmsg_virtio_enqueue_buffer(struct rpmsg_virtio_device *rvdev, { unsigned int role = rpmsg_virtio_get_role(rvdev); -#ifdef VIRTIO_CACHED_BUFFERS - metal_cache_flush(buffer, len); -#endif /* VIRTIO_CACHED_BUFFERS */ + BUFFER_FLUSH(buffer, len); #ifndef VIRTIO_DEVICE_ONLY if (role == RPMSG_HOST) { @@ -245,11 +240,9 @@ static void *rpmsg_virtio_get_rx_buffer(struct rpmsg_virtio_device *rvdev, } #endif /*!VIRTIO_DRIVER_ONLY*/ -#ifdef VIRTIO_CACHED_BUFFERS /* Invalidate the buffer before returning it */ if (data) - metal_cache_invalidate(data, *len); -#endif /* VIRTIO_CACHED_BUFFERS */ + BUFFER_INVALIDATE(data, *len); return data; } diff --git a/open-amp/lib/virtio/virtio.c b/open-amp/lib/virtio/virtio.c index c745426..6083e4f 100644 --- a/open-amp/lib/virtio/virtio.c +++ b/open-amp/lib/virtio/virtio.c @@ -96,7 +96,7 @@ void virtio_describe(struct virtio_device *dev, const char *msg, int virtio_create_virtqueues(struct virtio_device *vdev, unsigned int flags, unsigned int nvqs, const char *names[], - vq_callback callbacks[]) + vq_callback callbacks[], void 
*callback_args[]) { struct virtio_vring_info *vring_info; struct vring_alloc_info *vring_alloc; @@ -109,7 +109,7 @@ int virtio_create_virtqueues(struct virtio_device *vdev, unsigned int flags, if (vdev->func && vdev->func->create_virtqueues) { return vdev->func->create_virtqueues(vdev, flags, nvqs, - names, callbacks); + names, callbacks, callback_args); } num_vrings = vdev->vrings_num; diff --git a/open-amp/lib/virtio/virtqueue.c b/open-amp/lib/virtio/virtqueue.c index 39e25a2..2544ee3 100644 --- a/open-amp/lib/virtio/virtqueue.c +++ b/open-amp/lib/virtio/virtqueue.c @@ -11,7 +11,6 @@ #include #include #include -#include /* Prototype for internal functions. */ static void vq_ring_init(struct virtqueue *, void *, int); @@ -29,14 +28,6 @@ static int virtqueue_nused(struct virtqueue *vq); static int virtqueue_navail(struct virtqueue *vq); #endif -#ifdef VIRTIO_CACHED_VRINGS -#define VRING_FLUSH(x) metal_cache_flush(&x, sizeof(x)) -#define VRING_INVALIDATE(x) metal_cache_invalidate(&x, sizeof(x)) -#else -#define VRING_FLUSH(x) do { } while (0) -#define VRING_INVALIDATE(x) do { } while (0) -#endif /* VIRTIO_CACHED_VRINGS */ - /* Default implementation of P2V based on libmetal */ static inline void *virtqueue_phys_to_virt(struct virtqueue *vq, metal_phys_addr_t phys) @@ -152,7 +143,7 @@ void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t *len, uint16_t *idx) uint16_t used_idx, desc_idx; /* Used.idx is updated by the virtio device, so we need to invalidate */ - VRING_INVALIDATE(vq->vq_ring.used->idx); + VRING_INVALIDATE(&vq->vq_ring.used->idx, sizeof(vq->vq_ring.used->idx)); if (!vq || vq->vq_used_cons_idx == vq->vq_ring.used->idx) return NULL; @@ -165,7 +156,8 @@ void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t *len, uint16_t *idx) atomic_thread_fence(memory_order_seq_cst); /* Used.ring is written by remote, invalidate it */ - VRING_INVALIDATE(vq->vq_ring.used->ring[used_idx]); + VRING_INVALIDATE(&vq->vq_ring.used->ring[used_idx], + 
sizeof(vq->vq_ring.used->ring[used_idx])); desc_idx = (uint16_t)uep->id; if (len) @@ -185,13 +177,15 @@ void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t *len, uint16_t *idx) uint32_t virtqueue_get_buffer_length(struct virtqueue *vq, uint16_t idx) { - VRING_INVALIDATE(vq->vq_ring.desc[idx].len); + VRING_INVALIDATE(&vq->vq_ring.desc[idx].len, + sizeof(vq->vq_ring.desc[idx].len)); return vq->vq_ring.desc[idx].len; } void *virtqueue_get_buffer_addr(struct virtqueue *vq, uint16_t idx) { - VRING_INVALIDATE(vq->vq_ring.desc[idx].addr); + VRING_INVALIDATE(&vq->vq_ring.desc[idx].addr, + sizeof(vq->vq_ring.desc[idx].addr)); return virtqueue_phys_to_virt(vq, vq->vq_ring.desc[idx].addr); } @@ -217,7 +211,7 @@ void *virtqueue_get_available_buffer(struct virtqueue *vq, uint16_t *avail_idx, atomic_thread_fence(memory_order_seq_cst); /* Avail.idx is updated by driver, invalidate it */ - VRING_INVALIDATE(vq->vq_ring.avail->idx); + VRING_INVALIDATE(&vq->vq_ring.avail->idx, sizeof(vq->vq_ring.avail->idx)); if (vq->vq_available_idx == vq->vq_ring.avail->idx) { return NULL; } @@ -227,11 +221,13 @@ void *virtqueue_get_available_buffer(struct virtqueue *vq, uint16_t *avail_idx, head_idx = vq->vq_available_idx++ & (vq->vq_nentries - 1); /* Avail.ring is updated by driver, invalidate it */ - VRING_INVALIDATE(vq->vq_ring.avail->ring[head_idx]); + VRING_INVALIDATE(&vq->vq_ring.avail->ring[head_idx], + sizeof(vq->vq_ring.avail->ring[head_idx])); *avail_idx = vq->vq_ring.avail->ring[head_idx]; /* Invalidate the desc entry written by driver before accessing it */ - VRING_INVALIDATE(vq->vq_ring.desc[*avail_idx]); + VRING_INVALIDATE(&vq->vq_ring.desc[*avail_idx], + sizeof(vq->vq_ring.desc[*avail_idx])); buffer = virtqueue_phys_to_virt(vq, vq->vq_ring.desc[*avail_idx].addr); *len = vq->vq_ring.desc[*avail_idx].len; @@ -259,14 +255,15 @@ int virtqueue_add_consumed_buffer(struct virtqueue *vq, uint16_t head_idx, used_desc->len = len; /* We still need to flush it because this is read by 
driver */ - VRING_FLUSH(vq->vq_ring.used->ring[used_idx]); + VRING_FLUSH(&vq->vq_ring.used->ring[used_idx], + sizeof(vq->vq_ring.used->ring[used_idx])); atomic_thread_fence(memory_order_seq_cst); vq->vq_ring.used->idx++; /* Used.idx is read by driver, so we need to flush it */ - VRING_FLUSH(vq->vq_ring.used->idx); + VRING_FLUSH(&vq->vq_ring.used->idx, sizeof(vq->vq_ring.used->idx)); /* Keep pending count until virtqueue_notify(). */ vq->vq_queued_cnt++; @@ -290,27 +287,31 @@ void virtqueue_disable_cb(struct virtqueue *vq) if (vq->vq_dev->role == VIRTIO_DEV_DRIVER) { vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx - vq->vq_nentries - 1; - VRING_FLUSH(vring_used_event(&vq->vq_ring)); + VRING_FLUSH(&vring_used_event(&vq->vq_ring), + sizeof(vring_used_event(&vq->vq_ring))); } #endif /*VIRTIO_DEVICE_ONLY*/ #ifndef VIRTIO_DRIVER_ONLY if (vq->vq_dev->role == VIRTIO_DEV_DEVICE) { vring_avail_event(&vq->vq_ring) = vq->vq_available_idx - vq->vq_nentries - 1; - VRING_FLUSH(vring_avail_event(&vq->vq_ring)); + VRING_FLUSH(&vring_avail_event(&vq->vq_ring), + sizeof(vring_avail_event(&vq->vq_ring))); } #endif /*VIRTIO_DRIVER_ONLY*/ } else { #ifndef VIRTIO_DEVICE_ONLY if (vq->vq_dev->role == VIRTIO_DEV_DRIVER) { vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; - VRING_FLUSH(vq->vq_ring.avail->flags); + VRING_FLUSH(&vq->vq_ring.avail->flags, + sizeof(vq->vq_ring.avail->flags)); } #endif /*VIRTIO_DEVICE_ONLY*/ #ifndef VIRTIO_DRIVER_ONLY if (vq->vq_dev->role == VIRTIO_DEV_DEVICE) { vq->vq_ring.used->flags |= VRING_USED_F_NO_NOTIFY; - VRING_FLUSH(vq->vq_ring.used->flags); + VRING_FLUSH(&vq->vq_ring.used->flags, + sizeof(vq->vq_ring.used->flags)); } #endif /*VIRTIO_DRIVER_ONLY*/ } @@ -338,8 +339,8 @@ void virtqueue_dump(struct virtqueue *vq) if (!vq) return; - VRING_INVALIDATE(vq->vq_ring.avail); - VRING_INVALIDATE(vq->vq_ring.used); + VRING_INVALIDATE(&vq->vq_ring.avail, sizeof(vq->vq_ring.avail)); + VRING_INVALIDATE(&vq->vq_ring.used, sizeof(vq->vq_ring.used)); 
metal_log(METAL_LOG_DEBUG, "VQ: %s - size=%d; free=%d; queued=%d; desc_head_idx=%d; " @@ -359,7 +360,7 @@ uint32_t virtqueue_get_desc_size(struct virtqueue *vq) uint32_t len = 0; /* Avail.idx is updated by driver, invalidate it */ - VRING_INVALIDATE(vq->vq_ring.avail->idx); + VRING_INVALIDATE(&vq->vq_ring.avail->idx, sizeof(vq->vq_ring.avail->idx)); if (vq->vq_available_idx == vq->vq_ring.avail->idx) { return 0; @@ -370,11 +371,13 @@ uint32_t virtqueue_get_desc_size(struct virtqueue *vq) head_idx = vq->vq_available_idx & (vq->vq_nentries - 1); /* Avail.ring is updated by driver, invalidate it */ - VRING_INVALIDATE(vq->vq_ring.avail->ring[head_idx]); + VRING_INVALIDATE(&vq->vq_ring.avail->ring[head_idx], + sizeof(vq->vq_ring.avail->ring[head_idx])); avail_idx = vq->vq_ring.avail->ring[head_idx]; /* Invalidate the desc entry written by driver before accessing it */ - VRING_INVALIDATE(vq->vq_ring.desc[avail_idx].len); + VRING_INVALIDATE(&vq->vq_ring.desc[avail_idx].len, + sizeof(vq->vq_ring.desc[avail_idx].len)); len = vq->vq_ring.desc[avail_idx].len; @@ -429,7 +432,7 @@ static uint16_t vq_ring_add_buffer(struct virtqueue *vq, * Instead of flushing the whole desc region, we flush only the * single entry hopefully saving some cycles */ - VRING_FLUSH(desc[idx]); + VRING_FLUSH(&desc[idx], sizeof(desc[idx])); } @@ -528,14 +531,15 @@ static void vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx) vq->vq_ring.avail->ring[avail_idx] = desc_idx; /* We still need to flush the ring */ - VRING_FLUSH(vq->vq_ring.avail->ring[avail_idx]); + VRING_FLUSH(&vq->vq_ring.avail->ring[avail_idx], + sizeof(vq->vq_ring.avail->ring[avail_idx])); atomic_thread_fence(memory_order_seq_cst); vq->vq_ring.avail->idx++; /* And the index */ - VRING_FLUSH(vq->vq_ring.avail->idx); + VRING_FLUSH(&vq->vq_ring.avail->idx, sizeof(vq->vq_ring.avail->idx)); /* Keep pending count until virtqueue_notify(). 
*/ vq->vq_queued_cnt++; @@ -557,27 +561,31 @@ static int vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc) if (vq->vq_dev->role == VIRTIO_DEV_DRIVER) { vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc; - VRING_FLUSH(vring_used_event(&vq->vq_ring)); + VRING_FLUSH(&vring_used_event(&vq->vq_ring), + sizeof(vring_used_event(&vq->vq_ring))); } #endif /*VIRTIO_DEVICE_ONLY*/ #ifndef VIRTIO_DRIVER_ONLY if (vq->vq_dev->role == VIRTIO_DEV_DEVICE) { vring_avail_event(&vq->vq_ring) = vq->vq_available_idx + ndesc; - VRING_FLUSH(vring_avail_event(&vq->vq_ring)); + VRING_FLUSH(&vring_avail_event(&vq->vq_ring), + sizeof(vring_avail_event(&vq->vq_ring))); } #endif /*VIRTIO_DRIVER_ONLY*/ } else { #ifndef VIRTIO_DEVICE_ONLY if (vq->vq_dev->role == VIRTIO_DEV_DRIVER) { vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; - VRING_FLUSH(vq->vq_ring.avail->flags); + VRING_FLUSH(&vq->vq_ring.avail->flags, + sizeof(vq->vq_ring.avail->flags)); } #endif /*VIRTIO_DEVICE_ONLY*/ #ifndef VIRTIO_DRIVER_ONLY if (vq->vq_dev->role == VIRTIO_DEV_DEVICE) { vq->vq_ring.used->flags &= ~VRING_USED_F_NO_NOTIFY; - VRING_FLUSH(vq->vq_ring.used->flags); + VRING_FLUSH(&vq->vq_ring.used->flags, + sizeof(vq->vq_ring.used->flags)); } #endif /*VIRTIO_DRIVER_ONLY*/ } @@ -634,7 +642,8 @@ static int vq_ring_must_notify(struct virtqueue *vq) /* CACHE: no need to invalidate avail */ new_idx = vq->vq_ring.avail->idx; prev_idx = new_idx - vq->vq_queued_cnt; - VRING_INVALIDATE(vring_avail_event(&vq->vq_ring)); + VRING_INVALIDATE(&vring_avail_event(&vq->vq_ring), + sizeof(vring_avail_event(&vq->vq_ring))); event_idx = vring_avail_event(&vq->vq_ring); return vring_need_event(event_idx, new_idx, prev_idx) != 0; @@ -645,7 +654,8 @@ static int vq_ring_must_notify(struct virtqueue *vq) /* CACHE: no need to invalidate used */ new_idx = vq->vq_ring.used->idx; prev_idx = new_idx - vq->vq_queued_cnt; - VRING_INVALIDATE(vring_used_event(&vq->vq_ring)); + 
VRING_INVALIDATE(&vring_used_event(&vq->vq_ring), + sizeof(vring_used_event(&vq->vq_ring))); event_idx = vring_used_event(&vq->vq_ring); return vring_need_event(event_idx, new_idx, prev_idx) != 0; @@ -654,14 +664,16 @@ static int vq_ring_must_notify(struct virtqueue *vq) } else { #ifndef VIRTIO_DEVICE_ONLY if (vq->vq_dev->role == VIRTIO_DEV_DRIVER) { - VRING_INVALIDATE(vq->vq_ring.used->flags); + VRING_INVALIDATE(&vq->vq_ring.used->flags, + sizeof(vq->vq_ring.used->flags)); return (vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0; } #endif /*VIRTIO_DEVICE_ONLY*/ #ifndef VIRTIO_DRIVER_ONLY if (vq->vq_dev->role == VIRTIO_DEV_DEVICE) { - VRING_INVALIDATE(vq->vq_ring.avail->flags); + VRING_INVALIDATE(&vq->vq_ring.avail->flags, + sizeof(vq->vq_ring.avail->flags)); return (vq->vq_ring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) == 0; } @@ -693,7 +705,7 @@ static int virtqueue_nused(struct virtqueue *vq) uint16_t used_idx, nused; /* Used is written by remote */ - VRING_INVALIDATE(vq->vq_ring.used->idx); + VRING_INVALIDATE(&vq->vq_ring.used->idx, sizeof(vq->vq_ring.used->idx)); used_idx = vq->vq_ring.used->idx; nused = (uint16_t)(used_idx - vq->vq_used_cons_idx); @@ -714,7 +726,7 @@ static int virtqueue_navail(struct virtqueue *vq) uint16_t avail_idx, navail; /* Avail is written by driver */ - VRING_INVALIDATE(vq->vq_ring.avail->idx); + VRING_INVALIDATE(&vq->vq_ring.avail->idx, sizeof(vq->vq_ring.avail->idx)); avail_idx = vq->vq_ring.avail->idx; diff --git a/open-amp/lib/virtio_mmio/CMakeLists.txt b/open-amp/lib/virtio_mmio/CMakeLists.txt new file mode 100644 index 0000000..f25d00c --- /dev/null +++ b/open-amp/lib/virtio_mmio/CMakeLists.txt @@ -0,0 +1,3 @@ +if (WITH_VIRTIO_MMIO_DRV) +collect (PROJECT_LIB_SOURCES virtio_mmio_drv.c) +endif (WITH_VIRTIO_MMIO_DRV) diff --git a/open-amp/lib/virtio_mmio/virtio_mmio_drv.c b/open-amp/lib/virtio_mmio/virtio_mmio_drv.c new file mode 100644 index 0000000..5f42180 --- /dev/null +++ b/open-amp/lib/virtio_mmio/virtio_mmio_drv.c @@ 
-0,0 +1,391 @@ +/* + * Copyright (c) 2022 Wind River Systems, Inc. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include +#include +#include +#include + +void virtio_mmio_isr(struct virtio_device *vdev); + +typedef void (*virtio_mmio_vq_callback)(void *); + +static int virtio_mmio_create_virtqueues(struct virtio_device *vdev, unsigned int flags, + unsigned int nvqs, const char *names[], + vq_callback callbacks[], void *callback_args[]); + +static inline void virtio_mmio_write32(struct virtio_device *vdev, int offset, uint32_t value) +{ + struct virtio_mmio_device *vmdev = metal_container_of(vdev, + struct virtio_mmio_device, vdev); + + metal_io_write32(vmdev->cfg_io, offset, value); +} + +static inline uint32_t virtio_mmio_read32(struct virtio_device *vdev, int offset) +{ + struct virtio_mmio_device *vmdev = metal_container_of(vdev, + struct virtio_mmio_device, vdev); + + return metal_io_read32(vmdev->cfg_io, offset); +} + +static inline uint8_t virtio_mmio_read8(struct virtio_device *vdev, int offset) +{ + struct virtio_mmio_device *vmdev = metal_container_of(vdev, + struct virtio_mmio_device, vdev); + + return metal_io_read8(vmdev->cfg_io, offset); +} + +static inline void virtio_mmio_set_status(struct virtio_device *vdev, uint8_t status) +{ + virtio_mmio_write32(vdev, VIRTIO_MMIO_STATUS, status); +} + +static uint8_t virtio_mmio_get_status(struct virtio_device *vdev) +{ + return virtio_mmio_read32(vdev, VIRTIO_MMIO_STATUS); +} + +static void virtio_mmio_write_config(struct virtio_device *vdev, + uint32_t offset, void *dst, int length) +{ + (void)(vdev); + (void)(offset); + (void)(dst); + (void)length; + + metal_log(METAL_LOG_WARNING, "%s not supported\n", __func__); +} + +static void virtio_mmio_read_config(struct virtio_device *vdev, + uint32_t offset, void *dst, int length) +{ + int i; + uint8_t *d = dst; + (void)(offset); + + for (i = 0; i < length; i++) + d[i] = virtio_mmio_read8(vdev, VIRTIO_MMIO_CONFIG + i); +} + +static 
uint32_t _virtio_mmio_get_features(struct virtio_device *vdev, int idx) +{ + uint32_t hfeatures; + + /* Writing selection register VIRTIO_MMIO_DEVICE_FEATURES_SEL. In pure AMP + * mode this needs to be followed by a synchronization w/ the device + * before reading VIRTIO_MMIO_DEVICE_FEATURES + */ + virtio_mmio_write32(vdev, VIRTIO_MMIO_DEVICE_FEATURES_SEL, idx); + hfeatures = virtio_mmio_read32(vdev, VIRTIO_MMIO_DEVICE_FEATURES); + return hfeatures & vdev->features; +} + +static uint32_t virtio_mmio_get_features(struct virtio_device *vdev) +{ + return _virtio_mmio_get_features(vdev, 0); +} + +/* This is more like negotiate_features */ +static void _virtio_mmio_set_features(struct virtio_device *vdev, + uint32_t features, int idx) +{ + uint32_t hfeatures; + + /* Writing selection register VIRTIO_MMIO_DEVICE_FEATURES_SEL. In pure AMP + * mode this needs to be followed by a synchronization w/ the device + * before reading VIRTIO_MMIO_DEVICE_FEATURES + */ + virtio_mmio_write32(vdev, VIRTIO_MMIO_DEVICE_FEATURES_SEL, idx); + hfeatures = virtio_mmio_read32(vdev, VIRTIO_MMIO_DEVICE_FEATURES); + features &= hfeatures; + virtio_mmio_write32(vdev, VIRTIO_MMIO_DRIVER_FEATURES, features); + vdev->features = features; +} + +static void virtio_mmio_set_features(struct virtio_device *vdev, uint32_t features) +{ + _virtio_mmio_set_features(vdev, features, 0); +} + +static void virtio_mmio_reset_device(struct virtio_device *vdev) +{ + virtio_mmio_set_status(vdev, 0); +} + +static void virtio_mmio_notify(struct virtqueue *vq) +{ + /* VIRTIO_F_NOTIFICATION_DATA is not supported for now */ + virtio_mmio_write32(vq->vq_dev, VIRTIO_MMIO_QUEUE_NOTIFY, vq->vq_queue_index); +} + +const struct virtio_dispatch virtio_mmio_dispatch = { + .create_virtqueues = virtio_mmio_create_virtqueues, + .get_status = virtio_mmio_get_status, + .set_status = virtio_mmio_set_status, + .get_features = virtio_mmio_get_features, + .set_features = virtio_mmio_set_features, + .read_config = 
virtio_mmio_read_config, + .write_config = virtio_mmio_write_config, + .reset_device = virtio_mmio_reset_device, + .notify = virtio_mmio_notify, +}; + +static int virtio_mmio_get_metal_io(struct virtio_device *vdev, uintptr_t virt_mem_ptr, + uintptr_t cfg_mem_ptr) +{ + struct metal_device *device; + int32_t err; + struct virtio_mmio_device *vmdev = metal_container_of(vdev, + struct virtio_mmio_device, vdev); + + /* Setup shared memory device */ + vmdev->shm_device.regions[0].physmap = (metal_phys_addr_t *)&vmdev->shm_mem.base; + vmdev->shm_device.regions[0].virt = (void *)virt_mem_ptr; + vmdev->shm_device.regions[0].size = vmdev->shm_mem.size; + + VIRTIO_ASSERT((METAL_MAX_DEVICE_REGIONS > 1), + "METAL_MAX_DEVICE_REGIONS must be greater that 1"); + + vmdev->shm_device.regions[1].physmap = (metal_phys_addr_t *)&vmdev->cfg_mem.base; + vmdev->shm_device.regions[1].virt = (void *)cfg_mem_ptr; + vmdev->shm_device.regions[1].size = vmdev->cfg_mem.size; + + err = metal_register_generic_device(&vmdev->shm_device); + if (err) { + metal_log(METAL_LOG_ERROR, "Couldn't register shared memory device: %d\n", err); + return err; + } + + err = metal_device_open("generic", vmdev->shm_device.name, &device); + if (err) { + metal_log(METAL_LOG_ERROR, "metal_device_open failed: %d", err); + return err; + } + + vmdev->shm_io = metal_device_io_region(device, 0); + if (!vmdev->shm_io) { + metal_log(METAL_LOG_ERROR, "metal_device_io_region failed to get region 0"); + return err; + } + + vmdev->cfg_io = metal_device_io_region(device, 1); + if (!vmdev->cfg_io) { + metal_log(METAL_LOG_ERROR, "metal_device_io_region failed to get region 1"); + return err; + } + + return 0; +} + +uint32_t virtio_mmio_get_max_elem(struct virtio_device *vdev, int idx) +{ + /* Select the queue we're interested in by writing selection register + * VIRTIO_MMIO_QUEUE_SEL. 
In pure AMP mode this needs to be followed by a + * synchronization w/ the device before reading VIRTIO_MMIO_QUEUE_NUM_MAX + */ + virtio_mmio_write32(vdev, VIRTIO_MMIO_QUEUE_SEL, idx); + return virtio_mmio_read32(vdev, VIRTIO_MMIO_QUEUE_NUM_MAX); +} + +int virtio_mmio_device_init(struct virtio_mmio_device *vmdev, uintptr_t virt_mem_ptr, + uintptr_t cfg_mem_ptr, void *user_data) +{ + struct virtio_device *vdev = &vmdev->vdev; + uint32_t magic, version, devid, vendor; + + vdev->role = vmdev->device_mode; + vdev->priv = vmdev; + vdev->func = &virtio_mmio_dispatch; + vmdev->user_data = user_data; + + /* Set metal io mem ops */ + virtio_mmio_get_metal_io(vdev, virt_mem_ptr, cfg_mem_ptr); + + magic = virtio_mmio_read32(vdev, VIRTIO_MMIO_MAGIC_VALUE); + if (magic != VIRTIO_MMIO_MAGIC_VALUE_STRING) { + metal_log(METAL_LOG_ERROR, "Bad magic value %08x\n", magic); + return -1; + } + + version = virtio_mmio_read32(vdev, VIRTIO_MMIO_VERSION); + devid = virtio_mmio_read32(vdev, VIRTIO_MMIO_DEVICE_ID); + if (devid == 0) { + /* Placeholder */ + return -1; + } + + if (version != 1) { + metal_log(METAL_LOG_ERROR, "Bad version %08x\n", version); + return -1; + } + + vendor = virtio_mmio_read32(vdev, VIRTIO_MMIO_VENDOR_ID); + metal_log(METAL_LOG_DEBUG, "VIRTIO %08x:%08x\n", vendor, devid); + + vdev->id.version = version; + vdev->id.device = devid; + vdev->id.vendor = vendor; + + virtio_mmio_set_status(vdev, VIRTIO_CONFIG_STATUS_ACK); + virtio_mmio_write32(vdev, VIRTIO_MMIO_GUEST_PAGE_SIZE, 4096); + + return 0; +} + +/* Register preallocated virtqueues */ +void virtio_mmio_register_device(struct virtio_device *vdev, int vq_num, struct virtqueue **vqs) +{ + int i; + + vdev->vrings_info = metal_allocate_memory(sizeof(struct virtio_vring_info) * vq_num); + /* TODO: handle error case */ + for (i = 0; i < vq_num; i++) { + vdev->vrings_info[i].vq = vqs[i]; + } + vdev->vrings_num = vq_num; +} + +struct virtqueue *virtio_mmio_setup_virtqueue(struct virtio_device *vdev, + unsigned int idx, + 
struct virtqueue *vq, + void (*cb)(void *), + void *cb_arg, + const char *vq_name) +{ + uint32_t maxq; + struct virtio_vring_info _vring_info = {0}; + struct virtio_vring_info *vring_info = &_vring_info; + struct vring_alloc_info *vring_alloc_info; + struct virtio_mmio_device *vmdev = metal_container_of(vdev, + struct virtio_mmio_device, vdev); + + if (vdev->role != (unsigned int)VIRTIO_DEV_DRIVER) { + metal_log(METAL_LOG_ERROR, "Only VIRTIO_DEV_DRIVER is currently supported\n"); + return NULL; + } + + if (!vq) { + metal_log(METAL_LOG_ERROR, + "Only preallocated virtqueues are currently supported\n"); + return NULL; + } + + if (vdev->id.version != 0x1) { + metal_log(METAL_LOG_ERROR, + "Only VIRTIO MMIO version 1 is currently supported\n"); + return NULL; + } + + vring_info->io = vmdev->shm_io; + vring_info->info.num_descs = virtio_mmio_get_max_elem(vdev, idx); + vring_info->info.align = VIRTIO_MMIO_VRING_ALIGNMENT; + + /* Check if vrings are already configured */ + if (vq->vq_nentries != 0 && vq->vq_nentries == vq->vq_free_cnt && + vq->vq_ring.desc) { + vring_info->info.vaddr = vq->vq_ring.desc; + vring_info->vq = vq; + } + vring_info->info.num_descs = vq->vq_nentries; + + vq->vq_dev = vdev; + + vring_alloc_info = &vring_info->info; + + unsigned int role_bk = vdev->role; + /* Assign OA VIRTIO_DEV_DRIVER role to allow virtio guests to setup the vrings */ + vdev->role = (unsigned int)VIRTIO_DEV_DRIVER; + if (virtqueue_create(vdev, idx, vq_name, vring_alloc_info, (void (*)(struct virtqueue *))cb, + vdev->func->notify, vring_info->vq)) { + metal_log(METAL_LOG_ERROR, "virtqueue_create failed\n"); + return NULL; + } + vdev->role = role_bk; + vq->priv = cb_arg; + virtqueue_set_shmem_io(vq, vmdev->shm_io); + + /* Writing selection register VIRTIO_MMIO_QUEUE_SEL. 
In pure AMP + * mode this needs to be followed by a synchronization w/ the device + * before reading VIRTIO_MMIO_QUEUE_NUM_MAX + */ + virtio_mmio_write32(vdev, VIRTIO_MMIO_QUEUE_SEL, idx); + maxq = virtio_mmio_read32(vdev, VIRTIO_MMIO_QUEUE_NUM_MAX); + VIRTIO_ASSERT((maxq != 0), + "VIRTIO_MMIO_QUEUE_NUM_MAX cannot be 0"); + VIRTIO_ASSERT((maxq >= vq->vq_nentries), + "VIRTIO_MMIO_QUEUE_NUM_MAX must be greater than vqueue->vq_nentries"); + virtio_mmio_write32(vdev, VIRTIO_MMIO_QUEUE_NUM, vq->vq_nentries); + virtio_mmio_write32(vdev, VIRTIO_MMIO_QUEUE_ALIGN, 4096); + virtio_mmio_write32(vdev, VIRTIO_MMIO_QUEUE_PFN, + ((uintptr_t)metal_io_virt_to_phys(vq->shm_io, + (char *)vq->vq_ring.desc)) / 4096); + + vdev->vrings_info[vdev->vrings_num].vq = vq; + vdev->vrings_num++; + virtqueue_enable_cb(vq); + + return vq; +} + +void virtio_mmio_isr(struct virtio_device *vdev) +{ + struct virtio_vring_info *vrings_info = vdev->vrings_info; + + uint32_t isr = virtio_mmio_read32(vdev, VIRTIO_MMIO_INTERRUPT_STATUS); + struct virtqueue *vq; + unsigned int i; + + if (isr & VIRTIO_MMIO_INT_VRING) { + for (i = 0; i < vdev->vrings_num; i++) { + vq = vrings_info[i].vq; + if (vq->callback) + vq->callback(vq->priv); + } + } + + if (isr & ~(VIRTIO_MMIO_INT_VRING)) + metal_log(METAL_LOG_WARNING, "Unhandled interrupt type: 0x%x\n", isr); + + virtio_mmio_write32(vdev, VIRTIO_MMIO_INTERRUPT_ACK, isr); +} + +static int virtio_mmio_create_virtqueues(struct virtio_device *vdev, unsigned int flags, + unsigned int nvqs, const char *names[], + vq_callback callbacks[], void *callback_args[]) +{ + struct virtqueue *vq; + struct virtqueue *vring_vq; + void (*cb)(void *); + void *cb_arg; + unsigned int i; + + (void)flags; + + if (!vdev || !names || !vdev->vrings_info) + return -EINVAL; + + for (i = 0; i < nvqs; i++) { + vring_vq = NULL; + cb = NULL; + cb_arg = NULL; + if (vdev->vrings_info[i].vq) + vring_vq = vdev->vrings_info[i].vq; + if (callbacks) + cb = (virtio_mmio_vq_callback)callbacks[i]; + if 
(callback_args) + cb_arg = callback_args[i]; + vq = virtio_mmio_setup_virtqueue(vdev, i, vring_vq, cb, cb_arg, names[i]); + if (!vq) + return -ENODEV; + } + + return 0; +}