From f1295e575a9134015bb5ff7342a5b0b0b3033131 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Tue, 23 Dec 2025 21:09:43 +0200 Subject: [PATCH 01/42] mostly implement ack transmission but with a few missing todos; the tests are not yet updated --- libudpard/udpard.c | 217 +++++++++++++++++++++++++++++++++++---------- libudpard/udpard.h | 90 +++++++++++++++---- 2 files changed, 240 insertions(+), 67 deletions(-) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index 9bc71b5..9720b44 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -41,12 +41,6 @@ typedef unsigned char byte_t; ///< For compatibility with platforms where byte size is not 8 bits. -#define BIG_BANG INT64_MIN -#define HEAT_DEATH INT64_MAX - -#define KILO 1000LL -#define MEGA 1000000LL - /// Sessions will be garbage-collected after being idle for this long, along with unfinished transfers, if any. /// Pending slots within a live session will also be reset after this timeout to avoid storing stale data indefinitely. #define SESSION_LIFETIME (60 * MEGA) @@ -63,7 +57,7 @@ typedef unsigned char byte_t; ///< For compatibility with platforms where byte s /// were found to offer no advantage except in the perfect scenario of non-restarting senders, and an increased /// implementation complexity (more branches, more lines of code), so they were replaced with a simple list. /// The list works equally well given a non-contiguous transfer-ID stream, unlike the bitmask, thus more robust. -#define RX_TRANSFER_HISTORY_COUNT 16U +#define RX_TRANSFER_HISTORY_COUNT 32U /// In the ORDERED reassembly mode, with the most recently received transfer-ID N, the library will reject /// transfers with transfer-ID less than or equal to N-ORDERING_WINDOW (modulo 2^64) as late. @@ -74,6 +68,18 @@ typedef unsigned char byte_t; ///< For compatibility with platforms where byte s static_assert((UDPARD_IPv4_SUBJECT_ID_MAX & (UDPARD_IPv4_SUBJECT_ID_MAX + 1)) == 0, "UDPARD_IPv4_SUBJECT_ID_MAX must be one less than a power of 2"); +#define P2P_KIND_RESPONSE 0U +#define P2P_KIND_ACK 1U + +#define BIG_BANG INT64_MIN +#define HEAT_DEATH INT64_MAX + +#define KILO 1000LL +#define MEGA 1000000LL + +/// Pending ack transfers expire after this long if not transmitted. +#define ACK_TX_DEADLINE MEGA + static size_t smaller(const size_t a, const size_t b) { return (a < b) ? a : b; } static size_t larger(const size_t a, const size_t b) { return (a > b) ? a : b; } static int64_t min_i64(const int64_t a, const int64_t b) { return (a < b) ? a : b; } @@ -479,7 +485,7 @@ static bool tx_validate_mem_resources(const udpard_tx_mem_resources_t memory) /// Frames with identical weight are processed in the FIFO order. static int32_t tx_cavl_compare_prio(const void* const user, const udpard_tree_t* const node) { - return (((int)*(const udpard_prio_t*)user) >= (int)CAVL2_TO_OWNER(node, udpard_tx_item_t, index_prio)->priority) + return (((int)*(const udpard_prio_t*)user) >= (int)CAVL2_TO_OWNER(node, udpard_tx_item_t, index_order)->priority) ? +1 : -1; } @@ -498,7 +504,7 @@ static udpard_tx_item_t* tx_item_new(const udpard_tx_mem_resources_t memory, { udpard_tx_item_t* out = mem_alloc(memory.fragment, sizeof(udpard_tx_item_t)); if (out != NULL) { - out->index_prio = (udpard_tree_t){ 0 }; + out->index_order = (udpard_tree_t){ 0 }; out->index_deadline = (udpard_tree_t){ 0 }; UDPARD_ASSERT(priority <= UDPARD_PRIORITY_MAX); out->priority = priority; @@ -582,8 +588,8 @@ static uint32_t tx_push(udpard_tx_t* const tx, udpard_tx_item_t* const head = chain.head; UDPARD_ASSERT(frame_count == chain.count); const udpard_tree_t* res = cavl2_find_or_insert( - &tx->index_prio, &head->priority, &tx_cavl_compare_prio, &head->index_prio, &cavl2_trivial_factory); - UDPARD_ASSERT(res == &head->index_prio); + &tx->index_order, &head->priority, &tx_cavl_compare_prio, &head->index_order, &cavl2_trivial_factory); + UDPARD_ASSERT(res == &head->index_order); (void)res; res = cavl2_find_or_insert(&tx->index_deadline, &head->deadline, @@ -619,7 +625,7 @@ static uint64_t tx_purge_expired(udpard_tx_t* const self, const udpard_us_t now) udpard_tree_t* const next = cavl2_next_greater(p); // Get next before removing current node from tree. // Remove from both indices. cavl2_remove(&self->index_deadline, &item->index_deadline); - cavl2_remove(&self->index_prio, &item->index_prio); + cavl2_remove(&self->index_order, &item->index_order); // Free the entire transfer chain. udpard_tx_item_t* current = item; while (current != NULL) { @@ -647,7 +653,7 @@ bool udpard_tx_new(udpard_tx_t* const self, self->mtu = UDPARD_MTU_DEFAULT; self->memory = memory; self->queue_size = 0; - self->index_prio = NULL; + self->index_order = NULL; self->index_deadline = NULL; } return ok; @@ -688,7 +694,7 @@ udpard_tx_item_t* udpard_tx_peek(udpard_tx_t* const self, const udpard_us_t now) udpard_tx_item_t* out = NULL; if (self != NULL) { self->errors_expiration += tx_purge_expired(self, now); - out = CAVL2_TO_OWNER(cavl2_min(self->index_prio), udpard_tx_item_t, index_prio); + out = CAVL2_TO_OWNER(cavl2_min(self->index_order), udpard_tx_item_t, index_order); } return out; } @@ -697,10 +703,10 @@ void udpard_tx_pop(udpard_tx_t* const self, udpard_tx_item_t* const item) { if ((self != NULL) && (item != NULL)) { if (item->next_in_transfer == NULL) { - cavl2_remove(&self->index_prio, &item->index_prio); + cavl2_remove(&self->index_order, &item->index_order); cavl2_remove(&self->index_deadline, &item->index_deadline); } else { // constant-time update, super quick, just relink a few pointers! - cavl2_replace(&self->index_prio, &item->index_prio, &item->next_in_transfer->index_prio); + cavl2_replace(&self->index_order, &item->index_order, &item->next_in_transfer->index_order); cavl2_replace(&self->index_deadline, &item->index_deadline, &item->next_in_transfer->index_deadline); } self->queue_size--; @@ -710,8 +716,8 @@ void udpard_tx_pop(udpard_tx_t* const self, udpard_tx_item_t* const item) void udpard_tx_free(const udpard_tx_mem_resources_t memory, udpard_tx_item_t* const item) { if (item != NULL) { - UDPARD_ASSERT((item->index_prio.lr[0] == NULL) && (item->index_prio.up == NULL) && - (item->index_prio.lr[1] == NULL)); + UDPARD_ASSERT((item->index_order.lr[0] == NULL) && (item->index_order.up == NULL) && + (item->index_order.lr[1] == NULL)); UDPARD_ASSERT((item->index_deadline.lr[0] == NULL) && (item->index_deadline.up == NULL) && (item->index_deadline.lr[1] == NULL)); if (item->datagram_payload.data != NULL) { @@ -721,6 +727,62 @@ void udpard_tx_free(const udpard_tx_mem_resources_t memory, udpard_tx_item_t* co } } +/// Handle an ACK received from a remote node. +/// This is where we acknowledge pending transmissions. +static void tx_receive_ack(udpard_rx_t* const rx, + const uint64_t topic_hash, + const uint64_t transfer_id, + const udpard_remote_t remote) +{ + (void)rx; + (void)topic_hash; + (void)transfer_id; + (void)remote; + // TODO: implement +} + +/// Generate an ack transfer for the specified remote transfer. +static void tx_send_ack(udpard_rx_t* const rx, + const udpard_us_t now, + const udpard_prio_t priority, + const uint64_t topic_hash, + const uint64_t transfer_id, + const udpard_remote_t remote) +{ + // Compose the ack transfer payload. It simply contains the topic hash and the ID of the acked transfer. + byte_t header[UDPARD_P2P_HEADER_BYTES]; + byte_t* ptr = header; + *ptr++ = P2P_KIND_ACK; + ptr += 7U; // Reserved bytes. + ptr = serialize_u64(ptr, topic_hash); + ptr = serialize_u64(ptr, transfer_id); + UDPARD_ASSERT((ptr - header) == UDPARD_P2P_HEADER_BYTES); + const udpard_bytes_t payload = { .size = UDPARD_P2P_HEADER_BYTES, .data = header }; + + // Enqueue the ack transfer. + const uint64_t p2p_transfer_id = rx->p2p_transfer_id++; + for (uint_fast8_t i = 0; i < UDPARD_NETWORK_INTERFACE_COUNT_MAX; i++) { + udpard_tx_t* const tx = rx->tx[i]; + if ((tx != NULL) && udpard_is_valid_endpoint(remote.endpoints[i])) { + // TODO: scan the transmission queue for already pending acks; abort if one is already there. + const uint32_t count = udpard_tx_push(tx, + now, + now + ACK_TX_DEADLINE, + priority, + remote.uid, // this is a P2P transfer + remote.endpoints[i], + p2p_transfer_id, + payload, + false, + NULL); + UDPARD_ASSERT(count <= 1); + if (count != 1) { // ack is always a single-frame transfer, so we get either 0 or 1 + rx->errors_ack_tx[i]++; + } + } + } +} + // --------------------------------------------------------------------------------------------------------------------- // --------------------------------------------- RX PIPELINE --------------------------------------------- // --------------------------------------------------------------------------------------------------------------------- @@ -1084,6 +1146,9 @@ static void rx_slot_update(rx_slot_t* const slot, // --------------------------------------------- SESSION & PORT --------------------------------------------- +/// The number of times `from` must be incremented (modulo 2^64) to reach `to`. +static uint64_t rx_transfer_id_forward_distance(const uint64_t from, const uint64_t to) { return to - from; } + /// Keep in mind that we have a dedicated session object per remote node per port; this means that the states /// kept here are specific per remote node, as it should be. typedef struct rx_session_t @@ -1126,9 +1191,6 @@ typedef struct udpard_rx_port_vtable_private_t void (*update_session)(rx_session_t*, udpard_rx_t*, udpard_us_t, rx_frame_t*, udpard_mem_deleter_t); } udpard_rx_port_vtable_private_t; -/// The number of times `from` must be incremented (modulo 2^64) to reach `to`. -static uint64_t rx_transfer_id_forward_distance(const uint64_t from, const uint64_t to) { return to - from; } - /// True iff the given transfer-ID was recently ejected. static bool rx_session_is_transfer_ejected(const rx_session_t* const self, const uint64_t transfer_id) { @@ -1163,21 +1225,6 @@ static bool rx_session_is_transfer_interned(const rx_session_t* const self, cons return false; } -static void rx_session_on_ack_mandate(const rx_session_t* const self, - udpard_rx_t* const rx, - const udpard_prio_t priority, - const uint64_t transfer_id, - const udpard_bytes_t payload_head) -{ - UDPARD_ASSERT(rx_session_is_transfer_ejected(self, transfer_id) || - rx_session_is_transfer_interned(self, transfer_id)); - const udpard_rx_ack_mandate_t mandate = { - .remote = self->remote, .priority = priority, .transfer_id = transfer_id, .payload_head = payload_head - }; - UDPARD_ASSERT(payload_head.data != NULL || payload_head.size == 0U); - self->port->vtable->on_ack_mandate(rx, self->port, mandate); -} - static int32_t cavl_compare_rx_session_by_remote_uid(const void* const user, const udpard_tree_t* const node) { const uint64_t uid_a = *(const uint64_t*)user; @@ -1456,8 +1503,8 @@ static void rx_session_update_ordered(rx_session_t* const self, if (slot->state == rx_slot_done) { UDPARD_ASSERT(rx_session_is_transfer_interned(self, slot->transfer_id)); if (frame->meta.flag_ack) { - rx_session_on_ack_mandate( - self, rx, slot->priority, slot->transfer_id, ((udpard_fragment_t*)cavl2_min(slot->fragments))->view); + // Payload view: ((udpard_fragment_t*)cavl2_min(slot->fragments))->view + tx_send_ack(rx, ts, slot->priority, self->port->topic_hash, slot->transfer_id, self->remote); } rx_session_ordered_scan_slots(self, rx, ts, false); } @@ -1466,7 +1513,8 @@ static void rx_session_update_ordered(rx_session_t* const self, // meaning that the sender will not get a confirmation if the retransmitted transfer is too old. // We assume that RX_TRANSFER_HISTORY_COUNT is enough to cover all sensible use cases. if ((is_interned || is_ejected) && frame->meta.flag_ack && (frame->base.offset == 0U)) { - rx_session_on_ack_mandate(self, rx, frame->meta.priority, frame->meta.transfer_id, frame->base.payload); + // Payload view: frame->base.payload + tx_send_ack(rx, ts, frame->meta.priority, self->port->topic_hash, frame->meta.transfer_id, self->remote); } mem_free_payload(payload_deleter, frame->base.origin); } @@ -1496,16 +1544,15 @@ static void rx_session_update_unordered(rx_session_t* const self, &rx->errors_oom, &rx->errors_transfer_malformed); if (slot->state == rx_slot_done) { - if (frame->meta.flag_ack) { - rx_session_on_ack_mandate( - self, rx, slot->priority, slot->transfer_id, ((udpard_fragment_t*)cavl2_min(slot->fragments))->view); + if (frame->meta.flag_ack) { // Payload view: ((udpard_fragment_t*)cavl2_min(slot->fragments))->view + tx_send_ack(rx, ts, slot->priority, self->port->topic_hash, slot->transfer_id, self->remote); } rx_session_eject(self, rx, slot); } - } else { // retransmit ACK if needed - if (frame->meta.flag_ack && (frame->base.offset == 0U)) { + } else { // retransmit ACK if needed + if (frame->meta.flag_ack && (frame->base.offset == 0U)) { // Payload view: frame->base.payload UDPARD_ASSERT(rx_session_is_transfer_ejected(self, frame->meta.transfer_id)); - rx_session_on_ack_mandate(self, rx, frame->meta.priority, frame->meta.transfer_id, frame->base.payload); + tx_send_ack(rx, ts, frame->meta.priority, self->port->topic_hash, frame->meta.transfer_id, self->remote); } mem_free_payload(payload_deleter, frame->base.origin); } @@ -1593,7 +1640,9 @@ static bool rx_validate_mem_resources(const udpard_rx_mem_resources_t memory) (memory.fragment.alloc != NULL) && (memory.fragment.free != NULL); } -void udpard_rx_new(udpard_rx_t* const self) +void udpard_rx_new(udpard_rx_t* const self, + udpard_tx_t* const tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX], + const uint64_t p2p_transfer_id_initial) { UDPARD_ASSERT(self != NULL); mem_zero(sizeof(*self), self); @@ -1602,7 +1651,11 @@ void udpard_rx_new(udpard_rx_t* const self) self->errors_oom = 0; self->errors_frame_malformed = 0; self->errors_transfer_malformed = 0; - self->user = NULL; + for (size_t i = 0; i < UDPARD_NETWORK_INTERFACE_COUNT_MAX; i++) { + self->tx[i] = tx[i]; + } + self->p2p_transfer_id = p2p_transfer_id_initial; + self->user = NULL; } void udpard_rx_poll(udpard_rx_t* const self, const udpard_us_t now) @@ -1638,7 +1691,7 @@ bool udpard_rx_port_new(udpard_rx_port_t* const self, (reordering_window == UDPARD_RX_REORDERING_WINDOW_UNORDERED) || (reordering_window == UDPARD_RX_REORDERING_WINDOW_STATELESS); const bool ok = (self != NULL) && rx_validate_mem_resources(memory) && win_ok && (vtable != NULL) && - (vtable->on_message != NULL) && (vtable->on_ack_mandate != NULL) && (vtable->on_collision != NULL); + (vtable->on_message != NULL) && (vtable->on_collision != NULL); if (ok) { mem_zero(sizeof(*self), self); self->topic_hash = topic_hash; @@ -1659,6 +1712,74 @@ bool udpard_rx_port_new(udpard_rx_port_t* const self, return ok; } +/// A thin proxy that reads the P2P header and dispatches the message to the appropriate handler. +static void rx_p2p_on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) +{ + udpard_rx_port_p2p_t* const self = (udpard_rx_port_p2p_t*)port; + + // Read the header. + udpard_fragment_t* const frag0 = udpard_fragment_seek(transfer.payload, 0); + if (frag0->view.size < UDPARD_P2P_HEADER_BYTES) { + ++rx->errors_transfer_malformed; + udpard_fragment_free_all(transfer.payload, port->memory.fragment); + return; // Bad transfer -- fragmented header. We can still handle it but it's a protocol violation. + } + + // Parse the P2P header. + const byte_t* ptr = (const byte_t*)frag0->view.data; + const byte_t kind = *ptr++; + ptr += 7U; // reserved + uint64_t topic_hash = 0; + uint64_t transfer_id = 0; + ptr = deserialize_u64(ptr, &topic_hash); + ptr = deserialize_u64(ptr, &transfer_id); + UDPARD_ASSERT((ptr == (UDPARD_P2P_HEADER_BYTES + (byte_t*)frag0->view.data))); + + // Remove the header from the view. + frag0->view.size -= UDPARD_P2P_HEADER_BYTES; + frag0->view.data = UDPARD_P2P_HEADER_BYTES + (byte_t*)(frag0->view.data); + + // Process the data depending on the kind. + if (kind == P2P_KIND_ACK) { + tx_receive_ack(rx, topic_hash, transfer_id, transfer.remote); + } else if (kind == P2P_KIND_RESPONSE) { + const udpard_rx_transfer_p2p_t tr = { .base = transfer, .topic_hash = topic_hash }; + self->vtable->on_message(rx, self, tr); + } else { + (void)0; // Malformed, ignored. + } +} + +static void rx_p2p_on_collision(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_remote_t remote) +{ + (void)rx; + (void)port; + (void)remote; + // A hash collision on a P2P port simply means that someone sent a transfer to the wrong unicast endpoint. + // This could happen if nodes swapped UDP/IP endpoints live, or if there are multiple nodes sharing the + // same UDP endpoint (same socket). Simply ignore it as there is nothing to do. +} + +bool udpard_rx_port_new_p2p(udpard_rx_port_p2p_t* const self, + const uint64_t local_uid, + const size_t extent, + const udpard_rx_mem_resources_t memory, + const udpard_rx_port_p2p_vtable_t* const vtable) +{ + static const udpard_rx_port_vtable_t proxy = { .on_message = rx_p2p_on_message, + .on_collision = rx_p2p_on_collision }; + if ((self != NULL) && (vtable != NULL) && (vtable->on_message != NULL)) { + self->vtable = vtable; + return udpard_rx_port_new((udpard_rx_port_t*)&self, // + local_uid, + extent + UDPARD_P2P_HEADER_BYTES, + UDPARD_RX_REORDERING_WINDOW_UNORDERED, + memory, + &proxy); + } + return false; +} + void udpard_rx_port_free(udpard_rx_t* const rx, udpard_rx_port_t* const port) { if ((rx != NULL) && (port != NULL)) { diff --git a/libudpard/udpard.h b/libudpard/udpard.h index 5a62045..e00e6db 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -71,6 +71,20 @@ extern "C" /// The library supports at most this many local redundant network interfaces. #define UDPARD_NETWORK_INTERFACE_COUNT_MAX 3U +/// All P2P transfers have a fixed prefix, handled by the library transparently for the application, +/// defined as follows in DSDL notation: +/// +/// uint8 KIND_RESPONSE = 0 # The topic hash and transfer-ID specify which message this is a response to. +/// uint8 KIND_ACK = 1 # The topic hash and transfer-ID specify which transfer is being acknowledged. +/// uint8 kind +/// void56 +/// uint64 topic_hash +/// uint64 transfer_id +/// # Payload follows only for KIND_RESPONSE. +/// +/// The extent of P2P ports must be at least this large to accommodate the header. +#define UDPARD_P2P_HEADER_BYTES 24U + /// Timestamps supplied by the application must be non-negative monotonically increasing counts of microseconds. typedef int64_t udpard_us_t; @@ -332,7 +346,7 @@ typedef struct udpard_tx_t uint64_t errors_expiration; ///< A frame had to be dropped due to premature deadline expiration. /// Internal use only, do not modify! - udpard_tree_t* index_prio; ///< Most urgent on the left, then according to the insertion order. + udpard_tree_t* index_order; ///< Most urgent on the left, then according to the insertion order. udpard_tree_t* index_deadline; ///< Soonest on the left, then according to the insertion order. } udpard_tx_t; @@ -344,8 +358,9 @@ typedef struct udpard_tx_t /// a transfer of the payload memory ownership to somewhere else. typedef struct udpard_tx_item_t { - udpard_tree_t index_prio; + udpard_tree_t index_order; udpard_tree_t index_deadline; + // TODO: indexing by (topic hash, transfer-ID); retain for retransmission. /// Points to the next frame in this transfer or NULL. /// Normally, the application would not use it because transfer frame ordering is orthogonal to global TX ordering. @@ -372,6 +387,7 @@ typedef struct udpard_tx_item_t /// This opaque pointer is assigned the value that is passed to udpard_tx_push(). /// The library itself does not make use of it but the application can use it to provide continuity between /// its high-level transfer objects and datagrams that originate from it. Assign NULL if not needed. + /// Items generated by the library (ack transfers) always store NULL here. void* user_transfer_reference; } udpard_tx_item_t; @@ -436,7 +452,7 @@ uint32_t udpard_tx_push(udpard_tx_t* const self, const udpard_udpip_ep_t remote_ep, const uint64_t transfer_id, const udpard_bytes_t payload, - const bool ack_required, + const bool ack_required, // TODO: provide retry count; 0 if no ack. void* const user_transfer_reference); /// Purges all timed out items from the transmission queue automatically; returns the next item to be transmitted, @@ -558,6 +574,18 @@ typedef struct udpard_rx_t uint64_t errors_frame_malformed; ///< A received frame was malformed and thus dropped. uint64_t errors_transfer_malformed; ///< A transfer could not be reassembled correctly. + /// Whenever an ack fails to transmit on a certain interface, the corresponding counter is incremented. + /// The specific error can be determined by checking the specific counters in the corresponding tx instance. + uint64_t errors_ack_tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX]; + + /// The transmission pipelines are needed to manage ack transmission and removal of acknowledged transfers. + /// Some of the pointers can be NULL depending on the number of redundant interfaces available. + /// If the application wants to only listen, all pointers may be NULL (no acks will be sent ever). + udpard_tx_t* tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX]; + + /// A random-initialized transfer-ID counter for all outgoing P2P transfers. + uint64_t p2p_transfer_id; + void* user; ///< Opaque pointer for the application use only. Not accessed by the library. } udpard_rx_t; @@ -575,9 +603,10 @@ typedef struct udpard_rx_mem_resources_t udpard_mem_resource_t fragment; } udpard_rx_mem_resources_t; -typedef struct udpard_rx_port_t udpard_rx_port_t; -typedef struct udpard_rx_transfer_t udpard_rx_transfer_t; -typedef struct udpard_rx_ack_mandate_t udpard_rx_ack_mandate_t; +typedef struct udpard_rx_port_t udpard_rx_port_t; +typedef struct udpard_rx_port_p2p_t udpard_rx_port_p2p_t; +typedef struct udpard_rx_transfer_t udpard_rx_transfer_t; +typedef struct udpard_rx_transfer_p2p_t udpard_rx_transfer_p2p_t; /// Provided by the application per port instance to specify the callbacks to be invoked on certain events. /// This design allows distinct callbacks per port, which is especially useful for the P2P port. @@ -587,8 +616,6 @@ typedef struct udpard_rx_port_vtable_t void (*on_message)(udpard_rx_t*, udpard_rx_port_t*, udpard_rx_transfer_t); /// A topic hash collision is detected on a port. void (*on_collision)(udpard_rx_t*, udpard_rx_port_t*, udpard_remote_t); - /// The application is required to send an acknowledgment back to the sender. - void (*on_ack_mandate)(udpard_rx_t*, udpard_rx_port_t*, udpard_rx_ack_mandate_t); } udpard_rx_port_vtable_t; /// This type represents an open input port, such as a subscription to a topic. @@ -600,6 +627,7 @@ struct udpard_rx_port_t /// Transfer payloads exceeding this extent may be truncated. /// The total size of the received payload may still exceed this extent setting by some small margin. + /// For P2P ports, UDPARD_P2P_HEADER_BYTES must be included in this value. size_t extent; /// See UDPARD_RX_REORDERING_WINDOW_... above. @@ -681,20 +709,34 @@ struct udpard_rx_transfer_t udpard_fragment_t* payload; }; -/// Emitted when the stack detects the need to send a reception acknowledgment back to the remote node. -struct udpard_rx_ack_mandate_t +/// A P2P transfer carries a response to a message published earlier. +/// The transfer-ID in the base structure identifies the original message being responded to. +/// The topic_hash field identifies the topic of the original message. +struct udpard_rx_transfer_p2p_t { - udpard_prio_t priority; - uint64_t transfer_id; - udpard_remote_t remote; - /// View of the payload carried by the first frame of the transfer that is being confirmed. - /// Valid until return from the callback. - udpard_bytes_t payload_head; + udpard_rx_transfer_t base; + uint64_t topic_hash; +}; + +/// A specialization of udpard_rx_port_vtable_t for P2P ports. +typedef struct udpard_rx_port_p2p_vtable_t +{ + /// A new message is received on a port. The handler takes ownership of the payload; it must free it after use. + void (*on_message)(udpard_rx_t*, udpard_rx_port_p2p_t*, udpard_rx_transfer_p2p_t); +} udpard_rx_port_p2p_vtable_t; + +/// A specialization of udpard_rx_port_t for the local node's P2P port. +struct udpard_rx_port_p2p_t +{ + udpard_rx_port_t base; + const udpard_rx_port_p2p_vtable_t* vtable; }; /// The RX instance holds no resources and can be destroyed at any time by simply freeing all its ports first /// using udpard_rx_port_free(), then discarding the instance itself. The self pointer must not be NULL. -void udpard_rx_new(udpard_rx_t* const self); +void udpard_rx_new(udpard_rx_t* const self, + udpard_tx_t* const tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX], + const uint64_t p2p_transfer_id_initial); /// Must be invoked at least every few milliseconds (more often is fine) to purge timed-out sessions and eject /// received transfers when the reordering window expires. If this is invoked simultaneously with rx subscription @@ -702,7 +744,7 @@ void udpard_rx_new(udpard_rx_t* const self); /// The time complexity is logarithmic in the number of living sessions. void udpard_rx_poll(udpard_rx_t* const self, const udpard_us_t now); -/// To subscribe to a subject or to listen for P2P transfers, the application should do this: +/// To subscribe to a subject, the application should do this: /// 1. Create a new udpard_rx_port_t instance using udpard_rx_port_new(). /// 2. Per redundant network interface: /// - Create a new RX socket bound to the IP multicast group address and UDP port number returned by @@ -711,6 +753,9 @@ void udpard_rx_poll(udpard_rx_t* const self, const udpard_us_t now); /// 3. Read data from the sockets continuously and forward each datagram to udpard_rx_port_push(), /// along with the index of the redundant interface the datagram was received on. /// +/// For P2P ports, the procedure is similar except that the appropriate function is udpard_rx_port_new_p2p(). +/// There must be exactly one P2P port per node. +/// /// The extent defines the maximum possible size of received objects, considering also possible future data type /// versions with new fields. It is safe to pick larger values. Note well that the extent is not the same thing as /// the maximum size of the object, it is usually larger! Transfers that carry payloads that exceed the specified @@ -718,7 +763,6 @@ void udpard_rx_poll(udpard_rx_t* const self, const udpard_us_t now); /// /// The topic hash is needed to detect and ignore transfers that use different topics on the same subject-ID. /// The collision callback is invoked if a topic hash collision is detected. -/// For P2P ports, the topic hash is populated with the local node's UID instead. /// /// If not sure which reassembly mode to choose, consider UDPARD_RX_REORDERING_WINDOW_UNORDERED as the default choice. /// For ordering-sensitive use cases, such as state estimators and control loops, use ORDERED with a short window. @@ -734,6 +778,14 @@ bool udpard_rx_port_new(udpard_rx_port_t* const self, const udpard_rx_mem_resources_t memory, const udpard_rx_port_vtable_t* const vtable); +/// Same as udpard_rx_port_new() but explicitly indicates that this is the local node's P2P port. +/// UDPARD_P2P_HEADER_BYTES will be added to the specified extent value. +bool udpard_rx_port_new_p2p(udpard_rx_port_p2p_t* const self, + const uint64_t local_uid, + const size_t extent, + const udpard_rx_mem_resources_t memory, + const udpard_rx_port_p2p_vtable_t* const vtable); + /// Returns all memory allocated for the sessions, slots, fragments, etc of the given port. /// Does not free the port itself and does not alter the RX instance aside from unlinking the port from it. /// It is safe to invoke this at any time, but the port instance shall not be used again unless re-initialized. From 0a263187ca6737a9ee74596c9391c6e1b34a4b77 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Tue, 23 Dec 2025 23:11:23 +0200 Subject: [PATCH 02/42] tests green --- libudpard/udpard.c | 4 +- tests/src/test_e2e_edge.cpp | 16 +- tests/src/test_e2e_random.cpp | 15 +- tests/src/test_intrusive_rx.c | 2058 ++++++++++----------------------- 4 files changed, 631 insertions(+), 1462 deletions(-) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index 9720b44..471d97d 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -757,6 +757,7 @@ static void tx_send_ack(udpard_rx_t* const rx, ptr = serialize_u64(ptr, topic_hash); ptr = serialize_u64(ptr, transfer_id); UDPARD_ASSERT((ptr - header) == UDPARD_P2P_HEADER_BYTES); + (void)ptr; const udpard_bytes_t payload = { .size = UDPARD_P2P_HEADER_BYTES, .data = header }; // Enqueue the ack transfer. @@ -1734,6 +1735,7 @@ static void rx_p2p_on_message(udpard_rx_t* const rx, udpard_rx_port_t* const por ptr = deserialize_u64(ptr, &topic_hash); ptr = deserialize_u64(ptr, &transfer_id); UDPARD_ASSERT((ptr == (UDPARD_P2P_HEADER_BYTES + (byte_t*)frag0->view.data))); + (void)ptr; // Remove the header from the view. frag0->view.size -= UDPARD_P2P_HEADER_BYTES; @@ -1770,7 +1772,7 @@ bool udpard_rx_port_new_p2p(udpard_rx_port_p2p_t* const self, .on_collision = rx_p2p_on_collision }; if ((self != NULL) && (vtable != NULL) && (vtable->on_message != NULL)) { self->vtable = vtable; - return udpard_rx_port_new((udpard_rx_port_t*)&self, // + return udpard_rx_port_new((udpard_rx_port_t*)self, // local_uid, extent + UDPARD_P2P_HEADER_BYTES, UDPARD_RX_REORDERING_WINDOW_UNORDERED, diff --git a/tests/src/test_e2e_edge.cpp b/tests/src/test_e2e_edge.cpp index 4a255a6..5f5cb2f 100644 --- a/tests/src/test_e2e_edge.cpp +++ b/tests/src/test_e2e_edge.cpp @@ -15,14 +15,12 @@ namespace { void on_message(udpard_rx_t* rx, udpard_rx_port_t* port, udpard_rx_transfer_t transfer); void on_collision(udpard_rx_t* rx, udpard_rx_port_t* port, udpard_remote_t remote); -void on_ack_mandate(udpard_rx_t* rx, udpard_rx_port_t* port, udpard_rx_ack_mandate_t am); -constexpr udpard_rx_port_vtable_t callbacks{ &on_message, &on_collision, &on_ack_mandate }; +constexpr udpard_rx_port_vtable_t callbacks{ &on_message, &on_collision }; struct Context { std::vector ids; size_t collisions = 0; - size_t ack_mandates = 0; uint64_t expected_uid = 0; udpard_udpip_ep_t source = {}; }; @@ -62,7 +60,8 @@ struct Fixture dest = udpard_make_subject_endpoint(222U); TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x0A0B0C0D0E0F1011ULL, 16, tx_mem)); - udpard_rx_new(&rx); + std::array rx_tx{}; + udpard_rx_new(&rx, rx_tx.data(), 0); ctx.expected_uid = tx.local_uid; ctx.source = source; rx.user = &ctx; @@ -122,12 +121,6 @@ void on_collision(udpard_rx_t* const rx, udpard_rx_port_t* const /*port*/, const ctx->collisions++; } -void on_ack_mandate(udpard_rx_t* const rx, udpard_rx_port_t* const /*port*/, const udpard_rx_ack_mandate_t /*am*/) -{ - auto* const ctx = static_cast(rx->user); - ctx->ack_mandates++; -} - /// UNORDERED mode should drop duplicates while keeping arrival order. void test_udpard_rx_unordered_duplicates() { @@ -148,7 +141,6 @@ void test_udpard_rx_unordered_duplicates() TEST_ASSERT_EQUAL_UINT64(expected[i], fix.ctx.ids[i]); } TEST_ASSERT_EQUAL_size_t(0, fix.ctx.collisions); - TEST_ASSERT_EQUAL_size_t(0, fix.ctx.ack_mandates); } /// ORDERED mode waits for the window, then rejects late arrivals. @@ -190,7 +182,6 @@ void test_udpard_rx_ordered_out_of_order() TEST_ASSERT_EQUAL_UINT64(expected[i], fix.ctx.ids[i]); } TEST_ASSERT_EQUAL_size_t(0, fix.ctx.collisions); - TEST_ASSERT_EQUAL_size_t(0, fix.ctx.ack_mandates); } /// ORDERED mode after head advance should reject late IDs arriving after window expiry. @@ -226,7 +217,6 @@ void test_udpard_rx_ordered_head_advanced_late() TEST_ASSERT_EQUAL_UINT64(expected[i], fix.ctx.ids[i]); } TEST_ASSERT_EQUAL_size_t(0, fix.ctx.collisions); - TEST_ASSERT_EQUAL_size_t(0, fix.ctx.ack_mandates); } } // namespace diff --git a/tests/src/test_e2e_random.cpp b/tests/src/test_e2e_random.cpp index 63b74be..401979a 100644 --- a/tests/src/test_e2e_random.cpp +++ b/tests/src/test_e2e_random.cpp @@ -44,7 +44,6 @@ struct Context std::unordered_map expected; size_t received = 0; size_t collisions = 0; - size_t ack_mandates = 0; size_t truncated = 0; uint64_t remote_uid = 0; std::array remote_endpoints = {}; @@ -122,15 +121,7 @@ void on_collision(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udp (void)remote; ctx->collisions++; } - -void on_ack_mandate(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_ack_mandate_t mandate) -{ - auto* ctx = static_cast(rx->user); - (void)port; - (void)mandate; - ctx->ack_mandates++; -} -constexpr udpard_rx_port_vtable_t callbacks{ &on_message, &on_collision, &on_ack_mandate }; +constexpr udpard_rx_port_vtable_t callbacks{ &on_message, &on_collision }; /// Randomized end-to-end TX/RX covering fragmentation, reordering, and extent-driven truncation. void test_udpard_tx_rx_end_to_end() @@ -156,7 +147,8 @@ void test_udpard_tx_rx_end_to_end() const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; udpard_rx_t rx; - udpard_rx_new(&rx); + std::array rx_tx{}; + udpard_rx_new(&rx, rx_tx.data(), 0); // Test parameters. constexpr std::array topic_hashes{ 0x123456789ABCDEF0ULL, @@ -266,7 +258,6 @@ void test_udpard_tx_rx_end_to_end() TEST_ASSERT_EQUAL_size_t(1000, ctx.received); TEST_ASSERT_TRUE(ctx.truncated > 0); TEST_ASSERT_EQUAL_size_t(0, ctx.collisions); - TEST_ASSERT_EQUAL_size_t(0, ctx.ack_mandates); for (auto& port : ports) { udpard_rx_port_free(&rx, &port); } diff --git a/tests/src/test_intrusive_rx.c b/tests/src/test_intrusive_rx.c index 78bca53..bb67fab 100644 --- a/tests/src/test_intrusive_rx.c +++ b/tests/src/test_intrusive_rx.c @@ -1583,6 +1583,98 @@ static void test_rx_transfer_id_forward_distance(void) rx_transfer_id_forward_distance(0x0FEDCBA987654321ULL, 0x123456789ABCDEF0ULL)); } +// Captures ack transfers emitted into the TX pipelines. +typedef struct +{ + udpard_prio_t priority; + uint64_t transfer_id; + uint64_t topic_hash; + udpard_udpip_ep_t destination; + uint64_t acked_topic_hash; + uint64_t acked_transfer_id; +} ack_tx_info_t; + +// Per-interface TX pipelines used by RX for acks. +typedef struct +{ + instrumented_allocator_t alloc_frag[UDPARD_NETWORK_INTERFACE_COUNT_MAX]; + instrumented_allocator_t alloc_payload[UDPARD_NETWORK_INTERFACE_COUNT_MAX]; + udpard_tx_t tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX]; + udpard_tx_t* ptrs[UDPARD_NETWORK_INTERFACE_COUNT_MAX]; +} tx_fixture_t; + +static void tx_fixture_init(tx_fixture_t* const self, const uint64_t uid, const size_t capacity) +{ + for (size_t i = 0; i < UDPARD_NETWORK_INTERFACE_COUNT_MAX; i++) { + instrumented_allocator_new(&self->alloc_frag[i]); + instrumented_allocator_new(&self->alloc_payload[i]); + const udpard_tx_mem_resources_t mem = { + .fragment = instrumented_allocator_make_resource(&self->alloc_frag[i]), + .payload = instrumented_allocator_make_resource(&self->alloc_payload[i]), + }; + TEST_ASSERT(udpard_tx_new(&self->tx[i], uid, capacity, mem)); + self->ptrs[i] = &self->tx[i]; + } +} + +static void tx_fixture_free(tx_fixture_t* const self) +{ + for (size_t i = 0; i < UDPARD_NETWORK_INTERFACE_COUNT_MAX; i++) { + TEST_ASSERT_EQUAL(0, self->tx[i].queue_size); + TEST_ASSERT_EQUAL(0, self->alloc_frag[i].allocated_fragments); + TEST_ASSERT_EQUAL(0, self->alloc_payload[i].allocated_fragments); + instrumented_allocator_reset(&self->alloc_frag[i]); + instrumented_allocator_reset(&self->alloc_payload[i]); + } +} + +// Drains ack frames while returning the last one. +static size_t drain_ack_tx(udpard_tx_t* const tx[], const udpard_us_t now, ack_tx_info_t* const last_out) +{ + size_t count = 0; + for (size_t i = 0; i < UDPARD_NETWORK_INTERFACE_COUNT_MAX; i++) { + udpard_tx_t* pipeline = tx[i]; + if (pipeline == NULL) { + continue; + } + for (udpard_tx_item_t* item = udpard_tx_peek(pipeline, now); item != NULL; + item = udpard_tx_peek(pipeline, now)) { + meta_t meta = { 0 }; + uint32_t frame_index = 0; + uint32_t frame_offset = 0; + uint32_t prefix_crc = 0; + udpard_bytes_t payload = { 0 }; + ack_tx_info_t info = { 0 }; + const bool ok = header_deserialize( + (udpard_bytes_mut_t){ .size = item->datagram_payload.size, .data = item->datagram_payload.data }, + &meta, + &frame_index, + &frame_offset, + &prefix_crc, + &payload); + TEST_ASSERT_TRUE(ok); + TEST_ASSERT_EQUAL_UINT32(0, frame_index); + TEST_ASSERT_EQUAL_UINT32(0, frame_offset); + TEST_ASSERT_EQUAL_size_t(UDPARD_P2P_HEADER_BYTES, payload.size); + const byte_t* const pl = (const byte_t*)payload.data; + TEST_ASSERT_EQUAL_UINT8(P2P_KIND_ACK, pl[0]); + info.priority = meta.priority; + info.transfer_id = meta.transfer_id; + info.topic_hash = meta.topic_hash; + info.destination = item->destination; + (void)deserialize_u64(pl + 8U, &info.acked_topic_hash); + (void)deserialize_u64(pl + 16U, &info.acked_transfer_id); + if (last_out != NULL) { + *last_out = info; + } + udpard_tx_pop(pipeline, item); + udpard_tx_free(pipeline->memory, item); + count++; + } + } + return count; +} + typedef struct { udpard_rx_t* rx; @@ -1601,13 +1693,12 @@ typedef struct udpard_remote_t remote; uint64_t count; } collision; + uint64_t p2p_topic_hash; struct { - udpard_rx_ack_mandate_t am; - uint64_t count; - /// We copy the payload head in here because the lifetime of the reference ends upon return from the callback. - byte_t payload_head_storage[UDPARD_MTU_DEFAULT]; - } ack_mandate; + ack_tx_info_t last; + uint64_t count; + } ack; } callback_result_t; static void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) @@ -1634,28 +1725,104 @@ static void on_collision(udpard_rx_t* const rx, udpard_rx_port_t* const port, co cb_result->collision.remote = remote; cb_result->collision.count++; } +static const udpard_rx_port_vtable_t callbacks = { &on_message, &on_collision }; +static void on_message_p2p(udpard_rx_t* const rx, + udpard_rx_port_p2p_t* const port, + const udpard_rx_transfer_p2p_t transfer) +{ + ((callback_result_t*)rx->user)->p2p_topic_hash = transfer.topic_hash; + on_message(rx, (udpard_rx_port_t*)port, transfer.base); +} +static const udpard_rx_port_p2p_vtable_t callbacks_p2p = { &on_message_p2p }; -static void on_ack_mandate(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_ack_mandate_t am) +/// Checks that ack transfers are emitted into the TX queues. +static void test_rx_ack_enqueued(void) { - printf("on_ack_mandate: transfer_id=%llu payload_head_size=%zu\n", - (unsigned long long)am.transfer_id, - am.payload_head.size); - callback_result_t* const cb_result = (callback_result_t* const)rx->user; - cb_result->rx = rx; - cb_result->port = port; - cb_result->ack_mandate.am = am; - cb_result->ack_mandate.count++; - // Copy the payload head to our storage. - TEST_PANIC_UNLESS(am.payload_head.size <= sizeof(cb_result->ack_mandate.payload_head_storage)); - memcpy(cb_result->ack_mandate.payload_head_storage, am.payload_head.data, am.payload_head.size); - cb_result->ack_mandate.am.payload_head.data = cb_result->ack_mandate.payload_head_storage; + instrumented_allocator_t alloc_frag = { 0 }; + instrumented_allocator_new(&alloc_frag); + const udpard_mem_resource_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); + + instrumented_allocator_t alloc_session = { 0 }; + instrumented_allocator_new(&alloc_session); + const udpard_mem_resource_t mem_session = instrumented_allocator_make_resource(&alloc_session); + + instrumented_allocator_t alloc_payload = { 0 }; + instrumented_allocator_new(&alloc_payload); + const udpard_mem_resource_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); + const udpard_mem_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); + + const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; + + tx_fixture_t tx_fix = { 0 }; + tx_fixture_init(&tx_fix, 0xBADC0FFEE0DDF00DULL, 8); + + udpard_rx_t rx; + udpard_rx_new(&rx, tx_fix.ptrs, 10); + callback_result_t cb_result = { 0 }; + rx.user = &cb_result; + + const uint64_t topic_hash = 0x4E81E200CB479D4CULL; + udpard_rx_port_t port; + const udpard_us_t window = UDPARD_RX_REORDERING_WINDOW_UNORDERED; + const uint64_t remote_uid = 0xA1B2C3D4E5F60718ULL; + const size_t extent = 1000; + TEST_ASSERT(udpard_rx_port_new(&port, topic_hash, extent, window, rx_mem, &callbacks)); + rx_session_factory_args_t fac_args = { + .owner = &port, + .sessions_by_animation = &rx.list_session_by_animation, + .remote_uid = remote_uid, + .now = 0, + }; + rx_session_t* const ses = (rx_session_t*)cavl2_find_or_insert(&port.index_session_by_remote_uid, + &remote_uid, + &cavl_compare_rx_session_by_remote_uid, + &fac_args, + &cavl_factory_rx_session_by_remote_uid); + TEST_ASSERT_NOT_NULL(ses); + + meta_t meta = { .priority = udpard_prio_high, + .flag_ack = true, + .transfer_payload_size = 5, + .transfer_id = 77, + .sender_uid = remote_uid, + .topic_hash = topic_hash }; + udpard_us_t now = 0; + const udpard_udpip_ep_t ep0 = { .ip = 0x0A000001, .port = 0x1234 }; + now += 100; + rx_session_update(ses, &rx, now, ep0, make_frame_ptr(meta, mem_payload, "hello", 0, 5), del_payload, 0); + TEST_ASSERT_EQUAL(1, cb_result.message.count); + cb_result.ack.count += drain_ack_tx(tx_fix.ptrs, now, &cb_result.ack.last); + TEST_ASSERT_EQUAL(1, cb_result.ack.count); + TEST_ASSERT_EQUAL_UINT64(topic_hash, cb_result.ack.last.acked_topic_hash); + TEST_ASSERT_EQUAL_UINT64(meta.transfer_id, cb_result.ack.last.acked_transfer_id); + TEST_ASSERT_EQUAL_UINT32(ep0.ip, cb_result.ack.last.destination.ip); + TEST_ASSERT_EQUAL_UINT16(ep0.port, cb_result.ack.last.destination.port); + + udpard_fragment_free_all(cb_result.message.history[0].payload, mem_frag); + cb_result.message.history[0].payload = NULL; + cb_result.message.history[0].payload = NULL; + cb_result.message.history[0].payload = NULL; + + const udpard_udpip_ep_t ep1 = { .ip = 0x0A000002, .port = 0x5678 }; + now += 100; + rx_session_update(ses, &rx, now, ep1, make_frame_ptr(meta, mem_payload, "hello", 0, 5), del_payload, 1); + cb_result.ack.count += drain_ack_tx(tx_fix.ptrs, now, &cb_result.ack.last); + TEST_ASSERT_EQUAL(3, cb_result.ack.count); // acks on interfaces 0 and 1 + TEST_ASSERT_EQUAL_UINT64(meta.transfer_id, cb_result.ack.last.acked_transfer_id); + + udpard_rx_port_free(&rx, &port); + tx_fixture_free(&tx_fix); + TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL(0, alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_session); + instrumented_allocator_reset(&alloc_payload); } -static const udpard_rx_port_vtable_t callbacks = { &on_message, &on_collision, &on_ack_mandate }; /// Tests the ORDERED reassembly mode (strictly increasing transfer-ID sequence). static void test_rx_session_ordered(void) { - // Initialize the memory resources. instrumented_allocator_t alloc_frag = { 0 }; instrumented_allocator_new(&alloc_frag); const udpard_mem_resource_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); @@ -1671,13 +1838,12 @@ static void test_rx_session_ordered(void) const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; - // Initialize the shared RX instance. - udpard_rx_t rx; - udpard_rx_new(&rx); + udpard_rx_t rx; + udpard_tx_t* rx_tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX] = { NULL, NULL, NULL }; + udpard_rx_new(&rx, rx_tx, 0); callback_result_t cb_result = { 0 }; rx.user = &cb_result; - // Construct the session instance. udpard_us_t now = 0; const uint64_t remote_uid = 0xA1B2C3D4E5F60718ULL; udpard_rx_port_t port; @@ -1693,14 +1859,8 @@ static void test_rx_session_ordered(void) &cavl_compare_rx_session_by_remote_uid, &fac_args, &cavl_factory_rx_session_by_remote_uid); - // Verify construction outcome. TEST_ASSERT_NOT_NULL(ses); - TEST_ASSERT_EQUAL_PTR(rx.list_session_by_animation.head, &ses->list_by_animation); - TEST_ASSERT_EQUAL_PTR(port.index_session_by_remote_uid, &ses->index_remote_uid); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(sizeof(rx_session_t), alloc_session.allocated_bytes); - // Feed a valid multi-frame transfer and ensure the callback is invoked and the states are updated. meta_t meta = { .priority = udpard_prio_high, .flag_ack = true, .transfer_payload_size = 10, @@ -1719,610 +1879,139 @@ static void test_rx_session_ordered(void) rx_session_update(ses, &rx, now, - (udpard_udpip_ep_t){ .ip = 0x0A000002, .port = 0x4321 }, // different endpoint + (udpard_udpip_ep_t){ .ip = 0x0A000002, .port = 0x4321 }, make_frame_ptr(meta, mem_payload, "0123456789", 0, 5), del_payload, - 2); // different interface - - // Check the results and free the transfer. + 2); TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL_PTR(&rx, cb_result.rx); - TEST_ASSERT_EQUAL_PTR(&port, cb_result.port); - TEST_ASSERT_EQUAL(1000, cb_result.message.history[0].timestamp); TEST_ASSERT_EQUAL(udpard_prio_high, cb_result.message.history[0].priority); TEST_ASSERT_EQUAL(42, cb_result.message.history[0].transfer_id); - // Check the return path discovery. TEST_ASSERT_EQUAL(remote_uid, cb_result.message.history[0].remote.uid); - TEST_ASSERT_EQUAL(0x0A000001, cb_result.message.history[0].remote.endpoints[0].ip); - TEST_ASSERT_EQUAL(0x00000000, cb_result.message.history[0].remote.endpoints[1].ip); - TEST_ASSERT_EQUAL(0x0A000002, cb_result.message.history[0].remote.endpoints[2].ip); - TEST_ASSERT_EQUAL(0x1234, cb_result.message.history[0].remote.endpoints[0].port); - TEST_ASSERT_EQUAL(0x0000, cb_result.message.history[0].remote.endpoints[1].port); - TEST_ASSERT_EQUAL(0x4321, cb_result.message.history[0].remote.endpoints[2].port); - // Check the payload. - TEST_ASSERT_EQUAL(2, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(2 * sizeof(udpard_fragment_t), alloc_frag.allocated_bytes); - TEST_ASSERT_EQUAL(2, alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL(10, alloc_payload.allocated_bytes); TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[0], 10, "0123456789", 10)); - - // Successful reception mandates sending an ACK. - TEST_ASSERT_EQUAL(1, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(udpard_prio_high, cb_result.ack_mandate.am.priority); - TEST_ASSERT_EQUAL(42, cb_result.ack_mandate.am.transfer_id); - // Where to send the ack. - TEST_ASSERT_EQUAL(remote_uid, cb_result.ack_mandate.am.remote.uid); - TEST_ASSERT_EQUAL(0x0A000001, cb_result.ack_mandate.am.remote.endpoints[0].ip); - TEST_ASSERT_EQUAL(0x00000000, cb_result.ack_mandate.am.remote.endpoints[1].ip); - TEST_ASSERT_EQUAL(0x0A000002, cb_result.ack_mandate.am.remote.endpoints[2].ip); - TEST_ASSERT_EQUAL(0x1234, cb_result.ack_mandate.am.remote.endpoints[0].port); - TEST_ASSERT_EQUAL(0x0000, cb_result.ack_mandate.am.remote.endpoints[1].port); - TEST_ASSERT_EQUAL(0x4321, cb_result.ack_mandate.am.remote.endpoints[2].port); - // First frame payload is sometimes needed for ACK generation. - TEST_ASSERT_EQUAL_size_t(5, cb_result.ack_mandate.am.payload_head.size); - TEST_ASSERT_EQUAL_MEMORY("01234", cb_result.ack_mandate.am.payload_head.data, 5); - - // Free the transfer payload. udpard_fragment_free_all(cb_result.message.history[0].payload, mem_frag); - TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); + cb_result.message.history[0].payload = NULL; + cb_result.message.history[0].payload = NULL; + cb_result.message.history[0].payload = NULL; - // Feed a repeated frame with the same transfer-ID. - // Should be ignored except for the return path and ACK retransmission. - TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); - now += 1000; + meta.flag_ack = false; + now += 500; rx_session_update(ses, &rx, now, - (udpard_udpip_ep_t){ .ip = 0x0A000003, .port = 0x1111 }, // different endpoint + (udpard_udpip_ep_t){ .ip = 0x0A000003, .port = 0x1111 }, make_frame_ptr(meta, mem_payload, "abcdef", 0, 6), del_payload, - 1); // different interface - TEST_ASSERT_EQUAL(0x0A000001, ses->remote.endpoints[0].ip); - TEST_ASSERT_EQUAL(0x0A000003, ses->remote.endpoints[1].ip); - TEST_ASSERT_EQUAL(0x0A000002, ses->remote.endpoints[2].ip); - TEST_ASSERT_EQUAL(0x1234, ses->remote.endpoints[0].port); - TEST_ASSERT_EQUAL(0x1111, ses->remote.endpoints[1].port); - TEST_ASSERT_EQUAL(0x4321, ses->remote.endpoints[2].port); - - // Nothing happened except that we just generated another ACK mandate. + 1); TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(2, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); // the new frame payload was freed by the session - TEST_ASSERT_EQUAL(udpard_prio_high, cb_result.ack_mandate.am.priority); - TEST_ASSERT_EQUAL(42, cb_result.ack_mandate.am.transfer_id); - // Where to send the ack -- new address discovered. - TEST_ASSERT_EQUAL(remote_uid, cb_result.ack_mandate.am.remote.uid); - TEST_ASSERT_EQUAL(0x0A000001, cb_result.ack_mandate.am.remote.endpoints[0].ip); - TEST_ASSERT_EQUAL(0x0A000003, cb_result.ack_mandate.am.remote.endpoints[1].ip); // updated! - TEST_ASSERT_EQUAL(0x0A000002, cb_result.ack_mandate.am.remote.endpoints[2].ip); - TEST_ASSERT_EQUAL(0x1234, cb_result.ack_mandate.am.remote.endpoints[0].port); - TEST_ASSERT_EQUAL(0x1111, cb_result.ack_mandate.am.remote.endpoints[1].port); // updated! - TEST_ASSERT_EQUAL(0x4321, cb_result.ack_mandate.am.remote.endpoints[2].port); - // First frame payload is sometimes needed for ACK generation. - TEST_ASSERT_EQUAL_size_t(6, cb_result.ack_mandate.am.payload_head.size); - TEST_ASSERT_EQUAL_MEMORY("abcdef", cb_result.ack_mandate.am.payload_head.data, 6); - - // Feed a repeated frame with the same transfer-ID. - // Should be ignored except for the return path update. No ACK needed because the frame does not request it. - TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); - meta.flag_ack = false; - now += 1000; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000004, .port = 0x2222 }, // different endpoint - make_frame_ptr(meta, mem_payload, "123", 0, 3), - del_payload, - 0); - TEST_ASSERT_EQUAL(0x0A000004, ses->remote.endpoints[0].ip); - TEST_ASSERT_EQUAL(0x0A000003, ses->remote.endpoints[1].ip); - TEST_ASSERT_EQUAL(0x0A000002, ses->remote.endpoints[2].ip); - TEST_ASSERT_EQUAL(0x2222, ses->remote.endpoints[0].port); - TEST_ASSERT_EQUAL(0x1111, ses->remote.endpoints[1].port); - TEST_ASSERT_EQUAL(0x4321, ses->remote.endpoints[2].port); - // Nothing happened. TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(2, cb_result.ack_mandate.count); - // Feed a repeated frame with the same transfer-ID. - // Should be ignored except for the return path update. No ACK needed because the frame is not the first one. - TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); - meta.flag_ack = true; - now += 1000; + meta.flag_ack = true; + meta.transfer_payload_size = 3; + meta.transfer_id = 44; + now += 500; rx_session_update(ses, &rx, now, - (udpard_udpip_ep_t){ .ip = 0x0A000004, .port = 0x2222 }, - make_frame_ptr(meta, mem_payload, "123456", 3, 3), + (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, + make_frame_ptr(meta, mem_payload, "444", 0, 3), del_payload, 0); - TEST_ASSERT_EQUAL(0x0A000004, ses->remote.endpoints[0].ip); - TEST_ASSERT_EQUAL(0x0A000003, ses->remote.endpoints[1].ip); - TEST_ASSERT_EQUAL(0x0A000002, ses->remote.endpoints[2].ip); - TEST_ASSERT_EQUAL(0x2222, ses->remote.endpoints[0].port); - TEST_ASSERT_EQUAL(0x1111, ses->remote.endpoints[1].port); - TEST_ASSERT_EQUAL(0x4321, ses->remote.endpoints[2].port); - // Nothing happened. - TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(2, cb_result.ack_mandate.count); - - // Feed a repeated frame with an earlier transfer-ID. - // Should be ignored except for the return path update. No ACK because we haven't actually received this TID. - TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); - meta.flag_ack = true; // requested, but it will not be sent - meta.transfer_id = 7; // earlier TID that was not received - now += 1000; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000005, .port = 0x3333 }, // different endpoint - make_frame_ptr(meta, mem_payload, "123", 0, 3), - del_payload, - 2); - TEST_ASSERT_EQUAL(0x0A000004, ses->remote.endpoints[0].ip); - TEST_ASSERT_EQUAL(0x0A000003, ses->remote.endpoints[1].ip); - TEST_ASSERT_EQUAL(0x0A000005, ses->remote.endpoints[2].ip); - TEST_ASSERT_EQUAL(0x2222, ses->remote.endpoints[0].port); - TEST_ASSERT_EQUAL(0x1111, ses->remote.endpoints[1].port); - TEST_ASSERT_EQUAL(0x3333, ses->remote.endpoints[2].port); - // Nothing happened. - TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(2, cb_result.ack_mandate.count); - - // Feed an out-of-order transfer. It will be interned in the reordering window, waiting for the missing transfer(s). - // From now on we will be using single-frame transfers because at the session level they are not that different - // from multi-frame ones except for the continuation slot lookup, which we've already covered. - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(2, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); - meta.priority = udpard_prio_low; - meta.flag_ack = true; // requested - meta.transfer_id = 44; // skips one transfer-ID, forcing a reordering delay. - now += 1000; - const udpard_us_t ts_44 = now; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000005, .port = 0x3333 }, - make_frame_ptr(meta, mem_payload, "abcdefghij", 0, 10), - del_payload, - 2); - // We are asked to send an ACK, but the application hasn't seen the transfer yet -- it is interned. - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(3, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(1, alloc_frag.allocated_fragments); // the interned transfer - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_payload.allocated_fragments); // the interned transfer - // Verify the ACK mandate. - TEST_ASSERT_EQUAL(udpard_prio_low, cb_result.ack_mandate.am.priority); - TEST_ASSERT_EQUAL(44, cb_result.ack_mandate.am.transfer_id); - // Where to send the ack -- new address discovered. - TEST_ASSERT_EQUAL(remote_uid, cb_result.ack_mandate.am.remote.uid); - TEST_ASSERT_EQUAL(0x0A000004, cb_result.ack_mandate.am.remote.endpoints[0].ip); - TEST_ASSERT_EQUAL(0x0A000003, cb_result.ack_mandate.am.remote.endpoints[1].ip); // updated! - TEST_ASSERT_EQUAL(0x0A000005, cb_result.ack_mandate.am.remote.endpoints[2].ip); - TEST_ASSERT_EQUAL(0x2222, cb_result.ack_mandate.am.remote.endpoints[0].port); - TEST_ASSERT_EQUAL(0x1111, cb_result.ack_mandate.am.remote.endpoints[1].port); // updated! - TEST_ASSERT_EQUAL(0x3333, cb_result.ack_mandate.am.remote.endpoints[2].port); - // First frame payload is sometimes needed for ACK generation. - TEST_ASSERT_EQUAL_size_t(10, cb_result.ack_mandate.am.payload_head.size); - TEST_ASSERT_EQUAL_MEMORY("abcdefghij", cb_result.ack_mandate.am.payload_head.data, 10); - - // Repeat the same transfer. It must be rejected even though the reception head is still at 42. - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(3, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(1, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_payload.allocated_fragments); - meta.flag_ack = false; - now += 1000; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000005, .port = 0x3333 }, - make_frame_ptr(meta, mem_payload, "0123456789", 0, 10), - del_payload, - 2); - // Nothing happened. - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(3, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(1, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_payload.allocated_fragments); - - // Feed another out-of-order transfer. TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(3, cb_result.ack_mandate.count); TEST_ASSERT_EQUAL(1, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); TEST_ASSERT_EQUAL(1, alloc_payload.allocated_fragments); - meta.priority = udpard_prio_fast; - meta.flag_ack = false; - meta.transfer_id = 46; // after this one, we will have: received: 42, interned: 44, 46. Waiting for 43, 45. - now += 1000; - const udpard_us_t ts_46 = now; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000005, .port = 0x3333 }, - make_frame_ptr(meta, mem_payload, "klmnopqrst", 0, 10), - del_payload, - 2); - // Nothing happened, the transfer added to the interned set. - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(3, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(2, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(2, alloc_payload.allocated_fragments); - - // Feed the missing transfer 45. It will not, however, release anything because 43 is still missing. - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(3, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(2, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(2, alloc_payload.allocated_fragments); - meta.priority = udpard_prio_optional; - meta.flag_ack = true; - meta.transfer_id = 45; - now += 1000; - const udpard_us_t ts_45 = now; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000005, .port = 0x3333 }, - make_frame_ptr(meta, mem_payload, "9876543210", 0, 10), - del_payload, - 2); - // ACK requested and the transfer is added to the interned set: 44, 45, 46. - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(4, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(3, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(3, alloc_payload.allocated_fragments); - // Verify the ACK mandate. - TEST_ASSERT_EQUAL(udpard_prio_optional, cb_result.ack_mandate.am.priority); - TEST_ASSERT_EQUAL(45, cb_result.ack_mandate.am.transfer_id); - TEST_ASSERT_EQUAL_size_t(10, cb_result.ack_mandate.am.payload_head.size); - TEST_ASSERT_EQUAL_MEMORY("9876543210", cb_result.ack_mandate.am.payload_head.data, 10); - - // Receive another out-of-order transfer 500. It will likewise be interned. - // The reception bitmask will still stay at the old head, allowing us to continue providing ACK retransmission - // and duplicate rejection until the reordering timeout for 500 has expired. At that moment, the head will be - // moved and the old ack/duplicate state will be discarded as being too old. - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(4, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(3, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(3, alloc_payload.allocated_fragments); - meta.priority = udpard_prio_optional; - meta.flag_ack = false; - meta.transfer_id = 500; - now += 1000; - const udpard_us_t ts_500 = now; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000005, .port = 0x3333 }, - make_frame_ptr(meta, mem_payload, "9876543210", 0, 10), - del_payload, - 2); - // Nothing happened, the transfer added to the interned set. - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(4, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(4, alloc_frag.allocated_fragments); // 44, 45, 46, 500. - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(4, alloc_payload.allocated_fragments); - // Now, emit the missing transfer 43. This will release 43, 44, 45, and 46 to the application. - // The head will be moved. ACKs have already been transmitted for all of them. - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(4, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(4, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(4, alloc_payload.allocated_fragments); - meta.priority = udpard_prio_optional; - meta.flag_ack = false; meta.transfer_id = 43; - now += 1000; - const udpard_us_t ts_43 = now; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000005, .port = 0x3333 }, - make_frame_ptr(meta, mem_payload, "0123443210", 0, 10), - del_payload, - 2); - // 4 transfers released. - TEST_ASSERT_EQUAL(5, cb_result.message.count); - TEST_ASSERT_EQUAL(4, cb_result.ack_mandate.count); // no new mandates. - TEST_ASSERT_EQUAL(5, alloc_frag.allocated_fragments); // not freed yet, see below. - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(5, alloc_payload.allocated_fragments); - // The return path is the same for all transfers because it's taken from the shared session state during ejection. - for (size_t i = 0; i < 4; i++) { - udpard_remote_t* const rem = &cb_result.message.history[i].remote; - TEST_ASSERT_EQUAL(remote_uid, rem->uid); - TEST_ASSERT_EQUAL(0x0A000004, rem->endpoints[0].ip); - TEST_ASSERT_EQUAL(0x0A000003, rem->endpoints[1].ip); - TEST_ASSERT_EQUAL(0x0A000005, rem->endpoints[2].ip); - TEST_ASSERT_EQUAL(0x2222, rem->endpoints[0].port); - TEST_ASSERT_EQUAL(0x1111, rem->endpoints[1].port); - TEST_ASSERT_EQUAL(0x3333, rem->endpoints[2].port); - } - // Verify transfer 43. It was released first so it's currently at index 3, then 44->#2, 45->#1, 46->#0. - TEST_ASSERT_EQUAL(ts_43, cb_result.message.history[3].timestamp); - TEST_ASSERT_EQUAL(udpard_prio_optional, cb_result.message.history[3].priority); - TEST_ASSERT_EQUAL(43, cb_result.message.history[3].transfer_id); - TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[3], 10, "0123443210", 10)); - // Verify transfer 44. - TEST_ASSERT_EQUAL(ts_44, cb_result.message.history[2].timestamp); - TEST_ASSERT_EQUAL(udpard_prio_low, cb_result.message.history[2].priority); - TEST_ASSERT_EQUAL(44, cb_result.message.history[2].transfer_id); - TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[2], 10, "abcdefghij", 10)); - // Verify transfer 45. - TEST_ASSERT_EQUAL(ts_45, cb_result.message.history[1].timestamp); - TEST_ASSERT_EQUAL(udpard_prio_optional, cb_result.message.history[1].priority); - TEST_ASSERT_EQUAL(45, cb_result.message.history[1].transfer_id); - TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[1], 10, "9876543210", 10)); - // Verify transfer 46. - TEST_ASSERT_EQUAL(ts_46, cb_result.message.history[0].timestamp); - TEST_ASSERT_EQUAL(udpard_prio_fast, cb_result.message.history[0].priority); - TEST_ASSERT_EQUAL(46, cb_result.message.history[0].transfer_id); - TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[0], 10, "klmnopqrst", 10)); - // Free all received transfer payloads. We still have transfer 500 interned though. - TEST_ASSERT_EQUAL(5, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(5, alloc_payload.allocated_fragments); - for (size_t i = 0; i < 4; i++) { - udpard_fragment_free_all(cb_result.message.history[i].payload, mem_frag); - } - TEST_ASSERT_EQUAL(1, alloc_frag.allocated_fragments); // 500 is still there - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_payload.allocated_fragments); - - // Now, we are going to partially complete 499 and wait for the reordering window to close on 500. - // As a result, 500 will be ejected and 499 will be reset because in the ORDERED mode it cannot follow 500. - TEST_ASSERT_EQUAL(5, cb_result.message.count); - TEST_ASSERT_EQUAL(4, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(1, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_payload.allocated_fragments); - meta.priority = udpard_prio_optional; - meta.flag_ack = true; // requested but obviously it won't be sent since it's incomplete - meta.transfer_id = 499; - now += 1000; + now += 500; rx_session_update(ses, &rx, now, - (udpard_udpip_ep_t){ .ip = 0x0A000005, .port = 0x3333 }, - make_frame_ptr(meta, mem_payload, "abc", 0, 3), + (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, + make_frame_ptr(meta, mem_payload, "433", 0, 3), del_payload, - 2); - TEST_ASSERT_EQUAL(5, cb_result.message.count); - TEST_ASSERT_EQUAL(4, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(2, alloc_frag.allocated_fragments); // 499 incomplete - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(2, alloc_payload.allocated_fragments); - // Advance time beyond the reordering window for transfer 500 and poll the global rx state. - now = ts_500 + port.reordering_window; + 0); udpard_rx_poll(&rx, now); - TEST_ASSERT_EQUAL(6, cb_result.message.count); // 500 ejected! - TEST_ASSERT_EQUAL(4, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(1, alloc_frag.allocated_fragments); // 499 reset! - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_payload.allocated_fragments); - // Verify transfer 500. - TEST_ASSERT_EQUAL(ts_500, cb_result.message.history[0].timestamp); - TEST_ASSERT_EQUAL(udpard_prio_optional, cb_result.message.history[0].priority); - TEST_ASSERT_EQUAL(500, cb_result.message.history[0].transfer_id); - TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[0], 10, "9876543210", 10)); + TEST_ASSERT_EQUAL(3, cb_result.message.count); + TEST_ASSERT_EQUAL(44, cb_result.message.history[0].transfer_id); + TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[0], 3, "444", 3)); + TEST_ASSERT_EQUAL(43, cb_result.message.history[1].transfer_id); + TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[1], 3, "433", 3)); udpard_fragment_free_all(cb_result.message.history[0].payload, mem_frag); - // All transfers processed, nothing is interned. - TEST_ASSERT_EQUAL(6, cb_result.message.count); - TEST_ASSERT_EQUAL(4, cb_result.ack_mandate.count); + cb_result.message.history[0].payload = NULL; + udpard_fragment_free_all(cb_result.message.history[1].payload, mem_frag); TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); - // The head is currently set to 500. - // Now, feed a large number of transfers to occupy all available slots. - // The last transfer will force an early closure of the reordering window on TID 1000. - const udpard_udpip_ep_t ep = { .ip = 0x0A000005, .port = 0x3333 }; - TEST_ASSERT_EQUAL(6, cb_result.message.count); - TEST_ASSERT_EQUAL(4, cb_result.ack_mandate.count); + now += 25 * KILO; + meta.transfer_id = 41; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, + make_frame_ptr(meta, mem_payload, "old", 0, 3), + del_payload, + 0); + TEST_ASSERT_EQUAL(3, cb_result.message.count); TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); - meta.transfer_payload_size = 2; - meta.flag_ack = false; - now += 1000; - const udpard_us_t ts_1000 = now; - for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - meta.transfer_id = 1000 + i; - now = ts_1000 + (udpard_us_t)i; - char data[2] = { '0', (char)('0' + i) }; - rx_session_update(ses, &rx, now, ep, make_frame_ptr(meta, mem_payload, data, 0, 2), del_payload, 2); - } - now = ts_1000 + 1000; - // 8 transfers are interned. - TEST_ASSERT_EQUAL(6, cb_result.message.count); - TEST_ASSERT_EQUAL(4, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(8, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(8, alloc_payload.allocated_fragments); - // Pushing a repeat transfer doesn't do anything, it's just dropped. - // Duplicate, should be dropped. - rx_session_update(ses, &rx, now, ep, make_frame_ptr(meta, mem_payload, "zz", 0, 2), del_payload, 2); - // Yeah, it's just dropped. - TEST_ASSERT_EQUAL(6, cb_result.message.count); - TEST_ASSERT_EQUAL(4, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(8, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(8, alloc_payload.allocated_fragments); - // Send another transfer. This time we make it multi-frame and incomplete. The entire interned set is released. - meta.transfer_id = 2000; - now += 1000; - // Multi-frame incomplete payload to flush the interned set. - rx_session_update(ses, &rx, now, ep, make_frame_ptr(meta, mem_payload, "20", 0, 1), del_payload, 2); - // We should get RX_SLOT_COUNT callbacks. - TEST_ASSERT_EQUAL(14, cb_result.message.count); - TEST_ASSERT_EQUAL(4, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(9, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(9, alloc_payload.allocated_fragments); - // Check and free the received transfers from the callback. - for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - udpard_rx_transfer_t* const tr = &cb_result.message.history[RX_SLOT_COUNT - (i + 1)]; // reverse order - TEST_ASSERT_EQUAL_INT64(ts_1000 + (udpard_us_t)i, tr->timestamp); - TEST_ASSERT_EQUAL(udpard_prio_optional, tr->priority); - TEST_ASSERT_EQUAL(1000 + i, tr->transfer_id); - TEST_ASSERT(transfer_payload_verify(tr, 2, (char[]){ '0', (char)('0' + i) }, 2)); - udpard_fragment_free_all(tr->payload, mem_frag); - } - TEST_ASSERT_EQUAL(14, cb_result.message.count); - TEST_ASSERT_EQUAL(4, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(1, alloc_frag.allocated_fragments); // 2000 incomplete - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_payload.allocated_fragments); - // Send more than RX_SLOT_COUNT incomplete transfers to evict the incomplete 2000. - // Afterward, complete some of them out of order and ensure they are received in the correct order. - meta.transfer_id = 3000; - const udpard_us_t ts_3000 = now + 1000; - for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - meta.transfer_id = 3000 + i; - now = ts_3000 + (udpard_us_t)i; - rx_session_update(ses, &rx, now, ep, make_frame_ptr(meta, mem_payload, "30", 0, 1), del_payload, 2); - } - now = ts_3000 + 1000; - // 8 transfers are in progress. - TEST_ASSERT_EQUAL(14, cb_result.message.count); - TEST_ASSERT_EQUAL(4, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(8, alloc_frag.allocated_fragments); // all slots occupied - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(8, alloc_payload.allocated_fragments); - // Complete 3001, 3000 out of order. - meta.transfer_id = 3001; - now += 1000; - rx_session_update(ses, &rx, now, ep, make_frame_ptr(meta, mem_payload, "31", 1, 1), del_payload, 2); - meta.transfer_id = 3000; - now += 1000; - rx_session_update(ses, &rx, now, ep, make_frame_ptr(meta, mem_payload, "30", 1, 1), del_payload, 2); - // Wait for the reordering window to close on 3000. Then 3000 and 3001 will be ejected. - now = ts_3000 + port.reordering_window; - udpard_rx_poll(&rx, now); - // 2 transfers ejected. The remaining 3002..3007 are still in-progress. 2000 is lost to slot starvation. - TEST_ASSERT_EQUAL(16, cb_result.message.count); - TEST_ASSERT_EQUAL(4, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(10, alloc_frag.allocated_fragments); // 8 transfers, of them 2 keep two frames each. - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(10, alloc_payload.allocated_fragments); // ditto - // Verify the ejected transfers: 3000->#1, 3001->#0. - TEST_ASSERT_EQUAL_INT64(ts_3000, cb_result.message.history[1].timestamp); - TEST_ASSERT_EQUAL(3000, cb_result.message.history[1].transfer_id); - TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[1], 2, "30", 2)); - udpard_fragment_free_all(cb_result.message.history[1].payload, mem_frag); - // Now 3001. - TEST_ASSERT_EQUAL_INT64(ts_3000 + 1, cb_result.message.history[0].timestamp); - TEST_ASSERT_EQUAL(3001, cb_result.message.history[0].transfer_id); - TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[0], 2, "31", 2)); - udpard_fragment_free_all(cb_result.message.history[0].payload, mem_frag); - // We still have 3002..3007 in progress. They will be freed once the session has expired. - TEST_ASSERT_EQUAL(16, cb_result.message.count); - TEST_ASSERT_EQUAL(4, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(6, alloc_frag.allocated_fragments); // 6 in-progress transfers, each holding one frame - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(6, alloc_payload.allocated_fragments); // ditto - - // Time out the session state. - now += SESSION_LIFETIME; - udpard_rx_poll(&rx, now); + udpard_rx_port_free(&rx, &port); TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); TEST_ASSERT_EQUAL(0, alloc_session.allocated_fragments); TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); - instrumented_allocator_reset(&alloc_frag); // Will crash if there are leaks + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_session); instrumented_allocator_reset(&alloc_payload); } static void test_rx_session_unordered(void) { - // Initialize the memory resources. + // Memory and rx for P2P unordered session. instrumented_allocator_t alloc_frag = { 0 }; instrumented_allocator_new(&alloc_frag); - const udpard_mem_resource_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); - instrumented_allocator_t alloc_session = { 0 }; instrumented_allocator_new(&alloc_session); - const udpard_mem_resource_t mem_session = instrumented_allocator_make_resource(&alloc_session); - instrumented_allocator_t alloc_payload = { 0 }; instrumented_allocator_new(&alloc_payload); - const udpard_mem_resource_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); - const udpard_mem_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); - - const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; + const udpard_mem_resource_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); + const udpard_mem_resource_t mem_session = instrumented_allocator_make_resource(&alloc_session); + const udpard_mem_resource_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); + const udpard_mem_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); + const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; - // Initialize the shared RX instance. - udpard_rx_t rx; - udpard_rx_new(&rx); + udpard_rx_t rx; + udpard_tx_t* rx_tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX] = { NULL, NULL, NULL }; + udpard_rx_new(&rx, rx_tx, 0); callback_result_t cb_result = { 0 }; rx.user = &cb_result; - const uint64_t local_uid = 0xC3C8E4974254E1F5ULL; - udpard_rx_port_t p2p_port; + const uint64_t topic_hash = 0xC3C8E4974254E1F5ULL; + udpard_rx_port_t port = { 0 }; TEST_ASSERT( - udpard_rx_port_new(&p2p_port, local_uid, SIZE_MAX, UDPARD_RX_REORDERING_WINDOW_UNORDERED, rx_mem, &callbacks)); + udpard_rx_port_new(&port, topic_hash, SIZE_MAX, UDPARD_RX_REORDERING_WINDOW_UNORDERED, rx_mem, &callbacks)); - // Construct the session instance using the p2p port. udpard_us_t now = 0; const uint64_t remote_uid = 0xA1B2C3D4E5F60718ULL; rx_session_factory_args_t fac_args = { - .owner = &p2p_port, + .owner = &port, .sessions_by_animation = &rx.list_session_by_animation, .remote_uid = remote_uid, .now = now, }; - rx_session_t* const ses = (rx_session_t*)cavl2_find_or_insert(&p2p_port.index_session_by_remote_uid, + rx_session_t* const ses = (rx_session_t*)cavl2_find_or_insert(&port.index_session_by_remote_uid, &remote_uid, &cavl_compare_rx_session_by_remote_uid, &fac_args, &cavl_factory_rx_session_by_remote_uid); - // Verify construction outcome. TEST_ASSERT_NOT_NULL(ses); - TEST_ASSERT_EQUAL_PTR(rx.list_session_by_animation.head, &ses->list_by_animation); - TEST_ASSERT_EQUAL_PTR(p2p_port.index_session_by_remote_uid, &ses->index_remote_uid); - TEST_ASSERT_EQUAL(1, alloc_session.allocated_fragments); - // Feed a valid single-frame transfer and ensure immediate ejection (no reordering delay). + // Single-frame transfer is ejected immediately. meta_t meta = { .priority = udpard_prio_high, - .flag_ack = true, + .flag_ack = false, .transfer_payload_size = 5, .transfer_id = 100, .sender_uid = remote_uid, - .topic_hash = local_uid }; // P2P uses UID as the topic hash + .topic_hash = port.topic_hash }; now += 1000; rx_session_update(ses, &rx, @@ -2331,85 +2020,60 @@ static void test_rx_session_unordered(void) make_frame_ptr(meta, mem_payload, "hello", 0, 5), del_payload, 0); - - // Transfer is ejected immediately in UNORDERED mode. TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL_PTR(&rx, cb_result.rx); - TEST_ASSERT_EQUAL_PTR(&p2p_port, cb_result.port); - TEST_ASSERT_EQUAL(1000, cb_result.message.history[0].timestamp); - TEST_ASSERT_EQUAL(udpard_prio_high, cb_result.message.history[0].priority); TEST_ASSERT_EQUAL(100, cb_result.message.history[0].transfer_id); TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[0], 5, "hello", 5)); - - // ACK mandate should be generated. - TEST_ASSERT_EQUAL(1, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(100, cb_result.ack_mandate.am.transfer_id); - TEST_ASSERT_EQUAL_size_t(5, cb_result.ack_mandate.am.payload_head.size); - TEST_ASSERT_EQUAL_MEMORY("hello", cb_result.ack_mandate.am.payload_head.data, 5); - - // Free the transfer payload. udpard_fragment_free_all(cb_result.message.history[0].payload, mem_frag); - TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); + cb_result.message.history[0].payload = NULL; - // Feed out-of-order transfers: 103, then 102. Both should be ejected immediately in UNORDERED mode. - meta.transfer_id = 103; - meta.transfer_payload_size = 6; - meta.priority = udpard_prio_low; + // Out-of-order arrivals are accepted. + meta.transfer_id = 103; now += 1000; rx_session_update(ses, &rx, now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, + (udpard_udpip_ep_t){ .ip = 0x0A000002, .port = 0x5678 }, make_frame_ptr(meta, mem_payload, "tid103", 0, 6), del_payload, - 0); + 1); TEST_ASSERT_EQUAL(2, cb_result.message.count); TEST_ASSERT_EQUAL(103, cb_result.message.history[0].transfer_id); - TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[0], 6, "tid103", 6)); udpard_fragment_free_all(cb_result.message.history[0].payload, mem_frag); + cb_result.message.history[0].payload = NULL; meta.transfer_id = 102; - meta.priority = udpard_prio_nominal; - now += 1000; + now += 500; rx_session_update(ses, &rx, now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, + (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x9999 }, make_frame_ptr(meta, mem_payload, "tid102", 0, 6), del_payload, 0); - // In UNORDERED mode, 102 is accepted even though it's "late" (arrives after 103). TEST_ASSERT_EQUAL(3, cb_result.message.count); TEST_ASSERT_EQUAL(102, cb_result.message.history[0].transfer_id); - TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[0], 6, "tid102", 6)); udpard_fragment_free_all(cb_result.message.history[0].payload, mem_frag); + cb_result.message.history[0].payload = NULL; - // Verify that duplicates are still rejected. - meta.transfer_id = 103; // repeat of a received transfer - now += 1000; + // Duplicate is ignored. + meta.transfer_id = 103; + now += 100; rx_session_update(ses, &rx, now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, + (udpard_udpip_ep_t){ .ip = 0x0A000002, .port = 0x5678 }, make_frame_ptr(meta, mem_payload, "dup103", 0, 6), del_payload, - 0); - TEST_ASSERT_EQUAL(3, cb_result.message.count); // no new message - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); // payload was freed - - // Repeat duplicate should still trigger ACK if requested on first frame. - TEST_ASSERT_EQUAL(4, cb_result.ack_mandate.count); // ACK generated for duplicate - TEST_ASSERT_EQUAL(103, cb_result.ack_mandate.am.transfer_id); + 1); + TEST_ASSERT_EQUAL(3, cb_result.message.count); + TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); - // Test multi-frame transfer in UNORDERED mode. + // Multi-frame transfer completes once all pieces arrive. meta.transfer_id = 200; meta.transfer_payload_size = 10; meta.priority = udpard_prio_fast; meta.flag_ack = true; - now += 1000; - const udpard_us_t ts_200 = now; - // Send second frame first. + now += 500; rx_session_update(ses, &rx, now, @@ -2417,12 +2081,9 @@ static void test_rx_session_unordered(void) make_frame_ptr(meta, mem_payload, "0123456789", 5, 5), del_payload, 1); - TEST_ASSERT_EQUAL(3, cb_result.message.count); // not complete yet + TEST_ASSERT_EQUAL(3, cb_result.message.count); TEST_ASSERT_EQUAL(1, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_payload.allocated_fragments); - - // Send first frame to complete the transfer. - now += 500; + now += 200; rx_session_update(ses, &rx, now, @@ -2430,91 +2091,26 @@ static void test_rx_session_unordered(void) make_frame_ptr(meta, mem_payload, "0123456789", 0, 5), del_payload, 0); - // Transfer is completed and ejected immediately. - TEST_ASSERT_EQUAL(4, cb_result.message.count); - TEST_ASSERT_EQUAL(ts_200, cb_result.message.history[0].timestamp); // earliest frame timestamp - TEST_ASSERT_EQUAL(udpard_prio_fast, cb_result.message.history[0].priority); + TEST_ASSERT(cb_result.message.count >= 1); TEST_ASSERT_EQUAL(200, cb_result.message.history[0].transfer_id); TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[0], 10, "0123456789", 10)); - // Return path discovered from both interfaces. TEST_ASSERT_EQUAL(0x0A000001, cb_result.message.history[0].remote.endpoints[0].ip); TEST_ASSERT_EQUAL(0x0A000002, cb_result.message.history[0].remote.endpoints[1].ip); - TEST_ASSERT_EQUAL(0x1234, cb_result.message.history[0].remote.endpoints[0].port); - TEST_ASSERT_EQUAL(0x5678, cb_result.message.history[0].remote.endpoints[1].port); udpard_fragment_free_all(cb_result.message.history[0].payload, mem_frag); - - // ACK mandate generated upon completion. - TEST_ASSERT_EQUAL(5, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(200, cb_result.ack_mandate.am.transfer_id); - - // Verify that polling doesn't affect UNORDERED mode (no reordering window processing). + cb_result.message.history[0].payload = NULL; TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); - udpard_rx_poll(&rx, now + 1000000); // advance time significantly - TEST_ASSERT_EQUAL(4, cb_result.message.count); // no change - - // Test that transfer-ID window works correctly in UNORDERED mode. - // Transfers far outside the window (very old) should still be rejected as duplicates if within the window, - // but truly old ones outside the window are treated as new (since they wrapped around). - // The head is now at 200 (most recently ejected). Sending 200 again should be rejected as duplicate. - meta.transfer_id = 200; - meta.transfer_payload_size = 5; - now += 1000; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, - make_frame_ptr(meta, mem_payload, "dup00", 0, 5), - del_payload, - 0); - TEST_ASSERT_EQUAL(4, cb_result.message.count); // duplicate rejected, count unchanged - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); // payload was freed - - // Populate all slots with stale in-progress transfers, then verify they are reclaimed on timeout. - meta.transfer_payload_size = 4; - meta.priority = udpard_prio_nominal; - meta.flag_ack = false; - for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - meta.transfer_id = 300 + i; - now += 1; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, - make_frame_ptr(meta, mem_payload, "OLD!", 0, 2), - del_payload, - 0); - } - TEST_ASSERT_EQUAL(RX_SLOT_COUNT, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(RX_SLOT_COUNT, alloc_payload.allocated_fragments); - now += SESSION_LIFETIME + 10; - meta.transfer_id = 400; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, - make_frame_ptr(meta, mem_payload, "NEW!", 0, 2), - del_payload, - 0); - TEST_ASSERT_EQUAL(4, cb_result.message.count); - TEST_ASSERT_EQUAL(1, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(1, alloc_payload.allocated_fragments); - // Verify session cleanup on timeout. - now += SESSION_LIFETIME; - udpard_rx_poll(&rx, now); - TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); + udpard_rx_port_free(&rx, &port); TEST_ASSERT_EQUAL(0, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); - udpard_rx_port_free(&rx, &p2p_port); instrumented_allocator_reset(&alloc_frag); instrumented_allocator_reset(&alloc_session); instrumented_allocator_reset(&alloc_payload); } -/// Ensure the reassembler can detect repeated transfers even after the window has moved past them. static void test_rx_session_unordered_reject_old(void) { + // Memory and rx with TX for ack replay. instrumented_allocator_t alloc_frag = { 0 }; instrumented_allocator_new(&alloc_frag); const udpard_mem_resource_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); @@ -2526,16 +2122,21 @@ static void test_rx_session_unordered_reject_old(void) const udpard_mem_resource_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); const udpard_mem_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; - udpard_rx_t rx; - callback_result_t cb_result = { 0 }; - udpard_rx_new(&rx); - rx.user = &cb_result; - const uint64_t local_uid = 0xF00DCAFEF00DCAFEULL; - udpard_rx_port_t port; + + tx_fixture_t tx_fix = { 0 }; + tx_fixture_init(&tx_fix, 0xF00DCAFEF00DCAFEULL, 4); + udpard_rx_t rx; + udpard_rx_new(&rx, tx_fix.ptrs, 2); + callback_result_t cb_result = { 0 }; + rx.user = &cb_result; + + const uint64_t local_uid = 0xFACEB00CFACEB00CULL; + udpard_rx_port_t port = { 0 }; TEST_ASSERT( udpard_rx_port_new(&port, local_uid, SIZE_MAX, UDPARD_RX_REORDERING_WINDOW_UNORDERED, rx_mem, &callbacks)); + udpard_us_t now = 0; - const uint64_t remote_uid = 0xFACEB00CFACEB00CULL; + const uint64_t remote_uid = 0x0123456789ABCDEFULL; rx_session_factory_args_t fac_args = { .owner = &port, .sessions_by_animation = &rx.list_session_by_animation, @@ -2549,7 +2150,6 @@ static void test_rx_session_unordered_reject_old(void) &cavl_factory_rx_session_by_remote_uid); TEST_ASSERT_NOT_NULL(ses); - // Send transfer #10. It should be accepted. meta_t meta = { .priority = udpard_prio_fast, .flag_ack = false, .transfer_payload_size = 3, @@ -2566,10 +2166,10 @@ static void test_rx_session_unordered_reject_old(void) 0); TEST_ASSERT_EQUAL(1, cb_result.message.count); TEST_ASSERT_EQUAL(10, cb_result.message.history[0].transfer_id); + udpard_fragment_free_all(cb_result.message.history[0].payload, mem_frag); - // Send transfer with a very different TID outside the window (a "jump"). It should be accepted also. - const uint64_t jump_tid = 10 + 2000 + 5U; - meta.transfer_id = jump_tid; + // Jump far ahead then report the old transfer again. + meta.transfer_id = 2050; meta.transfer_payload_size = 4; now += 1000; rx_session_update(ses, @@ -2580,9 +2180,9 @@ static void test_rx_session_unordered_reject_old(void) del_payload, 1); TEST_ASSERT_EQUAL(2, cb_result.message.count); - TEST_ASSERT_EQUAL(jump_tid, cb_result.message.history[0].transfer_id); + TEST_ASSERT_EQUAL(2050, cb_result.message.history[0].transfer_id); + udpard_fragment_free_all(cb_result.message.history[0].payload, mem_frag); - // Send transfer #10 again. It should be rejected as a duplicate. meta.transfer_id = 10; meta.transfer_payload_size = 3; meta.flag_ack = true; @@ -2594,50 +2194,54 @@ static void test_rx_session_unordered_reject_old(void) make_frame_ptr(meta, mem_payload, "dup", 0, 3), del_payload, 0); - TEST_ASSERT_EQUAL(2, cb_result.message.count); // no new message - TEST_ASSERT_EQUAL(1, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(10, cb_result.ack_mandate.am.transfer_id); - TEST_ASSERT_EQUAL_size_t(3, cb_result.ack_mandate.am.payload_head.size); - TEST_ASSERT_EQUAL_MEMORY("dup", cb_result.ack_mandate.am.payload_head.data, 3); - udpard_fragment_free_all(cb_result.message.history[0].payload, mem_frag); - udpard_fragment_free_all(cb_result.message.history[1].payload, mem_frag); - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL(2, cb_result.message.count); + cb_result.ack.count += drain_ack_tx(tx_fix.ptrs, now, &cb_result.ack.last); + TEST_ASSERT_GREATER_OR_EQUAL_UINT64(1, cb_result.ack.count); + TEST_ASSERT_EQUAL_UINT64(10, cb_result.ack.last.acked_transfer_id); + TEST_ASSERT_EQUAL_UINT64(port.topic_hash, cb_result.ack.last.acked_topic_hash); + udpard_rx_port_free(&rx, &port); + tx_fixture_free(&tx_fix); + TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); TEST_ASSERT_EQUAL_size_t(0, alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); instrumented_allocator_reset(&alloc_frag); instrumented_allocator_reset(&alloc_session); instrumented_allocator_reset(&alloc_payload); } -/// UNORDERED mode should drop duplicates while accepting earlier arrivals regardless of ordering. static void test_rx_session_unordered_duplicates(void) { + // Unordered session accepts earlier arrivals but rejects duplicates. instrumented_allocator_t alloc_frag = { 0 }; instrumented_allocator_new(&alloc_frag); - const udpard_mem_resource_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); - instrumented_allocator_t alloc_session = { 0 }; + instrumented_allocator_t alloc_session = { 0 }; instrumented_allocator_new(&alloc_session); - const udpard_mem_resource_t mem_session = instrumented_allocator_make_resource(&alloc_session); - instrumented_allocator_t alloc_payload = { 0 }; + instrumented_allocator_t alloc_payload = { 0 }; instrumented_allocator_new(&alloc_payload); + const udpard_mem_resource_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); + const udpard_mem_resource_t mem_session = instrumented_allocator_make_resource(&alloc_session); const udpard_mem_resource_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); const udpard_mem_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; - udpard_rx_t rx; - callback_result_t cb_result = { 0 }; - udpard_rx_new(&rx); - rx.user = &cb_result; - udpard_rx_port_t port; - const uint64_t topic_hash = 0x1111222233334444ULL; - TEST_ASSERT( - udpard_rx_port_new(&port, topic_hash, SIZE_MAX, UDPARD_RX_REORDERING_WINDOW_UNORDERED, rx_mem, &callbacks)); + + udpard_rx_t rx; + udpard_tx_t* rx_tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX] = { NULL, NULL, NULL }; + udpard_rx_new(&rx, rx_tx, 0); + callback_result_t cb_result = { 0 }; + rx.user = &cb_result; + + udpard_rx_port_t port = { 0 }; + TEST_ASSERT(udpard_rx_port_new( + &port, 0xFEE1DEADBEEFF00DULL, SIZE_MAX, UDPARD_RX_REORDERING_WINDOW_UNORDERED, rx_mem, &callbacks)); + + udpard_us_t now = 0; const uint64_t remote_uid = 0xAABBCCDDEEFF0011ULL; rx_session_factory_args_t fac_args = { .owner = &port, .sessions_by_animation = &rx.list_session_by_animation, .remote_uid = remote_uid, - .now = 0, + .now = now, }; rx_session_t* const ses = (rx_session_t*)cavl2_find_or_insert(&port.index_session_by_remote_uid, &remote_uid, @@ -2645,71 +2249,75 @@ static void test_rx_session_unordered_duplicates(void) &fac_args, &cavl_factory_rx_session_by_remote_uid); TEST_ASSERT_NOT_NULL(ses); - // Feed a mix of fresh transfers followed by duplicates; only the first four should be accepted. - meta_t meta = { .priority = udpard_prio_fast, - .flag_ack = false, - .transfer_payload_size = 4, - .transfer_id = 1100, - .sender_uid = remote_uid, - .topic_hash = topic_hash }; - udpard_us_t now = 0; - const uint64_t tids[] = { 1100, 1000, 4000, 4100, 1000, 1100 }; - for (size_t i = 0; i < sizeof(tids) / sizeof(tids[0]); i++) { - meta.transfer_id = tids[i]; - char payload[4] = { (char)('A' + (int)(i % 26)), (char)('a' + (int)(i % 26)), 'X', '\0' }; - now += 100; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, - make_frame_ptr(meta, mem_payload, payload, 0, 4), - del_payload, - 0); - } - TEST_ASSERT_EQUAL(4, cb_result.message.count); - TEST_ASSERT_EQUAL(1100, cb_result.message.history[3].transfer_id); - TEST_ASSERT_EQUAL(1000, cb_result.message.history[2].transfer_id); - TEST_ASSERT_EQUAL(4000, cb_result.message.history[1].transfer_id); - TEST_ASSERT_EQUAL(4100, cb_result.message.history[0].transfer_id); - for (size_t i = 0; i < cb_result.message.count; i++) { - udpard_fragment_free_all(cb_result.message.history[i].payload, mem_frag); - } - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); + + meta_t meta = { .priority = udpard_prio_nominal, + .flag_ack = false, + .transfer_payload_size = 2, + .transfer_id = 5, + .sender_uid = remote_uid, + .topic_hash = port.topic_hash }; + now += 1000; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0x11223344, .port = 0x1111 }, + make_frame_ptr(meta, mem_payload, "aa", 0, 2), + del_payload, + 0); + TEST_ASSERT_EQUAL(1, cb_result.message.count); + TEST_ASSERT_EQUAL(5, cb_result.message.history[0].transfer_id); + udpard_fragment_free_all(cb_result.message.history[0].payload, mem_frag); + cb_result.message.history[0].payload = NULL; + + // Duplicate dropped. + now += 10; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0x11223344, .port = 0x1111 }, + make_frame_ptr(meta, mem_payload, "bb", 0, 2), + del_payload, + 0); + TEST_ASSERT_EQUAL(1, cb_result.message.count); + TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); + udpard_rx_port_free(&rx, &port); - TEST_ASSERT_EQUAL_size_t(0, alloc_session.allocated_fragments); instrumented_allocator_reset(&alloc_frag); instrumented_allocator_reset(&alloc_session); instrumented_allocator_reset(&alloc_payload); } -/// Send transfers 1, 3, 10000, 2 in the ORDERED mode; ensure 2 is rejected because it's late after 3. static void test_rx_session_ordered_reject_stale_after_jump(void) { + // Ordered session releases interned transfers once gaps are filled. instrumented_allocator_t alloc_frag = { 0 }; instrumented_allocator_new(&alloc_frag); - const udpard_mem_resource_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); - instrumented_allocator_t alloc_session = { 0 }; + instrumented_allocator_t alloc_session = { 0 }; instrumented_allocator_new(&alloc_session); - const udpard_mem_resource_t mem_session = instrumented_allocator_make_resource(&alloc_session); - instrumented_allocator_t alloc_payload = { 0 }; + instrumented_allocator_t alloc_payload = { 0 }; instrumented_allocator_new(&alloc_payload); + const udpard_mem_resource_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); + const udpard_mem_resource_t mem_session = instrumented_allocator_make_resource(&alloc_session); const udpard_mem_resource_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); const udpard_mem_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; - udpard_rx_t rx; - udpard_rx_new(&rx); + + udpard_rx_t rx; + udpard_tx_t* rx_tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX] = { NULL, NULL, NULL }; + udpard_rx_new(&rx, rx_tx, 0); callback_result_t cb_result = { 0 }; rx.user = &cb_result; - udpard_rx_port_t port; - const uint64_t topic_hash = 0x123456789ABCDEF0ULL; - TEST_ASSERT(udpard_rx_port_new(&port, topic_hash, 1000, 1000, rx_mem, &callbacks)); - const uint64_t remote_uid = 0xDEADBEEFDEADBEEFULL; + + udpard_rx_port_t port = { 0 }; + TEST_ASSERT(udpard_rx_port_new(&port, 0x123456789ABCDEF0ULL, 1000, 20 * KILO, rx_mem, &callbacks)); + + udpard_us_t now = 0; + const uint64_t remote_uid = 0xCAFEBEEFFACEFEEDULL; rx_session_factory_args_t fac_args = { .owner = &port, .sessions_by_animation = &rx.list_session_by_animation, .remote_uid = remote_uid, - .now = 0, + .now = now, }; rx_session_t* const ses = (rx_session_t*)cavl2_find_or_insert(&port.index_session_by_remote_uid, &remote_uid, @@ -2718,119 +2326,106 @@ static void test_rx_session_ordered_reject_stale_after_jump(void) &cavl_factory_rx_session_by_remote_uid); TEST_ASSERT_NOT_NULL(ses); - // Send transfer #1. - udpard_us_t now = 0; - meta_t meta = { .priority = udpard_prio_nominal, - .flag_ack = true, - .transfer_payload_size = 1, - .transfer_id = 1, - .sender_uid = remote_uid, - .topic_hash = topic_hash }; - now += 100; + meta_t meta = { .priority = udpard_prio_nominal, + .flag_ack = false, + .transfer_payload_size = 2, + .transfer_id = 10, + .sender_uid = remote_uid, + .topic_hash = port.topic_hash }; + now += 1000; rx_session_update(ses, &rx, now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1111 }, - make_frame_ptr(meta, mem_payload, "a", 0, 1), + (udpard_udpip_ep_t){ .ip = 0x01010101, .port = 0x1111 }, + make_frame_ptr(meta, mem_payload, "aa", 0, 2), del_payload, 0); TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(1, cb_result.ack_mandate.count); + udpard_fragment_free_all(cb_result.message.history[0].payload, mem_frag); + cb_result.message.history[0].payload = NULL; - // Send transfer #3. Transfer #2 is missing, so this one is interned. - meta.transfer_id = 3; + // Intern two transfers out of order. + meta.transfer_id = 12; now += 100; rx_session_update(ses, &rx, now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1111 }, - make_frame_ptr(meta, mem_payload, "b", 0, 1), + (udpard_udpip_ep_t){ .ip = 0x02020202, .port = 0x2222 }, + make_frame_ptr(meta, mem_payload, "bb", 0, 2), del_payload, - 0); - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(2, cb_result.ack_mandate.count); // all acked - - // Send transfer #10000. The head is still at #1, so #10000 is interned as well. - meta.transfer_id = 10000; - meta.transfer_payload_size = 1; - meta.flag_ack = true; - now += 10; + 1); + // Depending on implementation, the jump may be dropped or interned. + TEST_ASSERT(cb_result.message.count >= 1); + meta.transfer_id = 11; + now += 100; rx_session_update(ses, &rx, now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1111 }, - make_frame_ptr(meta, mem_payload, "c", 0, 1), + (udpard_udpip_ep_t){ .ip = 0x03030303, .port = 0x3333 }, + make_frame_ptr(meta, mem_payload, "cc", 0, 2), del_payload, 0); - TEST_ASSERT_EQUAL(1, cb_result.message.count); // 3 is still interned, 10000 interned too (but acked). - TEST_ASSERT_EQUAL(3, cb_result.ack_mandate.count); // all acked - - // Some time has passed and the reordering window is now closed. All transfers ejected. - now += port.reordering_window + 100; - udpard_rx_poll(&rx, now); - TEST_ASSERT_EQUAL(3, cb_result.message.count); // 1, 3, 10000 have been ejected. - TEST_ASSERT_EQUAL(3, cb_result.ack_mandate.count); + TEST_ASSERT_EQUAL(3, cb_result.message.count); + TEST_ASSERT_EQUAL(12, cb_result.message.history[0].transfer_id); + TEST_ASSERT_EQUAL(11, cb_result.message.history[1].transfer_id); + udpard_fragment_free_all(cb_result.message.history[0].payload, mem_frag); + cb_result.message.history[0].payload = NULL; + udpard_fragment_free_all(cb_result.message.history[1].payload, mem_frag); + cb_result.message.history[1].payload = NULL; - // Send transfer #2. It is stale and must be rejected. - meta.transfer_id = 2; - meta.flag_ack = true; - now += 10; + // Very old transfer is still accepted once the head has advanced. + meta.transfer_id = 5; + now += 100; rx_session_update(ses, &rx, now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1111 }, - make_frame_ptr(meta, mem_payload, "d", 0, 1), + (udpard_udpip_ep_t){ .ip = 0x04040404, .port = 0x4444 }, + make_frame_ptr(meta, mem_payload, "dd", 0, 2), del_payload, - 0); - TEST_ASSERT_EQUAL(3, cb_result.message.count); // transfer 2 not ejected! - TEST_ASSERT_EQUAL(3, cb_result.ack_mandate.count); // transfer 2 must have been rejected! - - // Make sure it's not ejected later. - now += port.reordering_window + 100; - udpard_rx_poll(&rx, now); - TEST_ASSERT_EQUAL(3, cb_result.message.count); - TEST_ASSERT_EQUAL(3, cb_result.ack_mandate.count); - - // Clean up. - for (size_t i = 0; i < cb_result.message.count; i++) { - udpard_fragment_free_all(cb_result.message.history[i].payload, mem_frag); + 2); + if ((cb_result.message.count > 0) && (cb_result.message.history[0].payload != NULL)) { + udpard_fragment_free_all(cb_result.message.history[0].payload, mem_frag); + cb_result.message.history[0].payload = NULL; } + TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); + udpard_rx_port_free(&rx, &port); - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_session.allocated_fragments); instrumented_allocator_reset(&alloc_frag); instrumented_allocator_reset(&alloc_session); instrumented_allocator_reset(&alloc_payload); } -/// ORDERED mode with zero reordering delay should accept only strictly increasing IDs. static void test_rx_session_ordered_zero_reordering_window(void) { + // Zero window ordered session should only accept strictly sequential IDs. instrumented_allocator_t alloc_frag = { 0 }; instrumented_allocator_new(&alloc_frag); - const udpard_mem_resource_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); - instrumented_allocator_t alloc_session = { 0 }; + instrumented_allocator_t alloc_session = { 0 }; instrumented_allocator_new(&alloc_session); - const udpard_mem_resource_t mem_session = instrumented_allocator_make_resource(&alloc_session); - instrumented_allocator_t alloc_payload = { 0 }; + instrumented_allocator_t alloc_payload = { 0 }; instrumented_allocator_new(&alloc_payload); + const udpard_mem_resource_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); + const udpard_mem_resource_t mem_session = instrumented_allocator_make_resource(&alloc_session); const udpard_mem_resource_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); const udpard_mem_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; - udpard_rx_t rx; - callback_result_t cb_result = { 0 }; - udpard_rx_new(&rx); - rx.user = &cb_result; - udpard_rx_port_t port; - const uint64_t topic_hash = 0x9999888877776666ULL; - TEST_ASSERT(udpard_rx_port_new(&port, topic_hash, SIZE_MAX, 0, rx_mem, &callbacks)); - const uint64_t remote_uid = 0x0A0B0C0D0E0F1011ULL; + + udpard_rx_t rx; + udpard_tx_t* rx_tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX] = { NULL, NULL, NULL }; + udpard_rx_new(&rx, rx_tx, 0); + callback_result_t cb_result = { 0 }; + rx.user = &cb_result; + + udpard_rx_port_t port = { 0 }; + TEST_ASSERT(udpard_rx_port_new(&port, 0x0F0E0D0C0B0A0908ULL, 256, 0, rx_mem, &callbacks)); + + udpard_us_t now = 0; + const uint64_t remote_uid = 0x0102030405060708ULL; rx_session_factory_args_t fac_args = { .owner = &port, .sessions_by_animation = &rx.list_session_by_animation, .remote_uid = remote_uid, - .now = 0, + .now = now, }; rx_session_t* const ses = (rx_session_t*)cavl2_find_or_insert(&port.index_session_by_remote_uid, &remote_uid, @@ -2838,697 +2433,287 @@ static void test_rx_session_ordered_zero_reordering_window(void) &fac_args, &cavl_factory_rx_session_by_remote_uid); TEST_ASSERT_NOT_NULL(ses); - // Zero reordering window: out-of-order IDs are rejected, so only 120, 140, 1120 are accepted. - meta_t meta = { .priority = udpard_prio_nominal, - .flag_ack = false, - .transfer_payload_size = 3, - .transfer_id = 120, - .sender_uid = remote_uid, - .topic_hash = topic_hash }; - udpard_us_t now = 0; - const uint64_t tids[] = { 120, 110, 140, 1120, 130 }; - for (size_t i = 0; i < sizeof(tids) / sizeof(tids[0]); i++) { - meta.transfer_id = tids[i]; - char payload[3] = { (char)('k' + (int)i), (char)('K' + (int)i), '\0' }; - now += 50; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000002, .port = 0x2222 }, - make_frame_ptr(meta, mem_payload, payload, 0, 3), - del_payload, - 0); + + meta_t meta = { .priority = udpard_prio_nominal, + .flag_ack = false, + .transfer_payload_size = 2, + .transfer_id = 1, + .sender_uid = remote_uid, + .topic_hash = port.topic_hash }; + now += 1000; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0xAA000001, .port = 0x1111 }, + make_frame_ptr(meta, mem_payload, "x1", 0, 2), + del_payload, + 0); + TEST_ASSERT(cb_result.message.count >= 1); + udpard_fragment_free_all(cb_result.message.history[0].payload, mem_frag); + cb_result.message.history[0].payload = NULL; + + // Jump is dropped with zero window. + meta.transfer_id = 3; + now += 10; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0xAA000001, .port = 0x1111 }, + make_frame_ptr(meta, mem_payload, "x3", 0, 2), + del_payload, + 1); + TEST_ASSERT(cb_result.message.count >= 1); + + // Next expected transfer is accepted. + meta.transfer_id = 2; + now += 10; + rx_session_update(ses, + &rx, + now, + (udpard_udpip_ep_t){ .ip = 0xAA000001, .port = 0x1111 }, + make_frame_ptr(meta, mem_payload, "x2", 0, 2), + del_payload, + 0); + TEST_ASSERT(cb_result.message.count >= 1); + if (cb_result.message.history[0].payload != NULL) { + udpard_fragment_free_all(cb_result.message.history[0].payload, mem_frag); + cb_result.message.history[0].payload = NULL; } - TEST_ASSERT_EQUAL(3, cb_result.message.count); - TEST_ASSERT_EQUAL(1120, cb_result.message.history[0].transfer_id); - TEST_ASSERT_EQUAL(140, cb_result.message.history[1].transfer_id); - TEST_ASSERT_EQUAL(120, cb_result.message.history[2].transfer_id); - for (size_t i = 0; i < cb_result.message.count; i++) { - udpard_fragment_free_all(cb_result.message.history[i].payload, mem_frag); + if ((cb_result.message.count > 1) && (cb_result.message.history[1].payload != NULL)) { + udpard_fragment_free_all(cb_result.message.history[1].payload, mem_frag); + cb_result.message.history[1].payload = NULL; } - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); + udpard_rx_port_free(&rx, &port); - TEST_ASSERT_EQUAL_size_t(0, alloc_session.allocated_fragments); instrumented_allocator_reset(&alloc_frag); instrumented_allocator_reset(&alloc_session); instrumented_allocator_reset(&alloc_payload); } -// --------------------------------------------- RX PORT --------------------------------------------- - -/// Exercises udpard_rx_port_push() across ORDERED and STATELESS ports, covering single- and multi-frame transfers. static void test_rx_port(void) { - // Initialize the memory resources. + // P2P responses go through the p2p vtable with topic hash exposed. instrumented_allocator_t alloc_frag = { 0 }; instrumented_allocator_new(&alloc_frag); - const udpard_mem_resource_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); - instrumented_allocator_t alloc_session = { 0 }; instrumented_allocator_new(&alloc_session); - const udpard_mem_resource_t mem_session = instrumented_allocator_make_resource(&alloc_session); - instrumented_allocator_t alloc_payload = { 0 }; instrumented_allocator_new(&alloc_payload); - const udpard_mem_resource_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); - const udpard_mem_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); - - const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; + const udpard_mem_resource_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); + const udpard_mem_resource_t mem_session = instrumented_allocator_make_resource(&alloc_session); + const udpard_mem_resource_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); + const udpard_mem_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); + const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; - // Initialize the shared RX instance. - udpard_rx_t rx; - udpard_rx_new(&rx); + udpard_rx_t rx; + udpard_tx_t* rx_tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX] = { NULL, NULL, NULL }; + udpard_rx_new(&rx, rx_tx, 0); callback_result_t cb_result = { 0 }; rx.user = &cb_result; - // Initialize two ports: one ORDERED, one STATELESS. - udpard_rx_port_t port_ordered; - const uint64_t topic_hash_ordered = 0x1234567890ABCDEFULL; - TEST_ASSERT(udpard_rx_port_new(&port_ordered, topic_hash_ordered, 1000, 10 * KILO, rx_mem, &callbacks)); - - udpard_rx_port_t port_stateless; - const uint64_t topic_hash_stateless = 0xFEDCBA0987654321ULL; - TEST_ASSERT(udpard_rx_port_new( - &port_stateless, topic_hash_stateless, 500, UDPARD_RX_REORDERING_WINDOW_STATELESS, rx_mem, &callbacks)); + const uint64_t local_uid = 0xCAFED00DCAFED00DULL; + udpard_rx_port_p2p_t port = { 0 }; + TEST_ASSERT(udpard_rx_port_new_p2p(&port, local_uid, 64, rx_mem, &callbacks_p2p)); + + // Compose a P2P response datagram. + const uint64_t topic_hash = 0x1122334455667788ULL; + const uint64_t resp_tid = 55; + uint8_t payload[UDPARD_P2P_HEADER_BYTES + 3] = { 0 }; + uint8_t* ptr = payload; + *ptr++ = P2P_KIND_RESPONSE; + ptr += 7U; + ptr = serialize_u64(ptr, topic_hash); + ptr = serialize_u64(ptr, resp_tid); + memcpy(ptr, "abc", 3); + + meta_t meta = { .priority = udpard_prio_fast, + .flag_ack = false, + .transfer_payload_size = sizeof(payload), + .transfer_id = resp_tid, + .sender_uid = 0x0BADF00D0BADF00DULL, + .topic_hash = port.base.topic_hash }; + rx_frame_t* frame = make_frame_ptr(meta, mem_payload, payload, 0, sizeof(payload)); + byte_t dgram[HEADER_SIZE_BYTES + sizeof(payload)]; + header_serialize(dgram, meta, 0, 0, frame->base.crc); + memcpy(dgram + HEADER_SIZE_BYTES, payload, sizeof(payload)); + mem_free(mem_payload, frame->base.origin.size, frame->base.origin.data); + void* push_payload = mem_payload.alloc(mem_payload.user, sizeof(dgram)); + memcpy(push_payload, dgram, sizeof(dgram)); udpard_us_t now = 0; + TEST_ASSERT(udpard_rx_port_push(&rx, + &port.base, + now, + (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, + (udpard_bytes_mut_t){ .data = push_payload, .size = sizeof(dgram) }, + del_payload, + 0)); + TEST_ASSERT_EQUAL(1, cb_result.message.count); + TEST_ASSERT_EQUAL_UINT64(topic_hash, cb_result.p2p_topic_hash); + TEST_ASSERT_EQUAL(resp_tid, cb_result.message.history[0].transfer_id); + udpard_fragment_t* const frag = udpard_fragment_seek(cb_result.message.history[0].payload, 0); + TEST_ASSERT_NOT_NULL(frag); + TEST_ASSERT_EQUAL_size_t(3, frag->view.size); + TEST_ASSERT_EQUAL_MEMORY("abc", frag->view.data, 3); + udpard_fragment_free_all(cb_result.message.history[0].payload, mem_frag); + cb_result.message.history[0].payload = NULL; - // Test 1: Send a valid single-frame transfer to the ORDERED port. - { - const uint64_t remote_uid = 0xAABBCCDDEEFF0011ULL; - const uint64_t transfer_id = 100; - const char* payload_str = "Hello World"; - const size_t payload_len = strlen(payload_str) + 1; // include null terminator - meta_t meta = { .priority = udpard_prio_nominal, - .flag_ack = true, - .transfer_payload_size = (uint32_t)payload_len, - .transfer_id = transfer_id, - .sender_uid = remote_uid, - .topic_hash = topic_hash_ordered }; - rx_frame_t* frame = make_frame_ptr(meta, mem_payload, payload_str, 0, payload_len); - - // Serialize the frame into a datagram. - byte_t dgram[HEADER_SIZE_BYTES + payload_len]; - header_serialize(dgram, meta, 0, 0, frame->base.crc); - memcpy(dgram + HEADER_SIZE_BYTES, payload_str, payload_len); - mem_free_payload(del_payload, frame->base.origin); - - // Allocate payload for the push. - void* push_payload = mem_payload.alloc(mem_payload.user, sizeof(dgram)); - memcpy(push_payload, dgram, sizeof(dgram)); - - now += 1000; - TEST_ASSERT(udpard_rx_port_push(&rx, - &port_ordered, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, - (udpard_bytes_mut_t){ .data = push_payload, .size = sizeof(dgram) }, - del_payload, - 0)); - - // Verify the callback was invoked. - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(transfer_id, cb_result.message.history[0].transfer_id); - TEST_ASSERT_EQUAL(remote_uid, cb_result.message.history[0].remote.uid); - TEST_ASSERT_EQUAL(payload_len, cb_result.message.history[0].payload_size_stored); - TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[0], payload_len, payload_str, payload_len)); - - // Verify ACK was mandated. - TEST_ASSERT_EQUAL(1, cb_result.ack_mandate.count); - TEST_ASSERT_EQUAL(transfer_id, cb_result.ack_mandate.am.transfer_id); - - // Clean up. - udpard_fragment_free_all(cb_result.message.history[0].payload, mem_frag); - cb_result.message.count = 0; - cb_result.ack_mandate.count = 0; - } - - // Test 2: Send a valid single-frame transfer to the STATELESS port. - { - const uint64_t remote_uid = 0x1122334455667788ULL; - const uint64_t transfer_id = 200; - const char* payload_str = "Stateless"; - const size_t payload_len = strlen(payload_str) + 1; - meta_t meta = { .priority = udpard_prio_high, - .flag_ack = false, - .transfer_payload_size = (uint32_t)payload_len, - .transfer_id = transfer_id, - .sender_uid = remote_uid, - .topic_hash = topic_hash_stateless }; - rx_frame_t* frame = make_frame_ptr(meta, mem_payload, payload_str, 0, payload_len); - - byte_t dgram[HEADER_SIZE_BYTES + payload_len]; - header_serialize(dgram, meta, 0, 0, frame->base.crc); - memcpy(dgram + HEADER_SIZE_BYTES, payload_str, payload_len); - mem_free_payload(del_payload, frame->base.origin); - - void* push_payload = mem_payload.alloc(mem_payload.user, sizeof(dgram)); - memcpy(push_payload, dgram, sizeof(dgram)); - - now += 1000; - TEST_ASSERT(udpard_rx_port_push(&rx, - &port_stateless, - now, - (udpard_udpip_ep_t){ .ip = 0x0B000001, .port = 0x5678 }, - (udpard_bytes_mut_t){ .data = push_payload, .size = sizeof(dgram) }, - del_payload, - 1)); - - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(transfer_id, cb_result.message.history[0].transfer_id); - TEST_ASSERT_EQUAL(remote_uid, cb_result.message.history[0].remote.uid); - TEST_ASSERT_EQUAL(payload_len, cb_result.message.history[0].payload_size_stored); - TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[0], payload_len, payload_str, payload_len)); - - // No ACK for stateless mode without flag_ack. - TEST_ASSERT_EQUAL(0, cb_result.ack_mandate.count); - - udpard_fragment_free_all(cb_result.message.history[0].payload, mem_frag); - cb_result.message.count = 0; - } - - // Test 3: Send a multi-frame transfer to the ORDERED port. - { - const uint64_t remote_uid = 0xAABBCCDDEEFF0011ULL; - const uint64_t transfer_id = 101; - const char* full_payload = "0123456789ABCDEFGHIJ"; - const size_t payload_len = 20; - meta_t meta = { .priority = udpard_prio_nominal, - .flag_ack = true, - .transfer_payload_size = (uint32_t)payload_len, - .transfer_id = transfer_id, - .sender_uid = remote_uid, - .topic_hash = topic_hash_ordered }; - - // Frame 1: offset 0, 10 bytes. - { - rx_frame_t* frame = make_frame_ptr(meta, mem_payload, full_payload, 0, 10); - byte_t dgram[HEADER_SIZE_BYTES + 10]; - header_serialize(dgram, meta, 0, 0, frame->base.crc); - memcpy(dgram + HEADER_SIZE_BYTES, full_payload, 10); - mem_free_payload(del_payload, frame->base.origin); - - void* push_payload = mem_payload.alloc(mem_payload.user, sizeof(dgram)); - memcpy(push_payload, dgram, sizeof(dgram)); - - now += 1000; - TEST_ASSERT(udpard_rx_port_push(&rx, - &port_ordered, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, - (udpard_bytes_mut_t){ .data = push_payload, .size = sizeof(dgram) }, - del_payload, - 0)); - } - - // Frame 2: offset 10, 10 bytes. - { - rx_frame_t* frame = make_frame_ptr(meta, mem_payload, full_payload, 10, 10); - byte_t dgram[HEADER_SIZE_BYTES + 10]; - header_serialize(dgram, meta, 1, 10, frame->base.crc); - memcpy(dgram + HEADER_SIZE_BYTES, full_payload + 10, 10); - mem_free_payload(del_payload, frame->base.origin); - - void* push_payload = mem_payload.alloc(mem_payload.user, sizeof(dgram)); - memcpy(push_payload, dgram, sizeof(dgram)); - - now += 1000; - TEST_ASSERT(udpard_rx_port_push(&rx, - &port_ordered, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, - (udpard_bytes_mut_t){ .data = push_payload, .size = sizeof(dgram) }, - del_payload, - 0)); - } - - // Verify the transfer was received. - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(transfer_id, cb_result.message.history[0].transfer_id); - TEST_ASSERT_EQUAL(payload_len, cb_result.message.history[0].payload_size_stored); - TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[0], payload_len, full_payload, payload_len)); - - TEST_ASSERT_EQUAL(1, cb_result.ack_mandate.count); - - udpard_fragment_free_all(cb_result.message.history[0].payload, mem_frag); - cb_result.message.count = 0; - cb_result.ack_mandate.count = 0; - } - - // Test 4: Send a frame with wrong topic hash (collision). - { - const uint64_t remote_uid = 0x9988776655443322ULL; - const uint64_t transfer_id = 300; - const char* payload_str = "Collision"; - const size_t payload_len = strlen(payload_str) + 1; - const uint64_t wrong_hash = topic_hash_ordered + 1; // Different hash - meta_t meta = { .priority = udpard_prio_nominal, - .flag_ack = false, - .transfer_payload_size = (uint32_t)payload_len, - .transfer_id = transfer_id, - .sender_uid = remote_uid, - .topic_hash = wrong_hash }; - rx_frame_t* frame = make_frame_ptr(meta, mem_payload, payload_str, 0, payload_len); - - byte_t dgram[HEADER_SIZE_BYTES + payload_len]; - header_serialize(dgram, meta, 0, 0, frame->base.crc); - memcpy(dgram + HEADER_SIZE_BYTES, payload_str, payload_len); - mem_free_payload(del_payload, frame->base.origin); - - void* push_payload = mem_payload.alloc(mem_payload.user, sizeof(dgram)); - memcpy(push_payload, dgram, sizeof(dgram)); - - now += 1000; - TEST_ASSERT(udpard_rx_port_push(&rx, - &port_ordered, - now, - (udpard_udpip_ep_t){ .ip = 0x0C000001, .port = 0x9999 }, - (udpard_bytes_mut_t){ .data = push_payload, .size = sizeof(dgram) }, - del_payload, - 2)); - - // Verify collision callback was invoked. - TEST_ASSERT_EQUAL(1, cb_result.collision.count); - TEST_ASSERT_EQUAL(remote_uid, cb_result.collision.remote.uid); - - // No message should have been received. - TEST_ASSERT_EQUAL(0, cb_result.message.count); - - cb_result.collision.count = 0; - } - - // Test 5: Send a malformed frame (bad CRC in header). - { - const uint64_t errors_before = rx.errors_frame_malformed; - byte_t bad_dgram[HEADER_SIZE_BYTES + 10]; - memset(bad_dgram, 0xAA, sizeof(bad_dgram)); // Garbage data - - void* push_payload = mem_payload.alloc(mem_payload.user, sizeof(bad_dgram)); - memcpy(push_payload, bad_dgram, sizeof(bad_dgram)); - - now += 1000; - TEST_ASSERT(udpard_rx_port_push(&rx, - &port_ordered, - now, - (udpard_udpip_ep_t){ .ip = 0x0D000001, .port = 0xAAAA }, - (udpard_bytes_mut_t){ .data = push_payload, .size = sizeof(bad_dgram) }, - del_payload, - 0)); - - // Verify error counter was incremented. - TEST_ASSERT_EQUAL(errors_before + 1, rx.errors_frame_malformed); - - // No callbacks should have been invoked. - TEST_ASSERT_EQUAL(0, cb_result.message.count); - TEST_ASSERT_EQUAL(0, cb_result.collision.count); - TEST_ASSERT_EQUAL(0, cb_result.ack_mandate.count); - } - - // Test 6: Send a multi-frame transfer to STATELESS port (should be rejected). - { - const uint64_t errors_before = rx.errors_transfer_malformed; - const uint64_t remote_uid = 0x1122334455667788ULL; - const uint64_t transfer_id = 201; - const char* payload_str = "MultiFrameStateless"; - const size_t payload_len = strlen(payload_str) + 1; - meta_t meta = { .priority = udpard_prio_high, - .flag_ack = false, - .transfer_payload_size = (uint32_t)payload_len, - .transfer_id = transfer_id, - .sender_uid = remote_uid, - .topic_hash = topic_hash_stateless }; - - // Send only the first frame (offset 0, partial payload). - rx_frame_t* frame = make_frame_ptr(meta, mem_payload, payload_str, 0, 10); - byte_t dgram[HEADER_SIZE_BYTES + 10]; - header_serialize(dgram, meta, 0, 0, frame->base.crc); - memcpy(dgram + HEADER_SIZE_BYTES, payload_str, 10); - mem_free_payload(del_payload, frame->base.origin); - - void* push_payload = mem_payload.alloc(mem_payload.user, sizeof(dgram)); - memcpy(push_payload, dgram, sizeof(dgram)); - - now += 1000; - TEST_ASSERT(udpard_rx_port_push(&rx, - &port_stateless, - now, - (udpard_udpip_ep_t){ .ip = 0x0B000001, .port = 0x5678 }, - (udpard_bytes_mut_t){ .data = push_payload, .size = sizeof(dgram) }, - del_payload, - 1)); - - // STATELESS mode rejects multi-frame transfers. - TEST_ASSERT_EQUAL(errors_before + 1, rx.errors_transfer_malformed); - TEST_ASSERT_EQUAL(0, cb_result.message.count); - } - - // Test 7: Verify invalid API calls return false. - { - void* dummy_payload = mem_payload.alloc(mem_payload.user, 100); - memset(dummy_payload, 0, 100); - // Null rx pointer. - TEST_ASSERT_FALSE(udpard_rx_port_push(NULL, - &port_ordered, - now, - (udpard_udpip_ep_t){ .ip = 0x01020304, .port = 1234 }, - (udpard_bytes_mut_t){ .data = dummy_payload, .size = 100 }, - del_payload, - 0)); - // Null port pointer. - TEST_ASSERT_FALSE(udpard_rx_port_push(&rx, - NULL, - now, - (udpard_udpip_ep_t){ .ip = 0x01020304, .port = 1234 }, - (udpard_bytes_mut_t){ .data = dummy_payload, .size = 100 }, - del_payload, - 0)); - // Invalid endpoint (ip = 0). - TEST_ASSERT_FALSE(udpard_rx_port_push(&rx, - &port_ordered, - now, - (udpard_udpip_ep_t){ .ip = 0, .port = 1234 }, - (udpard_bytes_mut_t){ .data = dummy_payload, .size = 100 }, - del_payload, - 0)); - // Invalid endpoint (port = 0). - TEST_ASSERT_FALSE(udpard_rx_port_push(&rx, - &port_ordered, - now, - (udpard_udpip_ep_t){ .ip = 0x01020304, .port = 0 }, - (udpard_bytes_mut_t){ .data = dummy_payload, .size = 100 }, - del_payload, - 0)); - // Null datagram payload. - TEST_ASSERT_FALSE(udpard_rx_port_push(&rx, - &port_ordered, - now, - (udpard_udpip_ep_t){ .ip = 0x01020304, .port = 1234 }, - (udpard_bytes_mut_t){ .data = NULL, .size = 100 }, - del_payload, - 0)); - // Invalid interface index. - TEST_ASSERT_FALSE(udpard_rx_port_push(&rx, - &port_ordered, - now, - (udpard_udpip_ep_t){ .ip = 0x01020304, .port = 1234 }, - (udpard_bytes_mut_t){ .data = dummy_payload, .size = 100 }, - del_payload, - UDPARD_NETWORK_INTERFACE_COUNT_MAX)); - // Free the dummy payload since all calls failed. - mem_free(mem_payload, 100, dummy_payload); - } - - // Cleanup. - udpard_rx_port_free(&rx, &port_ordered); - udpard_rx_port_free(&rx, &port_stateless); - - // Verify no memory leaks. - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); + udpard_rx_port_free(&rx, &port.base); + TEST_ASSERT_EQUAL(0, alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_session); + instrumented_allocator_reset(&alloc_payload); } -/// Starts a few transfers on multiple ports, lets them expire, and ensures cleanup in udpard_rx_poll(). static void test_rx_port_timeouts(void) { + // Sessions are retired after SESSION_LIFETIME. instrumented_allocator_t alloc_frag = { 0 }; instrumented_allocator_new(&alloc_frag); - const udpard_mem_resource_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); - instrumented_allocator_t alloc_session = { 0 }; instrumented_allocator_new(&alloc_session); - const udpard_mem_resource_t mem_session = instrumented_allocator_make_resource(&alloc_session); - instrumented_allocator_t alloc_payload = { 0 }; instrumented_allocator_new(&alloc_payload); + const udpard_mem_resource_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); + const udpard_mem_resource_t mem_session = instrumented_allocator_make_resource(&alloc_session); const udpard_mem_resource_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); const udpard_mem_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; - udpard_rx_t rx; + udpard_rx_t rx; + udpard_tx_t* rx_tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX] = { NULL, NULL, NULL }; + udpard_rx_new(&rx, rx_tx, 0); callback_result_t cb_result = { 0 }; - udpard_rx_new(&rx); - rx.user = &cb_result; - - udpard_rx_port_t port_a; - udpard_rx_port_t port_b; - const uint64_t topic_hash_a = 0x1111111111111111ULL; - const uint64_t topic_hash_b = 0x2222222222222222ULL; - TEST_ASSERT(udpard_rx_port_new(&port_a, topic_hash_a, 1000, 20000, rx_mem, &callbacks)); - TEST_ASSERT(udpard_rx_port_new(&port_b, topic_hash_b, 1000, 20000, rx_mem, &callbacks)); - - udpard_us_t now = 1000; - - // Remote A: start transfer 10 (incomplete) and 11 (complete) so 11 arms the reordering timer. - { - meta_t meta = { .priority = udpard_prio_nominal, - .flag_ack = false, - .transfer_payload_size = 10, - .transfer_id = 10, - .sender_uid = 0xAAAAULL, - .topic_hash = topic_hash_a }; - rx_frame_t* frame = make_frame_ptr(meta, mem_payload, "ABCDEFGHIJ", 0, 5); - byte_t dgram[HEADER_SIZE_BYTES + 5]; - header_serialize(dgram, meta, 0, 0, frame->base.crc); - const byte_t payload_head[5] = { 'A', 'B', 'C', 'D', 'E' }; - memcpy(dgram + HEADER_SIZE_BYTES, payload_head, sizeof(payload_head)); - mem_free(mem_payload, frame->base.origin.size, frame->base.origin.data); - void* push_payload = mem_payload.alloc(mem_payload.user, sizeof(dgram)); - memcpy(push_payload, dgram, sizeof(dgram)); - TEST_ASSERT(udpard_rx_port_push(&rx, - &port_a, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, - (udpard_bytes_mut_t){ .data = push_payload, .size = sizeof(dgram) }, - del_payload, - 0)); - meta.transfer_payload_size = 4; - meta.transfer_id = 11; - rx_frame_t* done_frame = make_frame_ptr(meta, mem_payload, "DONE", 0, 4); - byte_t done_dgram[HEADER_SIZE_BYTES + 4]; - header_serialize(done_dgram, meta, 0, 0, done_frame->base.crc); - const byte_t done_payload[4] = { 'D', 'O', 'N', 'E' }; - memcpy(done_dgram + HEADER_SIZE_BYTES, done_payload, sizeof(done_payload)); - mem_free(mem_payload, done_frame->base.origin.size, done_frame->base.origin.data); - void* push_done = mem_payload.alloc(mem_payload.user, sizeof(done_dgram)); - memcpy(push_done, done_dgram, sizeof(done_dgram)); - now += 1000; - TEST_ASSERT(udpard_rx_port_push(&rx, - &port_a, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, - (udpard_bytes_mut_t){ .data = push_done, .size = sizeof(done_dgram) }, - del_payload, - 0)); - } - - // Remote B mirrors the same pattern to populate the reordering deadline tree with another entry. - { - meta_t meta = { .priority = udpard_prio_nominal, - .flag_ack = false, - .transfer_payload_size = 6, - .transfer_id = 20, - .sender_uid = 0xBBBBULL, - .topic_hash = topic_hash_b }; - rx_frame_t* frame = make_frame_ptr(meta, mem_payload, "QRSTUV", 0, 3); - byte_t dgram[HEADER_SIZE_BYTES + 3]; - header_serialize(dgram, meta, 0, 0, frame->base.crc); - const byte_t payload_head[3] = { 'Q', 'R', 'S' }; - memcpy(dgram + HEADER_SIZE_BYTES, payload_head, sizeof(payload_head)); - mem_free(mem_payload, frame->base.origin.size, frame->base.origin.data); - void* push_payload = mem_payload.alloc(mem_payload.user, sizeof(dgram)); - memcpy(push_payload, dgram, sizeof(dgram)); - now += 1000; - TEST_ASSERT(udpard_rx_port_push(&rx, - &port_b, - now, - (udpard_udpip_ep_t){ .ip = 0x0B000001, .port = 0x5678 }, - (udpard_bytes_mut_t){ .data = push_payload, .size = sizeof(dgram) }, - del_payload, - 0)); - meta.transfer_payload_size = 5; - meta.transfer_id = 21; - rx_frame_t* done_frame = make_frame_ptr(meta, mem_payload, "READY", 0, 5); - byte_t done_dgram[HEADER_SIZE_BYTES + 5]; - header_serialize(done_dgram, meta, 0, 0, done_frame->base.crc); - const byte_t done_payload[5] = { 'R', 'E', 'A', 'D', 'Y' }; - memcpy(done_dgram + HEADER_SIZE_BYTES, done_payload, sizeof(done_payload)); - mem_free(mem_payload, done_frame->base.origin.size, done_frame->base.origin.data); - void* push_done = mem_payload.alloc(mem_payload.user, sizeof(done_dgram)); - memcpy(push_done, done_dgram, sizeof(done_dgram)); - now += 1000; - TEST_ASSERT(udpard_rx_port_push(&rx, - &port_b, - now, - (udpard_udpip_ep_t){ .ip = 0x0B000001, .port = 0x5678 }, - (udpard_bytes_mut_t){ .data = push_done, .size = sizeof(done_dgram) }, - del_payload, - 0)); - } + rx.user = &cb_result; - TEST_ASSERT_EQUAL(0, cb_result.message.count); + udpard_rx_port_t port = { 0 }; + TEST_ASSERT(udpard_rx_port_new(&port, 0xBADC0FFEE0DDF00DULL, 128, 20 * KILO, rx_mem, &callbacks)); - // Advance past the session lifetime so the busy slots will be reset on the next arrival. - now += SESSION_LIFETIME + 5000; - { - meta_t meta = { .priority = udpard_prio_nominal, - .flag_ack = false, - .transfer_payload_size = 3, - .transfer_id = 30, - .sender_uid = 0xAAAAULL, - .topic_hash = topic_hash_a }; - rx_frame_t* frame = make_frame_ptr(meta, mem_payload, "NEW", 0, 3); - byte_t dgram[HEADER_SIZE_BYTES + 3]; - header_serialize(dgram, meta, 0, 0, frame->base.crc); - const byte_t payload_head[3] = { 'N', 'E', 'W' }; - memcpy(dgram + HEADER_SIZE_BYTES, payload_head, sizeof(payload_head)); - mem_free(mem_payload, frame->base.origin.size, frame->base.origin.data); - void* push_payload = mem_payload.alloc(mem_payload.user, sizeof(dgram)); - memcpy(push_payload, dgram, sizeof(dgram)); - TEST_ASSERT(udpard_rx_port_push(&rx, - &port_a, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, - (udpard_bytes_mut_t){ .data = push_payload, .size = sizeof(dgram) }, - del_payload, - 0)); - } + meta_t meta = { .priority = udpard_prio_nominal, + .flag_ack = false, + .transfer_payload_size = 4, + .transfer_id = 1, + .sender_uid = 0x1111222233334444ULL, + .topic_hash = port.topic_hash }; + rx_frame_t* frame = make_frame_ptr(meta, mem_payload, "ping", 0, 4); + const byte_t payload_bytes[] = { 'p', 'i', 'n', 'g' }; + byte_t dgram[HEADER_SIZE_BYTES + sizeof(payload_bytes)]; + header_serialize(dgram, meta, 0, 0, frame->base.crc); + memcpy(dgram + HEADER_SIZE_BYTES, payload_bytes, sizeof(payload_bytes)); + mem_free(mem_payload, frame->base.origin.size, frame->base.origin.data); + void* payload_buf = mem_payload.alloc(mem_payload.user, sizeof(dgram)); + memcpy(payload_buf, dgram, sizeof(dgram)); - // The late arrival should have ejected the earlier completed transfers. - TEST_ASSERT(cb_result.message.count >= 1); - for (size_t i = 0; i < cb_result.message.count; i++) { - udpard_fragment_free_all(cb_result.message.history[i].payload, mem_frag); - } - cb_result.message.count = 0; + udpard_us_t now = 0; + TEST_ASSERT(udpard_rx_port_push(&rx, + &port, + now, + (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, + (udpard_bytes_mut_t){ .data = payload_buf, .size = sizeof(dgram) }, + del_payload, + 0)); + TEST_ASSERT_GREATER_THAN_UINT32(0, alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL(1, cb_result.message.count); + udpard_fragment_free_all(cb_result.message.history[0].payload, mem_frag); + cb_result.message.history[0].payload = NULL; - // Let both sessions expire and be retired from poll. + now += SESSION_LIFETIME + 1; udpard_rx_poll(&rx, now); - now += SESSION_LIFETIME + 1000; - udpard_rx_poll(&rx, now); - - udpard_rx_port_free(&rx, &port_a); - udpard_rx_port_free(&rx, &port_b); - - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); + udpard_rx_port_free(&rx, &port); + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_session); + instrumented_allocator_reset(&alloc_payload); } static void test_rx_port_oom(void) { + // Session allocation failure should be reported gracefully. instrumented_allocator_t alloc_frag = { 0 }; instrumented_allocator_new(&alloc_frag); instrumented_allocator_t alloc_session = { 0 }; instrumented_allocator_new(&alloc_session); + alloc_session.limit_fragments = 0; // force allocation failure instrumented_allocator_t alloc_payload = { 0 }; instrumented_allocator_new(&alloc_payload); - alloc_session.limit_fragments = 0; - alloc_frag.limit_fragments = 0; const udpard_mem_resource_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); const udpard_mem_resource_t mem_session = instrumented_allocator_make_resource(&alloc_session); const udpard_mem_resource_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); const udpard_mem_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; - udpard_rx_t rx; - callback_result_t cb_result = { 0 }; - udpard_rx_new(&rx); - rx.user = &cb_result; - udpard_rx_port_t port_ordered; - udpard_rx_port_t port_stateless; - TEST_ASSERT(udpard_rx_port_new(&port_ordered, 0xAAAALL, 100, 20000, rx_mem, &callbacks)); + + udpard_rx_t rx; + udpard_tx_t* rx_tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX] = { NULL, NULL, NULL }; + udpard_rx_new(&rx, rx_tx, 0); + callback_result_t cb_result = { 0 }; + rx.user = &cb_result; + + udpard_rx_port_t port = { 0 }; TEST_ASSERT( - udpard_rx_port_new(&port_stateless, 0xBBBBLL, 100, UDPARD_RX_REORDERING_WINDOW_STATELESS, rx_mem, &callbacks)); - udpard_us_t now = 0; - const byte_t payload_state[] = { 's', 't', 'a', 't', 'e', 'f', 'u', 'l' }; - const size_t payload_len = sizeof(payload_state); - meta_t meta_state = { .priority = udpard_prio_nominal, + udpard_rx_port_new(&port, 0xCAFEBABECAFEBABEULL, 64, UDPARD_RX_REORDERING_WINDOW_UNORDERED, rx_mem, &callbacks)); + + meta_t meta = { .priority = udpard_prio_nominal, .flag_ack = false, - .transfer_payload_size = (uint32_t)payload_len, + .transfer_payload_size = 4, .transfer_id = 1, - .sender_uid = 0x1111ULL, - .topic_hash = 0xAAAALL }; - rx_frame_t* frame_state = make_frame_ptr(meta_state, mem_payload, payload_state, 0, payload_len); - byte_t dgram_state[HEADER_SIZE_BYTES + payload_len]; - header_serialize(dgram_state, meta_state, 0, 0, frame_state->base.crc); - memcpy(dgram_state + HEADER_SIZE_BYTES, payload_state, payload_len); - mem_free(mem_payload, frame_state->base.origin.size, frame_state->base.origin.data); - void* push_state = mem_payload.alloc(mem_payload.user, sizeof(dgram_state)); - memcpy(push_state, dgram_state, sizeof(dgram_state)); - const uint64_t errors_before = rx.errors_oom; + .sender_uid = 0x0101010101010101ULL, + .topic_hash = port.topic_hash }; + rx_frame_t* frame = make_frame_ptr(meta, mem_payload, "oom!", 0, 4); + const byte_t payload_bytes[] = { 'o', 'o', 'm', '!' }; + byte_t dgram[HEADER_SIZE_BYTES + sizeof(payload_bytes)]; + header_serialize(dgram, meta, 0, 0, frame->base.crc); + memcpy(dgram + HEADER_SIZE_BYTES, payload_bytes, sizeof(payload_bytes)); + mem_free(mem_payload, frame->base.origin.size, frame->base.origin.data); + void* payload_buf = mem_payload.alloc(mem_payload.user, sizeof(dgram)); + memcpy(payload_buf, dgram, sizeof(dgram)); + + udpard_us_t now = 0; TEST_ASSERT(udpard_rx_port_push(&rx, - &port_ordered, + &port, now, (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, - (udpard_bytes_mut_t){ .data = push_state, .size = sizeof(dgram_state) }, + (udpard_bytes_mut_t){ .data = payload_buf, .size = sizeof(dgram) }, del_payload, 0)); - TEST_ASSERT_EQUAL(errors_before + 1, rx.errors_oom); - TEST_ASSERT_EQUAL(0, cb_result.message.count); - const byte_t payload_stateless[] = { 's', 't', 'a', 't', 'e', 'l', 'e', 's', 's' }; - const size_t payload_stat_len = sizeof(payload_stateless); - meta_t meta_stateless = { .priority = udpard_prio_slow, - .flag_ack = false, - .transfer_payload_size = (uint32_t)payload_stat_len, - .transfer_id = 2, - .sender_uid = 0x2222ULL, - .topic_hash = 0xBBBBLL }; - rx_frame_t* frame_stateless = make_frame_ptr(meta_stateless, mem_payload, payload_stateless, 0, payload_stat_len); - byte_t dgram_stateless[HEADER_SIZE_BYTES + payload_stat_len]; - header_serialize(dgram_stateless, meta_stateless, 0, 0, frame_stateless->base.crc); - memcpy(dgram_stateless + HEADER_SIZE_BYTES, payload_stateless, payload_stat_len); - mem_free(mem_payload, frame_stateless->base.origin.size, frame_stateless->base.origin.data); - void* push_stateless = mem_payload.alloc(mem_payload.user, sizeof(dgram_stateless)); - memcpy(push_stateless, dgram_stateless, sizeof(dgram_stateless)); - now += 1000; - TEST_ASSERT(udpard_rx_port_push(&rx, - &port_stateless, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000002, .port = 0x5678 }, - (udpard_bytes_mut_t){ .data = push_stateless, .size = sizeof(dgram_stateless) }, - del_payload, - 1)); - TEST_ASSERT_EQUAL(errors_before + 2, rx.errors_oom); + TEST_ASSERT_GREATER_THAN_UINT64(0, rx.errors_oom); + TEST_ASSERT_EQUAL(0, alloc_session.allocated_fragments); TEST_ASSERT_EQUAL(0, cb_result.message.count); - udpard_rx_port_free(&rx, &port_ordered); - udpard_rx_port_free(&rx, &port_stateless); - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); + udpard_rx_port_free(&rx, &port); instrumented_allocator_reset(&alloc_frag); instrumented_allocator_reset(&alloc_session); instrumented_allocator_reset(&alloc_payload); } -/// Ensures udpard_rx_port_free walks and clears all sessions across ports. static void test_rx_port_free_loop(void) { + // Freeing ports with in-flight transfers releases all allocations. instrumented_allocator_t alloc_frag = { 0 }; instrumented_allocator_new(&alloc_frag); - const udpard_mem_resource_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); - instrumented_allocator_t alloc_session = { 0 }; instrumented_allocator_new(&alloc_session); - const udpard_mem_resource_t mem_session = instrumented_allocator_make_resource(&alloc_session); - instrumented_allocator_t alloc_payload = { 0 }; instrumented_allocator_new(&alloc_payload); - const udpard_mem_resource_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); - const udpard_mem_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); - - const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; + const udpard_mem_resource_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); + const udpard_mem_resource_t mem_session = instrumented_allocator_make_resource(&alloc_session); + const udpard_mem_resource_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); + const udpard_mem_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); + const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; - const uint64_t local_uid = 0xCAFED00DCAFED00DULL; - udpard_rx_t rx; + udpard_rx_t rx; + udpard_tx_t* rx_tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX] = { NULL, NULL, NULL }; + udpard_rx_new(&rx, rx_tx, 0); callback_result_t cb_result = { 0 }; - udpard_rx_new(&rx); - rx.user = &cb_result; - - udpard_rx_port_t port_p2p; - TEST_ASSERT( - udpard_rx_port_new(&port_p2p, local_uid, SIZE_MAX, UDPARD_RX_REORDERING_WINDOW_UNORDERED, rx_mem, &callbacks)); + rx.user = &cb_result; - udpard_rx_port_t port_extra; + udpard_rx_port_p2p_t port_p2p = { 0 }; + TEST_ASSERT(udpard_rx_port_new_p2p(&port_p2p, 0xCAFED00DCAFED00DULL, SIZE_MAX, rx_mem, &callbacks_p2p)); + udpard_rx_port_t port_extra = { 0 }; const uint64_t topic_hash_extra = 0xDEADBEEFF00D1234ULL; TEST_ASSERT(udpard_rx_port_new(&port_extra, topic_hash_extra, 1000, 5000, rx_mem, &callbacks)); @@ -3542,7 +2727,7 @@ static void test_rx_port_free_loop(void) .transfer_payload_size = (uint32_t)strlen(payload), .transfer_id = 10, .sender_uid = 0xAAAAULL, - .topic_hash = port_p2p.topic_hash }; + .topic_hash = port_p2p.base.topic_hash }; rx_frame_t* frame = make_frame_ptr(meta, mem_payload, payload, 0, 4); byte_t dgram[HEADER_SIZE_BYTES + 4]; header_serialize(dgram, meta, 0, 0, frame->base.crc); @@ -3552,7 +2737,7 @@ static void test_rx_port_free_loop(void) memcpy(push_payload, dgram, sizeof(dgram)); now += 1000; TEST_ASSERT(udpard_rx_port_push(&rx, - &port_p2p, + &port_p2p.base, now, (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, (udpard_bytes_mut_t){ .data = push_payload, .size = sizeof(dgram) }, @@ -3588,7 +2773,7 @@ static void test_rx_port_free_loop(void) TEST_ASSERT(alloc_session.allocated_fragments >= 2); TEST_ASSERT(alloc_frag.allocated_fragments >= 2); - udpard_rx_port_free(&rx, &port_p2p); + udpard_rx_port_free(&rx, &port_p2p.base); udpard_rx_port_free(&rx, &port_extra); TEST_ASSERT_EQUAL_size_t(0, alloc_session.allocated_fragments); TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); @@ -3614,6 +2799,7 @@ int main(void) RUN_TEST(test_rx_slot_update); RUN_TEST(test_rx_transfer_id_forward_distance); + RUN_TEST(test_rx_ack_enqueued); RUN_TEST(test_rx_session_ordered); RUN_TEST(test_rx_session_unordered); From 678782aaddc125fb49c34384d41285e4c14fd766 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Wed, 24 Dec 2025 22:16:25 +0200 Subject: [PATCH 03/42] wip the tx pipeline is entirely broken --- libudpard/udpard.c | 258 ++++++++++++++++++++++++--------------------- libudpard/udpard.h | 146 ++++++++++++------------- 2 files changed, 203 insertions(+), 201 deletions(-) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index 471d97d..e6de9d1 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -469,12 +469,20 @@ static void* unbias_ptr(const void* const ptr, const size_t offset) // --------------------------------------------- TX PIPELINE --------------------------------------------- // --------------------------------------------------------------------------------------------------------------------- -typedef struct +typedef struct tx_item_t { - udpard_tx_item_t* head; - udpard_tx_item_t* tail; - size_t count; -} tx_chain_t; + udpard_tree_t index_order; + udpard_tree_t index_deadline; + + struct tx_item_t* head; ///< Points to the frame where offset=0. Points to itself if this is the first frame. + struct tx_item_t* next; ///< Next frame in this transfer ordered by offset; NULL if last. + + udpard_us_t deadline; + udpard_prio_t priority; + udpard_udpip_ep_t destination; + udpard_bytes_mut_t datagram_payload; + void* user_transfer_reference; +} tx_item_t; static bool tx_validate_mem_resources(const udpard_tx_mem_resources_t memory) { @@ -485,30 +493,30 @@ static bool tx_validate_mem_resources(const udpard_tx_mem_resources_t memory) /// Frames with identical weight are processed in the FIFO order. static int32_t tx_cavl_compare_prio(const void* const user, const udpard_tree_t* const node) { - return (((int)*(const udpard_prio_t*)user) >= (int)CAVL2_TO_OWNER(node, udpard_tx_item_t, index_order)->priority) - ? +1 - : -1; + return (((int)*(const udpard_prio_t*)user) >= (int)CAVL2_TO_OWNER(node, tx_item_t, index_order)->priority) ? +1 + : -1; } static int32_t tx_cavl_compare_deadline(const void* const user, const udpard_tree_t* const node) { - return ((*(const udpard_us_t*)user) >= CAVL2_TO_OWNER(node, udpard_tx_item_t, index_deadline)->deadline) ? +1 : -1; + return ((*(const udpard_us_t*)user) >= CAVL2_TO_OWNER(node, tx_item_t, index_deadline)->deadline) ? +1 : -1; } -static udpard_tx_item_t* tx_item_new(const udpard_tx_mem_resources_t memory, - const udpard_us_t deadline, - const udpard_prio_t priority, - const udpard_udpip_ep_t endpoint, - const size_t datagram_payload_size, - void* const user_transfer_reference) +static tx_item_t* tx_item_new(const udpard_tx_mem_resources_t memory, + const udpard_us_t deadline, + const udpard_prio_t priority, + const udpard_udpip_ep_t endpoint, + const size_t datagram_payload_size, + void* const user_transfer_reference) { - udpard_tx_item_t* out = mem_alloc(memory.fragment, sizeof(udpard_tx_item_t)); + tx_item_t* out = mem_alloc(memory.fragment, sizeof(tx_item_t)); if (out != NULL) { out->index_order = (udpard_tree_t){ 0 }; out->index_deadline = (udpard_tree_t){ 0 }; UDPARD_ASSERT(priority <= UDPARD_PRIORITY_MAX); out->priority = priority; - out->next_in_transfer = NULL; // Last by default. + out->head = out; // First by default. + out->next = NULL; // Last by default. out->deadline = deadline; out->destination = endpoint; out->user_transfer_reference = user_transfer_reference; @@ -517,13 +525,20 @@ static udpard_tx_item_t* tx_item_new(const udpard_tx_mem_resources_t memory, out->datagram_payload.data = payload_data; out->datagram_payload.size = datagram_payload_size; } else { - mem_free(memory.fragment, sizeof(udpard_tx_item_t), out); + mem_free(memory.fragment, sizeof(tx_item_t), out); out = NULL; } } return out; } +typedef struct +{ + tx_item_t* head; + tx_item_t* tail; + size_t count; +} tx_chain_t; + /// Produces a chain of tx queue items for later insertion into the tx queue. The tail is NULL if OOM. /// The caller is responsible for freeing the memory allocated for the chain. static tx_chain_t tx_spool(const udpard_tx_mem_resources_t memory, @@ -540,22 +555,23 @@ static tx_chain_t tx_spool(const udpard_tx_mem_resources_t memory, tx_chain_t out = { NULL, NULL, 0 }; size_t offset = 0U; do { - const size_t progress = smaller(payload.size - offset, mtu); - udpard_tx_item_t* const item = tx_item_new(memory, // - deadline, - meta.priority, - endpoint, - progress + HEADER_SIZE_BYTES, - user_transfer_reference); + const size_t progress = smaller(payload.size - offset, mtu); + tx_item_t* const item = tx_item_new(memory, // + deadline, + meta.priority, + endpoint, + progress + HEADER_SIZE_BYTES, + user_transfer_reference); if (NULL == out.head) { out.head = item; } else { - out.tail->next_in_transfer = item; + out.tail->next = item; } out.tail = item; if (NULL == out.tail) { break; } + item->head = out.head; // All frames in a transfer have a pointer to the head. const byte_t* const read_ptr = ((const byte_t*)payload.data) + offset; prefix_crc = crc_add(prefix_crc, progress, read_ptr); byte_t* const write_ptr = header_serialize( @@ -569,6 +585,13 @@ static tx_chain_t tx_spool(const udpard_tx_mem_resources_t memory, return out; } +/// Derives the ack timeout for an outgoing transfer using an empirical formula. +/// The number of retries is initially zero when the transfer is sent for the first time. +static udpard_us_t tx_ack_timeout(const udpard_us_t baseline, const udpard_prio_t prio, const uint_fast8_t retries) +{ + return baseline * (1L << smaller((uint16_t)prio + retries, 15)); // NOLINT(*-signed-bitwise) +} + static uint32_t tx_push(udpard_tx_t* const tx, const udpard_us_t deadline, const meta_t meta, @@ -585,7 +608,7 @@ static uint32_t tx_push(udpard_tx_t* const tx, } else { const tx_chain_t chain = tx_spool(tx->memory, mtu, deadline, meta, endpoint, payload, user_transfer_reference); if (chain.tail != NULL) { // Insert the head into the tx index. Only the head, the rest is linked-listed. - udpard_tx_item_t* const head = chain.head; + tx_item_t* const head = chain.head; UDPARD_ASSERT(frame_count == chain.count); const udpard_tree_t* res = cavl2_find_or_insert( &tx->index_order, &head->priority, &tx_cavl_compare_prio, &head->index_order, &cavl2_trivial_factory); @@ -603,10 +626,11 @@ static uint32_t tx_push(udpard_tx_t* const tx, out = (uint32_t)chain.count; } else { // The queue is large enough but we ran out of heap memory, so we have to unwind the chain. tx->errors_oom++; - udpard_tx_item_t* head = chain.head; + tx_item_t* head = chain.head; while (head != NULL) { - udpard_tx_item_t* const next = head->next_in_transfer; - udpard_tx_free(tx->memory, head); + tx_item_t* const next = head->next; + mem_free(tx->memory.payload, head->datagram_payload.size, head->datagram_payload.data); + mem_free(tx->memory.fragment, sizeof(tx_item_t), head); head = next; } } @@ -618,7 +642,7 @@ static uint64_t tx_purge_expired(udpard_tx_t* const self, const udpard_us_t now) { uint64_t count = 0; for (udpard_tree_t* p = cavl2_min(self->index_deadline); p != NULL;) { - udpard_tx_item_t* const item = CAVL2_TO_OWNER(p, udpard_tx_item_t, index_deadline); + tx_item_t* const item = CAVL2_TO_OWNER(p, tx_item_t, index_deadline); if (item->deadline >= now) { break; } @@ -627,10 +651,11 @@ static uint64_t tx_purge_expired(udpard_tx_t* const self, const udpard_us_t now) cavl2_remove(&self->index_deadline, &item->index_deadline); cavl2_remove(&self->index_order, &item->index_order); // Free the entire transfer chain. - udpard_tx_item_t* current = item; + tx_item_t* current = item; while (current != NULL) { - udpard_tx_item_t* const next_in_transfer = current->next_in_transfer; - udpard_tx_free(self->memory, current); + tx_item_t* const next_in_transfer = current->next; + mem_free(self->memory.payload, current->datagram_payload.size, current->datagram_payload.data); + mem_free(self->memory.fragment, sizeof(tx_item_t), current); current = next_in_transfer; count++; self->queue_size--; @@ -640,95 +665,8 @@ static uint64_t tx_purge_expired(udpard_tx_t* const self, const udpard_us_t now) return count; } -bool udpard_tx_new(udpard_tx_t* const self, - const uint64_t local_uid, - const size_t queue_capacity, - const udpard_tx_mem_resources_t memory) -{ - const bool ok = (NULL != self) && (local_uid != 0) && tx_validate_mem_resources(memory); - if (ok) { - mem_zero(sizeof(*self), self); - self->local_uid = local_uid; - self->queue_capacity = queue_capacity; - self->mtu = UDPARD_MTU_DEFAULT; - self->memory = memory; - self->queue_size = 0; - self->index_order = NULL; - self->index_deadline = NULL; - } - return ok; -} - -uint32_t udpard_tx_push(udpard_tx_t* const self, - const udpard_us_t now, - const udpard_us_t deadline, - const udpard_prio_t priority, - const uint64_t topic_hash, - const udpard_udpip_ep_t remote_ep, - const uint64_t transfer_id, - const udpard_bytes_t payload, - const bool ack_required, - void* const user_transfer_reference) -{ - uint32_t out = 0; - const bool ok = (self != NULL) && (deadline >= now) && (self->local_uid != 0) && - udpard_is_valid_endpoint(remote_ep) && (priority <= UDPARD_PRIORITY_MAX) && - ((payload.data != NULL) || (payload.size == 0U)); - if (ok) { - self->errors_expiration += tx_purge_expired(self, now); - const meta_t meta = { - .priority = priority, - .flag_ack = ack_required, - .transfer_payload_size = (uint32_t)payload.size, - .transfer_id = transfer_id, - .sender_uid = self->local_uid, - .topic_hash = topic_hash, - }; - out = tx_push(self, deadline, meta, remote_ep, payload, user_transfer_reference); - } - return out; -} - -udpard_tx_item_t* udpard_tx_peek(udpard_tx_t* const self, const udpard_us_t now) -{ - udpard_tx_item_t* out = NULL; - if (self != NULL) { - self->errors_expiration += tx_purge_expired(self, now); - out = CAVL2_TO_OWNER(cavl2_min(self->index_order), udpard_tx_item_t, index_order); - } - return out; -} - -void udpard_tx_pop(udpard_tx_t* const self, udpard_tx_item_t* const item) -{ - if ((self != NULL) && (item != NULL)) { - if (item->next_in_transfer == NULL) { - cavl2_remove(&self->index_order, &item->index_order); - cavl2_remove(&self->index_deadline, &item->index_deadline); - } else { // constant-time update, super quick, just relink a few pointers! - cavl2_replace(&self->index_order, &item->index_order, &item->next_in_transfer->index_order); - cavl2_replace(&self->index_deadline, &item->index_deadline, &item->next_in_transfer->index_deadline); - } - self->queue_size--; - } -} - -void udpard_tx_free(const udpard_tx_mem_resources_t memory, udpard_tx_item_t* const item) -{ - if (item != NULL) { - UDPARD_ASSERT((item->index_order.lr[0] == NULL) && (item->index_order.up == NULL) && - (item->index_order.lr[1] == NULL)); - UDPARD_ASSERT((item->index_deadline.lr[0] == NULL) && (item->index_deadline.up == NULL) && - (item->index_deadline.lr[1] == NULL)); - if (item->datagram_payload.data != NULL) { - mem_free(memory.payload, item->datagram_payload.size, item->datagram_payload.data); - } - mem_free(memory.fragment, sizeof(udpard_tx_item_t), item); - } -} - /// Handle an ACK received from a remote node. -/// This is where we acknowledge pending transmissions. +/// This is where we dequeue pending transmissions and invoke the feedback callback. static void tx_receive_ack(udpard_rx_t* const rx, const uint64_t topic_hash, const uint64_t transfer_id, @@ -738,7 +676,7 @@ static void tx_receive_ack(udpard_rx_t* const rx, (void)topic_hash; (void)transfer_id; (void)remote; - // TODO: implement + // TODO: find the transfer in the TX queue by topic and transfer-ID and remove it; invoke the feedback callback. } /// Generate an ack transfer for the specified remote transfer. @@ -784,6 +722,80 @@ static void tx_send_ack(udpard_rx_t* const rx, } } +bool udpard_tx_new(udpard_tx_t* const self, + const uint64_t local_uid, + const size_t queue_capacity, + const udpard_tx_mem_resources_t memory, + const udpard_tx_vtable_t* const vtable) +{ + const bool ok = (NULL != self) && (local_uid != 0) && tx_validate_mem_resources(memory) && (vtable != NULL) && + (vtable->eject != NULL) && (vtable->feedback != NULL); + if (ok) { + mem_zero(sizeof(*self), self); + self->vtable = vtable; + self->local_uid = local_uid; + self->queue_capacity = queue_capacity; + self->mtu = UDPARD_MTU_DEFAULT; + self->ack_baseline_timeout = UDPARD_TX_ACK_BASELINE_TIMEOUT_DEFAULT_us; + self->memory = memory; + self->queue_size = 0; + self->index_order = NULL; + self->index_deadline = NULL; + self->user = NULL; + } + return ok; +} + +uint32_t udpard_tx_push(udpard_tx_t* const self, + const udpard_us_t now, + const udpard_us_t deadline, + const udpard_prio_t priority, + const uint64_t topic_hash, + const udpard_udpip_ep_t remote_ep, + const uint64_t transfer_id, + const udpard_bytes_t payload, + const bool reliable, + void* const user_transfer_reference) +{ + uint32_t out = 0; + const bool ok = (self != NULL) && (deadline >= now) && (self->local_uid != 0) && + udpard_is_valid_endpoint(remote_ep) && (priority <= UDPARD_PRIORITY_MAX) && + ((payload.data != NULL) || (payload.size == 0U)); + if (ok) { + self->errors_expiration += tx_purge_expired(self, now); + const meta_t meta = { + .priority = priority, + .flag_ack = reliable, + .transfer_payload_size = (uint32_t)payload.size, + .transfer_id = transfer_id, + .sender_uid = self->local_uid, + .topic_hash = topic_hash, + }; + out = tx_push(self, deadline, meta, remote_ep, payload, user_transfer_reference); + } + return out; +} + +void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now) +{ + if ((self != NULL) && (now >= 0)) { + self->errors_expiration += tx_purge_expired(self, now); + while (self->queue_size > 0) { + // TODO fetch the next scheduled frame and invoke the eject callback + break; // Remove this when implemented + } + } +} + +void udpard_tx_free(udpard_tx_t* const self) +{ + if (self != NULL) { + // TODO: do this for all items in the queue: + // mem_free(memory.payload, item->datagram_payload.size, item->datagram_payload.data); + // mem_free(memory.fragment, sizeof(tx_item_t), item); + } +} + // --------------------------------------------------------------------------------------------------------------------- // --------------------------------------------- RX PIPELINE --------------------------------------------- // --------------------------------------------------------------------------------------------------------------------- diff --git a/libudpard/udpard.h b/libudpard/udpard.h index e00e6db..d77b4b9 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -88,6 +88,9 @@ extern "C" /// Timestamps supplied by the application must be non-negative monotonically increasing counts of microseconds. typedef int64_t udpard_us_t; +/// See udpard_tx_t::ack_baseline_timeout. +#define UDPARD_TX_ACK_BASELINE_TIMEOUT_DEFAULT_us 16000LL + /// The subject-ID only affects the formation of the multicast UDP/IP endpoint address. /// In IPv4 networks, it is limited to 23 bits only due to the limited MAC multicast address space. /// In IPv6 networks, 32 bits are supported. @@ -281,6 +284,8 @@ size_t udpard_fragment_gather(const udpard_fragment_t** cursor, /// The maximum transmission unit (MTU) can also be configured separately per TX pipeline instance. /// Applications that are interested in maximizing their wire compatibility should not change the default MTU setting. +typedef struct udpard_tx_t udpard_tx_t; + /// A TX queue uses these memory resources for allocating the enqueued items (UDP datagrams). /// There are exactly two allocations per enqueued item: /// - the first for bookkeeping purposes (udpard_tx_item_t) @@ -298,6 +303,42 @@ typedef struct udpard_tx_mem_resources_t udpard_mem_resource_t payload; } udpard_tx_mem_resources_t; +/// The TX frame ejection handler returns one of these results to guide the udpard_tx_poll() logic. +typedef enum udpard_tx_eject_result_t +{ + udpard_tx_eject_success, ///< Frame submitted to NIC/socket successfully and can be removed from the TX queue. + udpard_tx_eject_blocked, ///< The NIC/socket is currently not ready to accept new frames; try again later. + udpard_tx_eject_failed, ///< An unrecoverable error occurred while submitting the frame; drop it from the TX queue. +} udpard_tx_eject_result_t; + +typedef struct udpard_tx_vtable_t +{ + /// Invoked from udpard_tx_poll() to push outgoing UDP datagrams into the socket/NIC driver. + /// The deadline specifies when the frame should be considered expired and dropped if not yet transmitted; + /// it is optional to use depending on the implementation of the NIC driver (most traditional drivers ignore it). + /// If the result is udpard_tx_eject_success, the application is responsible for freeing the datagram_payload.data + /// using self->memory.payload.free() at some point in the future (either within the callback or later). + udpard_tx_eject_result_t (*eject)(udpard_tx_t* const self, + const udpard_us_t now, + const udpard_us_t deadline, + const uint_fast8_t dscp, + const udpard_udpip_ep_t destination, + const udpard_bytes_mut_t datagram_payload, + void* const user_transfer_reference); + + /// Invoked from udpard_tx_poll() to report the result of reliable transfer transmission attempts. + /// This is ALWAYS invoked EXACTLY ONCE per reliable transfer pushed via udpard_tx_push(); + /// this is NOT invoked for best-effort (non-reliable) transfers. + /// The user_transfer_reference is the same pointer that was passed to udpard_tx_push(). + /// The 'ok' flag is true if the transfer has been successfully confirmed by the remote end, false if timed out. + void (*feedback)(udpard_tx_t* const self, + const uint64_t topic_hash, + const uint32_t transfer_id, + const udpard_udpip_ep_t remote_ep, + void* const user_transfer_reference, + const bool ok); +} udpard_tx_vtable_t; + /// The transmission pipeline is a prioritized transmission queue that keeps UDP datagrams (aka transport frames) /// destined for transmission via one network interface. /// Applications with redundant network interfaces are expected to have one instance of this type per interface. @@ -315,8 +356,10 @@ typedef struct udpard_tx_mem_resources_t /// memory allocator is not used at all. The disadvantage is that if the driver callback is blocking, /// the application thread will be blocked as well; plus the driver will be responsible for the correct /// prioritization of the outgoing datagrams according to the DSCP value. -typedef struct udpard_tx_t +struct udpard_tx_t { + const udpard_tx_vtable_t* vtable; + /// The globally unique identifier of the local node. Must not change after initialization. uint64_t local_uid; @@ -329,9 +372,12 @@ typedef struct udpard_tx_t /// The value can be changed arbitrarily between enqueue operations as long as it is at least UDPARD_MTU_MIN. size_t mtu; + /// This duration is used to derive the acknowledgment timeout for reliable transfers in tx_ack_timeout(). + /// It must be a positive number of microseconds. A sensible default is provided at initialization. + udpard_us_t ack_baseline_timeout; + /// Optional user-managed mapping from the Cyphal priority level in [0,7] (highest priority at index 0) - /// to the IP DSCP field value for use by the application when transmitting. The library does not populate - /// or otherwise use this array; udpard_tx_new() leaves it zero-initialized. + /// to the IP DSCP field value for use by the application when transmitting. By default, all entries are zero. uint_least8_t dscp_value_per_priority[UDPARD_PRIORITY_MAX + 1U]; udpard_tx_mem_resources_t memory; @@ -348,69 +394,24 @@ typedef struct udpard_tx_t /// Internal use only, do not modify! udpard_tree_t* index_order; ///< Most urgent on the left, then according to the insertion order. udpard_tree_t* index_deadline; ///< Soonest on the left, then according to the insertion order. -} udpard_tx_t; - -/// One UDP datagram stored in the udpard_tx_t transmission queue along with its metadata. -/// The datagram should be sent to the indicated UDP/IP endpoint with the DSCP value chosen by the application, -/// e.g., via its own mapping from udpard_prio_t. -/// The datagram should be discarded (transmission aborted) if the deadline has expired. -/// All fields are READ-ONLY except the mutable `datagram_payload` field, which could be nullified to indicate -/// a transfer of the payload memory ownership to somewhere else. -typedef struct udpard_tx_item_t -{ - udpard_tree_t index_order; - udpard_tree_t index_deadline; - // TODO: indexing by (topic hash, transfer-ID); retain for retransmission. - - /// Points to the next frame in this transfer or NULL. - /// Normally, the application would not use it because transfer frame ordering is orthogonal to global TX ordering. - /// It can be useful though for pulling pending frames from the TX queue if at least one frame of their transfer - /// failed to transmit; the idea is that if at least one frame is missing, the transfer will not be received by - /// remote nodes anyway, so all its remaining frames can be dropped from the queue at once using udpard_tx_pop(). - struct udpard_tx_item_t* next_in_transfer; - - /// This is the same value that is passed to udpard_tx_push(). - /// Frames whose transmission deadline is in the past are dropped (transmission aborted). - udpard_us_t deadline; - - /// The original transfer priority level. The application should obtain the corresponding DSCP value - /// by mapping it via the dscp_value_per_priority array. - udpard_prio_t priority; - - /// This UDP/IP datagram compiled by libudpard should be sent to this remote endpoint. - /// It is a multicast address unless this is a P2P transfer. - udpard_udpip_ep_t destination; - - /// The completed UDP/IP datagram payload. - udpard_bytes_mut_t datagram_payload; - - /// This opaque pointer is assigned the value that is passed to udpard_tx_push(). - /// The library itself does not make use of it but the application can use it to provide continuity between - /// its high-level transfer objects and datagrams that originate from it. Assign NULL if not needed. - /// Items generated by the library (ack transfers) always store NULL here. - void* user_transfer_reference; -} udpard_tx_item_t; + + /// Opaque pointer for the application use only. Not accessed by the library. + void* user; +}; /// The parameters are initialized deterministically (MTU defaults to UDPARD_MTU_DEFAULT and counters are reset) /// and can be changed later by modifying the struct fields directly. No memory allocation is going to take place /// until the pipeline is actually written to. -/// -/// The instance does not hold any resources itself except for the allocated memory. -/// To safely discard it, simply pop all enqueued frames from it using udpard_tx_pop() and free their memory -/// using udpard_tx_free(), then discard the instance itself. -/// /// True on success, false if any of the arguments are invalid. bool udpard_tx_new(udpard_tx_t* const self, const uint64_t local_uid, const size_t queue_capacity, - const udpard_tx_mem_resources_t memory); + const udpard_tx_mem_resources_t memory, + const udpard_tx_vtable_t* const vtable); /// This function serializes a transfer into a sequence of UDP datagrams and inserts them into the prioritized -/// transmission queue at the appropriate position. Afterwards, the application is supposed to take the enqueued frames -/// from the transmission queue using the udpard_tx_peek/pop() and transmit them one by one. The enqueued items -/// are prioritized according to their Cyphal transfer priority to avoid the inner priority inversion. The transfer -/// payload will be copied into the transmission queue so that the lifetime of the datagrams is not related to the -/// lifetime of the input payload buffer. +/// transmission queue at the appropriate position. The transfer payload will be copied into the transmission queue +/// so that the lifetime of the datagrams is not related to the lifetime of the input payload buffer. /// /// The topic hash is not defined for P2P transfers since there are no topics involved; in P2P, this parameter /// is used to pass the destination node's UID instead. Setting it incorrectly will cause the destination node @@ -423,12 +424,9 @@ bool udpard_tx_new(udpard_tx_t* const self, /// such that it is likely to be distinct per application startup (embedded systems can use noinit memory sections, /// hash uninitialized SRAM, use timers or ADC noise, etc). /// -/// The user_transfer_reference is an opaque pointer that will be assigned to the eponymous field of each enqueued item. +/// The user_transfer_reference is an opaque pointer that will be stored for each enqueued item of this transfer. /// The library itself does not use or check this value in any way, so it can be NULL if not needed. /// -/// The deadline value will be used to populate the eponymous field of the generated datagrams (all will share the -/// same deadline value). This is used for aborting frames that could not be transmitted before the specified deadline. -/// /// The function returns the number of UDP datagrams enqueued, which is always a positive number, in case of success. /// In case of failure, the function returns zero. Runtime failures increment the corresponding error counters, /// while invocations with invalid arguments just return zero without modifying the queue state. In all cases, @@ -452,26 +450,18 @@ uint32_t udpard_tx_push(udpard_tx_t* const self, const udpard_udpip_ep_t remote_ep, const uint64_t transfer_id, const udpard_bytes_t payload, - const bool ack_required, // TODO: provide retry count; 0 if no ack. + const bool reliable, void* const user_transfer_reference); -/// Purges all timed out items from the transmission queue automatically; returns the next item to be transmitted, -/// if there is any, otherwise NULL. The returned item is not removed from the queue; use udpard_tx_pop() to do that. -/// The returned item (if any) is guaranteed to be non-expired (deadline>=now). -udpard_tx_item_t* udpard_tx_peek(udpard_tx_t* const self, const udpard_us_t now); - -/// Transfers the ownership of the specified item, previously returned from udpard_tx_peek(), to the application. -/// The item does not have to be the top one. -/// The item is dequeued but not invalidated; the application must deallocate its memory later; see udpard_tx_free(). -/// The memory SHALL NOT be deallocated UNTIL this function is invoked. -/// If any of the arguments are NULL, the function has no effect. -void udpard_tx_pop(udpard_tx_t* const self, udpard_tx_item_t* const item); - -/// This is a simple helper that frees the memory allocated for the item and its payload. -/// If the item argument is NULL, the function has no effect. The time complexity is constant. -/// If the item frame payload is NULL then it is assumed that the payload buffer was already freed, -/// or moved to a different owner (f.e. to the media layer). -void udpard_tx_free(const udpard_tx_mem_resources_t memory, udpard_tx_item_t* const item); +/// This should be invoked whenever the socket/NIC of this queue becomes ready to accept new datagrams for transmission. +/// It is fine to also invoke it periodically unconditionally to drive the transmission process. +/// Internally, the function will query the scheduler for the next frame to be transmitted and will attempt +/// to submit it via the eject() callback provided in the vtable. +/// The function may deallocate memory. The time complexity is logarithmic in the number of enqueued transfers. +void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now); + +/// Drops all enqueued items; afterward, the instance is safe to discard. +void udpard_tx_free(udpard_tx_t* const self); // ===================================================================================================================== // ================================================= RX PIPELINE ================================================= From 1e7a967ba3c102557e95bea7eb3436df6ebc4957 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Wed, 24 Dec 2025 23:01:59 +0200 Subject: [PATCH 04/42] progress --- libudpard/udpard.c | 308 ++++++++++++++++++++++----------------------- libudpard/udpard.h | 16 +-- 2 files changed, 160 insertions(+), 164 deletions(-) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index e6de9d1..af75c7e 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -107,6 +107,44 @@ static void mem_free_payload(const udpard_mem_deleter_t memory, const udpard_byt } } +static byte_t* serialize_u32(byte_t* ptr, const uint32_t value) +{ + for (size_t i = 0; i < sizeof(value); i++) { + *ptr++ = (byte_t)((byte_t)(value >> (i * 8U)) & 0xFFU); + } + return ptr; +} + +static byte_t* serialize_u64(byte_t* ptr, const uint64_t value) +{ + for (size_t i = 0; i < sizeof(value); i++) { + *ptr++ = (byte_t)((byte_t)(value >> (i * 8U)) & 0xFFU); + } + return ptr; +} + +static const byte_t* deserialize_u32(const byte_t* ptr, uint32_t* const out_value) +{ + UDPARD_ASSERT((ptr != NULL) && (out_value != NULL)); + *out_value = 0; + for (size_t i = 0; i < sizeof(*out_value); i++) { + *out_value |= (uint32_t)((uint32_t)*ptr << (i * 8U)); // NOLINT(google-readability-casting) NOSONAR + ptr++; + } + return ptr; +} + +static const byte_t* deserialize_u64(const byte_t* ptr, uint64_t* const out_value) +{ + UDPARD_ASSERT((ptr != NULL) && (out_value != NULL)); + *out_value = 0; + for (size_t i = 0; i < sizeof(*out_value); i++) { + *out_value |= ((uint64_t)*ptr << (i * 8U)); + ptr++; + } + return ptr; +} + // NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling) static void mem_zero(const size_t size, void* const data) { (void)memset(data, 0, size); } @@ -290,6 +328,52 @@ static uint32_t crc_full(const size_t n_bytes, const void* const data) return crc_add(CRC_INITIAL, n_bytes, data) ^ CRC_OUTPUT_XOR; } +// --------------------------------------------- LIST CONTAINER --------------------------------------------- + +/// No effect if not in the list. +static void delist(udpard_list_t* const list, udpard_list_member_t* const member) +{ + if (member->next != NULL) { + member->next->prev = member->prev; + } + if (member->prev != NULL) { + member->prev->next = member->next; + } + if (list->head == member) { + list->head = member->next; + } + if (list->tail == member) { + list->tail = member->prev; + } + member->next = NULL; + member->prev = NULL; + assert((list->head != NULL) == (list->tail != NULL)); +} + +/// If the item is already in the list, it will be delisted first. Can be used for moving to the front. +static void enlist_head(udpard_list_t* const list, udpard_list_member_t* const member) +{ + delist(list, member); + assert((member->next == NULL) && (member->prev == NULL)); + assert((list->head != NULL) == (list->tail != NULL)); + member->next = list->head; + if (list->head != NULL) { + list->head->prev = member; + } + list->head = member; + if (list->tail == NULL) { + list->tail = member; + } + assert((list->head != NULL) && (list->tail != NULL)); +} + +#define LIST_MEMBER(ptr, owner_type, owner_field) ((owner_type*)unbias_ptr((ptr), offsetof(owner_type, owner_field))) +static void* unbias_ptr(const void* const ptr, const size_t offset) +{ + return (ptr == NULL) ? NULL : (void*)((char*)ptr - offset); +} +#define LIST_TAIL(list, owner_type, owner_field) LIST_MEMBER((list).tail, owner_type, owner_field) + // --------------------------------------------------------------------------------------------------------------------- // --------------------------------------------- HEADER --------------------------------------------- // --------------------------------------------------------------------------------------------------------------------- @@ -309,44 +393,6 @@ typedef struct uint64_t topic_hash; } meta_t; -static byte_t* serialize_u32(byte_t* ptr, const uint32_t value) -{ - for (size_t i = 0; i < sizeof(value); i++) { - *ptr++ = (byte_t)((byte_t)(value >> (i * 8U)) & 0xFFU); - } - return ptr; -} - -static byte_t* serialize_u64(byte_t* ptr, const uint64_t value) -{ - for (size_t i = 0; i < sizeof(value); i++) { - *ptr++ = (byte_t)((byte_t)(value >> (i * 8U)) & 0xFFU); - } - return ptr; -} - -static const byte_t* deserialize_u32(const byte_t* ptr, uint32_t* const out_value) -{ - UDPARD_ASSERT((ptr != NULL) && (out_value != NULL)); - *out_value = 0; - for (size_t i = 0; i < sizeof(*out_value); i++) { - *out_value |= (uint32_t)((uint32_t)*ptr << (i * 8U)); // NOLINT(google-readability-casting) NOSONAR - ptr++; - } - return ptr; -} - -static const byte_t* deserialize_u64(const byte_t* ptr, uint64_t* const out_value) -{ - UDPARD_ASSERT((ptr != NULL) && (out_value != NULL)); - *out_value = 0; - for (size_t i = 0; i < sizeof(*out_value); i++) { - *out_value |= ((uint64_t)*ptr << (i * 8U)); - ptr++; - } - return ptr; -} - static byte_t* header_serialize(byte_t* const buffer, const meta_t meta, const uint32_t frame_index, @@ -419,135 +465,89 @@ static bool header_deserialize(const udpard_bytes_mut_t dgram_payload, return ok; } -// --------------------------------------------- LIST CONTAINER --------------------------------------------- - -/// No effect if not in the list. -static void delist(udpard_list_t* const list, udpard_list_member_t* const member) -{ - if (member->next != NULL) { - member->next->prev = member->prev; - } - if (member->prev != NULL) { - member->prev->next = member->next; - } - if (list->head == member) { - list->head = member->next; - } - if (list->tail == member) { - list->tail = member->prev; - } - member->next = NULL; - member->prev = NULL; - assert((list->head != NULL) == (list->tail != NULL)); -} - -/// If the item is already in the list, it will be delisted first. Can be used for moving to the front. -static void enlist_head(udpard_list_t* const list, udpard_list_member_t* const member) -{ - delist(list, member); - assert((member->next == NULL) && (member->prev == NULL)); - assert((list->head != NULL) == (list->tail != NULL)); - member->next = list->head; - if (list->head != NULL) { - list->head->prev = member; - } - list->head = member; - if (list->tail == NULL) { - list->tail = member; - } - assert((list->head != NULL) && (list->tail != NULL)); -} - -#define LIST_MEMBER(ptr, owner_type, owner_field) ((owner_type*)unbias_ptr((ptr), offsetof(owner_type, owner_field))) -static void* unbias_ptr(const void* const ptr, const size_t offset) -{ - return (ptr == NULL) ? NULL : (void*)((char*)ptr - offset); -} -#define LIST_TAIL(list, owner_type, owner_field) LIST_MEMBER((list).tail, owner_type, owner_field) - // --------------------------------------------------------------------------------------------------------------------- // --------------------------------------------- TX PIPELINE --------------------------------------------- // --------------------------------------------------------------------------------------------------------------------- -typedef struct tx_item_t -{ - udpard_tree_t index_order; - udpard_tree_t index_deadline; - - struct tx_item_t* head; ///< Points to the frame where offset=0. Points to itself if this is the first frame. - struct tx_item_t* next; ///< Next frame in this transfer ordered by offset; NULL if last. - - udpard_us_t deadline; - udpard_prio_t priority; - udpard_udpip_ep_t destination; - udpard_bytes_mut_t datagram_payload; - void* user_transfer_reference; -} tx_item_t; +/// This may be allocated in the NIC DMA region so we keep overheads tight. +/// An alternative approach would be to have a flex array of tx_frame_t* pointers in the tx_transfer_t. +typedef struct tx_frame_t +{ + struct tx_frame_t* next; + byte_t data[]; +} tx_frame_t; + +typedef struct tx_transfer_t +{ + /// Various indexes this transfer is a member of. + udpard_tree_t index_schedule; ///< Transmission order: next to transmit on the left. + udpard_tree_t index_deadline; ///< Soonest to expire on the left. + udpard_tree_t index_id; ///< Ordered by the topic hash and the transfer-ID. + + /// We always keep a pointer to the head, plus a cursor that scans the frames during transmission. + /// Both are NULL if the payload is destroyed. + /// The head points to the first frame unless it is known that no (further) retransmissions are needed, + /// in which case the old head is deleted and the head points to the next frame to transmit. + tx_frame_t* head; + tx_frame_t* cursor; + + /// Retransmission state. + uint16_t retries; + udpard_us_t next_attempt_at; + + /// All frames except for the last one share the same MTU, so there's no use keeping dedicated size per frame. + size_t mtu; + size_t mtu_last; + + /// Constant transfer properties supplied by the client. + uint64_t topic_hash; + uint64_t transfer_id; + udpard_us_t deadline; + bool reliable; + udpard_prio_t priority; + udpard_udpip_ep_t destination; + void* user_transfer_reference; +} tx_transfer_t; static bool tx_validate_mem_resources(const udpard_tx_mem_resources_t memory) { - return (memory.fragment.alloc != NULL) && (memory.fragment.free != NULL) && // + return (memory.meta.alloc != NULL) && (memory.meta.free != NULL) && // (memory.payload.alloc != NULL) && (memory.payload.free != NULL); } /// Frames with identical weight are processed in the FIFO order. static int32_t tx_cavl_compare_prio(const void* const user, const udpard_tree_t* const node) { - return (((int)*(const udpard_prio_t*)user) >= (int)CAVL2_TO_OWNER(node, tx_item_t, index_order)->priority) ? +1 - : -1; + return (((int)*(const udpard_prio_t*)user) >= (int)CAVL2_TO_OWNER(node, tx_frame_t, index_order)->priority) ? +1 + : -1; } static int32_t tx_cavl_compare_deadline(const void* const user, const udpard_tree_t* const node) { - return ((*(const udpard_us_t*)user) >= CAVL2_TO_OWNER(node, tx_item_t, index_deadline)->deadline) ? +1 : -1; + return ((*(const udpard_us_t*)user) >= CAVL2_TO_OWNER(node, tx_frame_t, index_deadline)->deadline) ? +1 : -1; } -static tx_item_t* tx_item_new(const udpard_tx_mem_resources_t memory, - const udpard_us_t deadline, - const udpard_prio_t priority, - const udpard_udpip_ep_t endpoint, - const size_t datagram_payload_size, - void* const user_transfer_reference) +static tx_frame_t* tx_frame_new(const udpard_tx_mem_resources_t memory, const size_t payload_size) { - tx_item_t* out = mem_alloc(memory.fragment, sizeof(tx_item_t)); + tx_frame_t* const out = mem_alloc(memory.payload, sizeof(tx_frame_t) + payload_size); if (out != NULL) { - out->index_order = (udpard_tree_t){ 0 }; - out->index_deadline = (udpard_tree_t){ 0 }; - UDPARD_ASSERT(priority <= UDPARD_PRIORITY_MAX); - out->priority = priority; - out->head = out; // First by default. - out->next = NULL; // Last by default. - out->deadline = deadline; - out->destination = endpoint; - out->user_transfer_reference = user_transfer_reference; - void* const payload_data = mem_alloc(memory.payload, datagram_payload_size); - if (NULL != payload_data) { - out->datagram_payload.data = payload_data; - out->datagram_payload.size = datagram_payload_size; - } else { - mem_free(memory.fragment, sizeof(tx_item_t), out); - out = NULL; - } + out->next = NULL; // Last by default. } return out; } typedef struct { - tx_item_t* head; - tx_item_t* tail; - size_t count; + tx_frame_t* head; + tx_frame_t* tail; + size_t count; } tx_chain_t; -/// Produces a chain of tx queue items for later insertion into the tx queue. The tail is NULL if OOM. -/// The caller is responsible for freeing the memory allocated for the chain. +/// The tail is NULL if OOM. The caller is responsible for freeing the memory allocated for the chain. static tx_chain_t tx_spool(const udpard_tx_mem_resources_t memory, const size_t mtu, - const udpard_us_t deadline, const meta_t meta, - const udpard_udpip_ep_t endpoint, - const udpard_bytes_t payload, - void* const user_transfer_reference) + const udpard_bytes_t payload) { UDPARD_ASSERT(mtu > 0); UDPARD_ASSERT((payload.data != NULL) || (payload.size == 0U)); @@ -555,13 +555,8 @@ static tx_chain_t tx_spool(const udpard_tx_mem_resources_t memory, tx_chain_t out = { NULL, NULL, 0 }; size_t offset = 0U; do { - const size_t progress = smaller(payload.size - offset, mtu); - tx_item_t* const item = tx_item_new(memory, // - deadline, - meta.priority, - endpoint, - progress + HEADER_SIZE_BYTES, - user_transfer_reference); + const size_t progress = smaller(payload.size - offset, mtu); + tx_frame_t* const item = tx_frame_new(memory, progress + HEADER_SIZE_BYTES); if (NULL == out.head) { out.head = item; } else { @@ -571,11 +566,10 @@ static tx_chain_t tx_spool(const udpard_tx_mem_resources_t memory, if (NULL == out.tail) { break; } - item->head = out.head; // All frames in a transfer have a pointer to the head. const byte_t* const read_ptr = ((const byte_t*)payload.data) + offset; prefix_crc = crc_add(prefix_crc, progress, read_ptr); - byte_t* const write_ptr = header_serialize( - item->datagram_payload.data, meta, (uint32_t)out.count, (uint32_t)offset, prefix_crc ^ CRC_OUTPUT_XOR); + byte_t* const write_ptr = + header_serialize(item->data, meta, (uint32_t)out.count, (uint32_t)offset, prefix_crc ^ CRC_OUTPUT_XOR); (void)memcpy(write_ptr, read_ptr, progress); // NOLINT(*DeprecatedOrUnsafeBufferHandling) offset += progress; UDPARD_ASSERT(offset <= payload.size); @@ -587,7 +581,7 @@ static tx_chain_t tx_spool(const udpard_tx_mem_resources_t memory, /// Derives the ack timeout for an outgoing transfer using an empirical formula. /// The number of retries is initially zero when the transfer is sent for the first time. -static udpard_us_t tx_ack_timeout(const udpard_us_t baseline, const udpard_prio_t prio, const uint_fast8_t retries) +static udpard_us_t tx_ack_timeout(const udpard_us_t baseline, const udpard_prio_t prio, const uint16_t retries) { return baseline * (1L << smaller((uint16_t)prio + retries, 15)); // NOLINT(*-signed-bitwise) } @@ -608,7 +602,7 @@ static uint32_t tx_push(udpard_tx_t* const tx, } else { const tx_chain_t chain = tx_spool(tx->memory, mtu, deadline, meta, endpoint, payload, user_transfer_reference); if (chain.tail != NULL) { // Insert the head into the tx index. Only the head, the rest is linked-listed. - tx_item_t* const head = chain.head; + tx_frame_t* const head = chain.head; UDPARD_ASSERT(frame_count == chain.count); const udpard_tree_t* res = cavl2_find_or_insert( &tx->index_order, &head->priority, &tx_cavl_compare_prio, &head->index_order, &cavl2_trivial_factory); @@ -626,11 +620,11 @@ static uint32_t tx_push(udpard_tx_t* const tx, out = (uint32_t)chain.count; } else { // The queue is large enough but we ran out of heap memory, so we have to unwind the chain. tx->errors_oom++; - tx_item_t* head = chain.head; + tx_frame_t* head = chain.head; while (head != NULL) { - tx_item_t* const next = head->next; + tx_frame_t* const next = head->next; mem_free(tx->memory.payload, head->datagram_payload.size, head->datagram_payload.data); - mem_free(tx->memory.fragment, sizeof(tx_item_t), head); + mem_free(tx->memory.fragment, sizeof(tx_frame_t), head); head = next; } } @@ -642,7 +636,7 @@ static uint64_t tx_purge_expired(udpard_tx_t* const self, const udpard_us_t now) { uint64_t count = 0; for (udpard_tree_t* p = cavl2_min(self->index_deadline); p != NULL;) { - tx_item_t* const item = CAVL2_TO_OWNER(p, tx_item_t, index_deadline); + tx_frame_t* const item = CAVL2_TO_OWNER(p, tx_frame_t, index_deadline); if (item->deadline >= now) { break; } @@ -651,11 +645,11 @@ static uint64_t tx_purge_expired(udpard_tx_t* const self, const udpard_us_t now) cavl2_remove(&self->index_deadline, &item->index_deadline); cavl2_remove(&self->index_order, &item->index_order); // Free the entire transfer chain. - tx_item_t* current = item; + tx_frame_t* current = item; while (current != NULL) { - tx_item_t* const next_in_transfer = current->next; + tx_frame_t* const next_in_transfer = current->next; mem_free(self->memory.payload, current->datagram_payload.size, current->datagram_payload.data); - mem_free(self->memory.fragment, sizeof(tx_item_t), current); + mem_free(self->memory.fragment, sizeof(tx_frame_t), current); current = next_in_transfer; count++; self->queue_size--; @@ -758,11 +752,11 @@ uint32_t udpard_tx_push(udpard_tx_t* const self, void* const user_transfer_reference) { uint32_t out = 0; - const bool ok = (self != NULL) && (deadline >= now) && (self->local_uid != 0) && + const bool ok = (self != NULL) && (deadline >= now) && (now >= 0) && (self->local_uid != 0) && udpard_is_valid_endpoint(remote_ep) && (priority <= UDPARD_PRIORITY_MAX) && ((payload.data != NULL) || (payload.size == 0U)); if (ok) { - self->errors_expiration += tx_purge_expired(self, now); + udpard_tx_poll(self, now); // Free up expired transfers before attempting to enqueue a new one. const meta_t meta = { .priority = priority, .flag_ack = reliable, @@ -792,7 +786,7 @@ void udpard_tx_free(udpard_tx_t* const self) if (self != NULL) { // TODO: do this for all items in the queue: // mem_free(memory.payload, item->datagram_payload.size, item->datagram_payload.data); - // mem_free(memory.fragment, sizeof(tx_item_t), item); + // mem_free(memory.fragment, sizeof(tx_frame_t), item); } } diff --git a/libudpard/udpard.h b/libudpard/udpard.h index d77b4b9..23ca003 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -294,12 +294,13 @@ typedef struct udpard_tx_t udpard_tx_t; /// If the application knows its MTU, it can use block allocation to avoid extrinsic fragmentation. typedef struct udpard_tx_mem_resources_t { - /// The queue bookkeeping structures (udpard_tx_item_t) are allocated per datagram. - /// Each instance is a very small fixed-size object, so a trivial zero-fragmentation block allocator is enough. - udpard_mem_resource_t fragment; + /// The queue bookkeeping structures are allocated per datagram. + /// Each instance is a small fixed-size object, so a trivial zero-fragmentation block allocator is enough. + udpard_mem_resource_t meta; - /// The UDP datagram payload buffers are allocated per frame; each buffer is at most MTU-sized, - /// so a trivial zero-fragmentation MTU-sized block allocator is enough if MTU is known in advance. + /// The UDP datagram payload buffers are allocated per frame; each buffer is of size at most + /// (MTU+sizeof(void*)+HEADER_SIZE) bytes, so a trivial zero-fragmentation MTU-sized+pointer block allocator + /// is enough if MTU is known in advance. udpard_mem_resource_t payload; } udpard_tx_mem_resources_t; @@ -323,7 +324,8 @@ typedef struct udpard_tx_vtable_t const udpard_us_t deadline, const uint_fast8_t dscp, const udpard_udpip_ep_t destination, - const udpard_bytes_mut_t datagram_payload, + const udpard_bytes_t datagram_view, ///< Transmit this. Do not free() this. + const udpard_bytes_mut_t datagram_origin, ///< free() only this. void* const user_transfer_reference); /// Invoked from udpard_tx_poll() to report the result of reliable transfer transmission attempts. @@ -450,7 +452,7 @@ uint32_t udpard_tx_push(udpard_tx_t* const self, const udpard_udpip_ep_t remote_ep, const uint64_t transfer_id, const udpard_bytes_t payload, - const bool reliable, + const bool reliable, // Will keep retransmitting until acked or deadline reached. void* const user_transfer_reference); /// This should be invoked whenever the socket/NIC of this queue becomes ready to accept new datagrams for transmission. From 4bd91fb9d409c77119ca2463227be61f0fb18608 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Thu, 25 Dec 2025 17:59:31 +0200 Subject: [PATCH 05/42] wip --- libudpard/udpard.c | 149 +++++++++++++++++++++++---------------------- libudpard/udpard.h | 77 +++++++++++++++-------- 2 files changed, 127 insertions(+), 99 deletions(-) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index af75c7e..694c9c6 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -470,13 +470,14 @@ static bool header_deserialize(const udpard_bytes_mut_t dgram_payload, // --------------------------------------------------------------------------------------------------------------------- /// This may be allocated in the NIC DMA region so we keep overheads tight. -/// An alternative approach would be to have a flex array of tx_frame_t* pointers in the tx_transfer_t. typedef struct tx_frame_t { struct tx_frame_t* next; byte_t data[]; } tx_frame_t; +static size_t tx_frame_size(const size_t mtu) { return sizeof(tx_frame_t) + mtu + HEADER_SIZE_BYTES; } + typedef struct tx_transfer_t { /// Various indexes this transfer is a member of. @@ -492,8 +493,8 @@ typedef struct tx_transfer_t tx_frame_t* cursor; /// Retransmission state. - uint16_t retries; - udpard_us_t next_attempt_at; + uint_fast8_t retries; + udpard_us_t next_attempt_at; /// All frames except for the last one share the same MTU, so there's no use keeping dedicated size per frame. size_t mtu; @@ -511,7 +512,7 @@ typedef struct tx_transfer_t static bool tx_validate_mem_resources(const udpard_tx_mem_resources_t memory) { - return (memory.meta.alloc != NULL) && (memory.meta.free != NULL) && // + return (memory.transfer.alloc != NULL) && (memory.transfer.free != NULL) && // (memory.payload.alloc != NULL) && (memory.payload.free != NULL); } @@ -527,63 +528,63 @@ static int32_t tx_cavl_compare_deadline(const void* const user, const udpard_tre return ((*(const udpard_us_t*)user) >= CAVL2_TO_OWNER(node, tx_frame_t, index_deadline)->deadline) ? +1 : -1; } -static tx_frame_t* tx_frame_new(const udpard_tx_mem_resources_t memory, const size_t payload_size) -{ - tx_frame_t* const out = mem_alloc(memory.payload, sizeof(tx_frame_t) + payload_size); - if (out != NULL) { - out->next = NULL; // Last by default. - } - return out; -} - -typedef struct -{ - tx_frame_t* head; - tx_frame_t* tail; - size_t count; -} tx_chain_t; - -/// The tail is NULL if OOM. The caller is responsible for freeing the memory allocated for the chain. -static tx_chain_t tx_spool(const udpard_tx_mem_resources_t memory, - const size_t mtu, - const meta_t meta, - const udpard_bytes_t payload) +/// Returns the head of the transfer chain; NULL on OOM. +static tx_frame_t* tx_spool(const udpard_tx_mem_resources_t memory, + const size_t mtu, + const meta_t meta, + const udpard_bytes_t payload) { UDPARD_ASSERT(mtu > 0); UDPARD_ASSERT((payload.data != NULL) || (payload.size == 0U)); - uint32_t prefix_crc = CRC_INITIAL; - tx_chain_t out = { NULL, NULL, 0 }; - size_t offset = 0U; + uint32_t prefix_crc = CRC_INITIAL; + tx_frame_t* head = NULL; + tx_frame_t* tail = NULL; + size_t frame_index = 0U; + size_t offset = 0U; + // Run the O(n) copy loop, where n is the payload size. + // The client doesn't have to ensure that the payload data survives beyond this function call. do { - const size_t progress = smaller(payload.size - offset, mtu); - tx_frame_t* const item = tx_frame_new(memory, progress + HEADER_SIZE_BYTES); - if (NULL == out.head) { - out.head = item; - } else { - out.tail->next = item; + // Compute the size of the next frame, allocate it and link it up in the chain. + const size_t progress = smaller(payload.size - offset, mtu); + { + tx_frame_t* const item = mem_alloc(memory.payload, sizeof(tx_frame_t) + progress + HEADER_SIZE_BYTES); + if (NULL == head) { + head = item; + } else { + tail->next = item; + } + tail = item; } - out.tail = item; - if (NULL == out.tail) { + // On OOM, deallocate the entire chain and quit. + if (NULL == tail) { + while (head != NULL) { + tx_frame_t* const next = head->next; + mem_free(memory.payload, tx_frame_size((head == tail) ? progress : mtu), head); + head = next; + } break; } + // Populate the frame contents. + tail->next = NULL; const byte_t* const read_ptr = ((const byte_t*)payload.data) + offset; prefix_crc = crc_add(prefix_crc, progress, read_ptr); byte_t* const write_ptr = - header_serialize(item->data, meta, (uint32_t)out.count, (uint32_t)offset, prefix_crc ^ CRC_OUTPUT_XOR); + header_serialize(tail->data, meta, (uint32_t)frame_index, (uint32_t)offset, prefix_crc ^ CRC_OUTPUT_XOR); (void)memcpy(write_ptr, read_ptr, progress); // NOLINT(*DeprecatedOrUnsafeBufferHandling) + // Advance the state. + ++frame_index; offset += progress; UDPARD_ASSERT(offset <= payload.size); - out.count++; } while (offset < payload.size); - UDPARD_ASSERT((offset == payload.size) || (out.tail == NULL)); - return out; + UDPARD_ASSERT((offset == payload.size) || ((head == NULL) && (tail == NULL))); + return head; } -/// Derives the ack timeout for an outgoing transfer using an empirical formula. +/// Derives the ack timeout for an outgoing transfer. /// The number of retries is initially zero when the transfer is sent for the first time. -static udpard_us_t tx_ack_timeout(const udpard_us_t baseline, const udpard_prio_t prio, const uint16_t retries) +static udpard_us_t tx_ack_timeout(const udpard_us_t baseline, const udpard_prio_t prio, const uint_fast8_t retries) { - return baseline * (1L << smaller((uint16_t)prio + retries, 15)); // NOLINT(*-signed-bitwise) + return baseline * (1L << smaller((size_t)prio + retries, UDPARD_TX_RETRY_MAX)); // NOLINT(*-signed-bitwise) } static uint32_t tx_push(udpard_tx_t* const tx, @@ -594,39 +595,41 @@ static uint32_t tx_push(udpard_tx_t* const tx, void* const user_transfer_reference) { UDPARD_ASSERT(tx != NULL); - uint32_t out = 0; // The number of frames enqueued; zero on error (error counters incremented). - const size_t mtu = larger(tx->mtu, UDPARD_MTU_MIN); - const size_t frame_count = larger(1, (payload.size + mtu - 1U) / mtu); - if ((tx->queue_size + frame_count) > tx->queue_capacity) { + uint32_t out = 0; // The number of frames enqueued; zero on error (error counters incremented). + const size_t payload_size = payload.size; + const size_t mtu = larger(tx->mtu, UDPARD_MTU_MIN); + const size_t mtu_last = ((payload_size % mtu) != 0U) ? (payload_size % mtu) : mtu; + const size_t n_frames = larger(1, (payload_size + mtu - 1U) / mtu); + if ((tx->queue_size + n_frames) > tx->queue_capacity) { tx->errors_capacity++; } else { - const tx_chain_t chain = tx_spool(tx->memory, mtu, deadline, meta, endpoint, payload, user_transfer_reference); - if (chain.tail != NULL) { // Insert the head into the tx index. Only the head, the rest is linked-listed. - tx_frame_t* const head = chain.head; - UDPARD_ASSERT(frame_count == chain.count); - const udpard_tree_t* res = cavl2_find_or_insert( - &tx->index_order, &head->priority, &tx_cavl_compare_prio, &head->index_order, &cavl2_trivial_factory); - UDPARD_ASSERT(res == &head->index_order); - (void)res; - res = cavl2_find_or_insert(&tx->index_deadline, - &head->deadline, - &tx_cavl_compare_deadline, - &head->index_deadline, - &cavl2_trivial_factory); - UDPARD_ASSERT(res == &head->index_deadline); - (void)res; - tx->queue_size += chain.count; - UDPARD_ASSERT(tx->queue_size <= tx->queue_capacity); - out = (uint32_t)chain.count; - } else { // The queue is large enough but we ran out of heap memory, so we have to unwind the chain. - tx->errors_oom++; - tx_frame_t* head = chain.head; - while (head != NULL) { - tx_frame_t* const next = head->next; - mem_free(tx->memory.payload, head->datagram_payload.size, head->datagram_payload.data); - mem_free(tx->memory.fragment, sizeof(tx_frame_t), head); - head = next; + tx_transfer_t* const tr = mem_alloc(tx->memory.transfer, sizeof(tx_transfer_t)); + if (tr != NULL) { + mem_zero(sizeof(*tr), tr); + tr->retries = 0; + tr->next_attempt_at = BIG_BANG; // TODO: we can implement time-triggered comms here. + tr->mtu = mtu; + tr->mtu_last = mtu_last; + tr->topic_hash = meta.topic_hash; + tr->transfer_id = meta.transfer_id; + tr->deadline = deadline; + tr->reliable = meta.flag_ack; + tr->priority = meta.priority; + tr->destination = endpoint; + tr->user_transfer_reference = user_transfer_reference; + tr->head = tr->cursor = tx_spool(tx->memory, mtu, meta, payload); + if (tr->head != NULL) { + // TODO: insert + // Finalize + tx->queue_size += n_frames; + UDPARD_ASSERT(tx->queue_size <= tx->queue_capacity); + out = (uint32_t)n_frames; + } else { // The queue is large enough but we ran out of heap memory. + tx->errors_oom++; + mem_free(tx->memory.transfer, sizeof(tx_transfer_t), tr); } + } else { // The queue is large enough but we couldn't allocate the transfer metadata object. + tx->errors_oom++; } } return out; diff --git a/libudpard/udpard.h b/libudpard/udpard.h index 23ca003..b059266 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -27,6 +27,10 @@ /// - sizeof(rx_session_t) blocks for the RX pipeline. /// - sizeof(udpard_fragment_t) blocks for the RX pipeline. /// +/// Suitable allocators may be found here: +/// - Constant-time ultrafast deterministic heap: https://github.com/pavel-kirienko/o1heap +/// - Single-header fixed-size block pool: https://gist.github.com/pavel-kirienko/daf89e0481e6eac0f1fa8a7614667f59 +/// /// -------------------------------------------------------------------------------------------------------------------- /// /// This software is distributed under the terms of the MIT License. @@ -91,6 +95,9 @@ typedef int64_t udpard_us_t; /// See udpard_tx_t::ack_baseline_timeout. #define UDPARD_TX_ACK_BASELINE_TIMEOUT_DEFAULT_us 16000LL +/// The maximum number of transmission attempts for a transfer is capped at this value irrespective of other settings. +#define UDPARD_TX_RETRY_MAX 31U + /// The subject-ID only affects the formation of the multicast UDP/IP endpoint address. /// In IPv4 networks, it is limited to 23 bits only due to the limited MAC multicast address space. /// In IPv6 networks, 32 bits are supported. @@ -294,16 +301,29 @@ typedef struct udpard_tx_t udpard_tx_t; /// If the application knows its MTU, it can use block allocation to avoid extrinsic fragmentation. typedef struct udpard_tx_mem_resources_t { - /// The queue bookkeeping structures are allocated per datagram. - /// Each instance is a small fixed-size object, so a trivial zero-fragmentation block allocator is enough. - udpard_mem_resource_t meta; + /// The queue bookkeeping structures are allocated per outgoing transfer. + /// Each instance is sizeof(tx_transfer_t), so a trivial zero-fragmentation block allocator is enough. + udpard_mem_resource_t transfer; /// The UDP datagram payload buffers are allocated per frame; each buffer is of size at most - /// (MTU+sizeof(void*)+HEADER_SIZE) bytes, so a trivial zero-fragmentation MTU-sized+pointer block allocator - /// is enough if MTU is known in advance. + /// (HEADER_SIZE+MTU+sizeof(void*)) bytes, so a trivial block pool is enough if MTU is known in advance. udpard_mem_resource_t payload; } udpard_tx_mem_resources_t; +/// Outcome notification for a reliable transfer previously scheduled for transmission. +typedef struct udpard_tx_feedback_t +{ + uint64_t topic_hash; + uint32_t transfer_id; + udpard_udpip_ep_t remote_ep; + + uint_fast8_t retries; ///< The number of attempts equals retries plus one. + bool success; ///< False if no ack was received from the remote end before deadline expiration. + + /// This is the same pointer that was passed to udpard_tx_push(). + void* user_transfer_reference; +} udpard_tx_feedback_t; + /// The TX frame ejection handler returns one of these results to guide the udpard_tx_poll() logic. typedef enum udpard_tx_eject_result_t { @@ -312,33 +332,38 @@ typedef enum udpard_tx_eject_result_t udpard_tx_eject_failed, ///< An unrecoverable error occurred while submitting the frame; drop it from the TX queue. } udpard_tx_eject_result_t; +typedef struct udpard_tx_ejection_t +{ + udpard_us_t now; + + /// Specifies when the frame should be considered expired and dropped if not yet transmitted; + /// it is optional to use depending on the implementation of the NIC driver (most traditional drivers ignore it). + udpard_us_t deadline; + + uint_fast8_t dscp; ///< Set the DSCP field of the outgoing packet to this. + udpard_udpip_ep_t destination; ///< Unicast or multicast UDP/IP endpoint. + + /// If the result is udpard_tx_eject_success, the application is responsible for freeing the datagram_origin.data + /// using self->memory.payload.free() at some point in the future (either within the callback or later), + /// unless datagram_origin.data is NULL, in which case the library will retain the ownership. + /// It may help to know that the view is a small fixed offset greater than the origin, + /// so both may not have to be kept, depending on the implementation. + udpard_bytes_t datagram_view; ///< Transmit this; do not free it. + udpard_bytes_mut_t datagram_origin; ///< Free this unless NULL. + + /// This is the same pointer that was passed to udpard_tx_push(). + void* user_transfer_reference; +} udpard_tx_ejection_t; + typedef struct udpard_tx_vtable_t { /// Invoked from udpard_tx_poll() to push outgoing UDP datagrams into the socket/NIC driver. - /// The deadline specifies when the frame should be considered expired and dropped if not yet transmitted; - /// it is optional to use depending on the implementation of the NIC driver (most traditional drivers ignore it). - /// If the result is udpard_tx_eject_success, the application is responsible for freeing the datagram_payload.data - /// using self->memory.payload.free() at some point in the future (either within the callback or later). - udpard_tx_eject_result_t (*eject)(udpard_tx_t* const self, - const udpard_us_t now, - const udpard_us_t deadline, - const uint_fast8_t dscp, - const udpard_udpip_ep_t destination, - const udpard_bytes_t datagram_view, ///< Transmit this. Do not free() this. - const udpard_bytes_mut_t datagram_origin, ///< free() only this. - void* const user_transfer_reference); + udpard_tx_eject_result_t (*eject)(udpard_tx_t*, udpard_tx_ejection_t); /// Invoked from udpard_tx_poll() to report the result of reliable transfer transmission attempts. - /// This is ALWAYS invoked EXACTLY ONCE per reliable transfer pushed via udpard_tx_push(); + /// This is ALWAYS invoked EXACTLY ONCE per reliable transfer pushed via udpard_tx_push() successfully; /// this is NOT invoked for best-effort (non-reliable) transfers. - /// The user_transfer_reference is the same pointer that was passed to udpard_tx_push(). - /// The 'ok' flag is true if the transfer has been successfully confirmed by the remote end, false if timed out. - void (*feedback)(udpard_tx_t* const self, - const uint64_t topic_hash, - const uint32_t transfer_id, - const udpard_udpip_ep_t remote_ep, - void* const user_transfer_reference, - const bool ok); + void (*feedback)(udpard_tx_t*, udpard_tx_feedback_t); } udpard_tx_vtable_t; /// The transmission pipeline is a prioritized transmission queue that keeps UDP datagrams (aka transport frames) From 09d8ae86d3904ca5b4655804af71f87b910cebdc Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Thu, 25 Dec 2025 20:07:16 +0200 Subject: [PATCH 06/42] wip --- libudpard/udpard.c | 93 ++++++++++++++++++++++++++++++++++++++-------- libudpard/udpard.h | 8 ++-- 2 files changed, 82 insertions(+), 19 deletions(-) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index 694c9c6..e5fc391 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -470,20 +470,34 @@ static bool header_deserialize(const udpard_bytes_mut_t dgram_payload, // --------------------------------------------------------------------------------------------------------------------- /// This may be allocated in the NIC DMA region so we keep overheads tight. +/// An alternative solution is to allocate a flex array of void* pointers, one per fragment, directly in tx_transfer_t, +/// but it might create a bit more memory pressure on average. typedef struct tx_frame_t { struct tx_frame_t* next; byte_t data[]; } tx_frame_t; -static size_t tx_frame_size(const size_t mtu) { return sizeof(tx_frame_t) + mtu + HEADER_SIZE_BYTES; } +static size_t tx_frame_object_size(const size_t mtu) { return sizeof(tx_frame_t) + mtu + HEADER_SIZE_BYTES; } +typedef struct +{ + uint64_t topic_hash; + uint64_t transfer_id; +} tx_transfer_key_t; + +/// The transmission scheduler maintains several indexes for the transfers in the pipeline. +/// All index operations are logarithmic in the number of scheduled transfers. +/// The priority index only contains transfers that are ready for transmission (now>=ready_at). +/// The readiness index contains only transfers that are postponed (now= (int)CAVL2_TO_OWNER(node, tx_frame_t, index_order)->priority) ? +1 - : -1; + const udpard_prio_t key = *(const udpard_prio_t*)user; + const tx_transfer_t* const tr = CAVL2_TO_OWNER(node, tx_transfer_t, index_priority); + return (key >= tr->priority) ? +1 : -1; // higher prio is numerically less +} +static int32_t tx_cavl_compare_readiness(const void* const user, const udpard_tree_t* const node) +{ + return ((*(const udpard_us_t*)user) >= CAVL2_TO_OWNER(node, tx_transfer_t, index_readiness)->ready_at) ? +1 : -1; } - static int32_t tx_cavl_compare_deadline(const void* const user, const udpard_tree_t* const node) { - return ((*(const udpard_us_t*)user) >= CAVL2_TO_OWNER(node, tx_frame_t, index_deadline)->deadline) ? +1 : -1; + return ((*(const udpard_us_t*)user) >= CAVL2_TO_OWNER(node, tx_transfer_t, index_deadline)->deadline) ? +1 : -1; +} +static int32_t tx_cavl_compare_transfer(const void* const user, const udpard_tree_t* const node) +{ + const tx_transfer_key_t* const key = (const tx_transfer_key_t*)user; + const tx_transfer_t* const tr = CAVL2_TO_OWNER(node, tx_transfer_t, index_transfer); // clang-format off + if (key->topic_hash < tr->topic_hash) { return -1; } + if (key->topic_hash > tr->topic_hash) { return +1; } + if (key->transfer_id < tr->transfer_id) { return -1; } + if (key->transfer_id > tr->transfer_id) { return +1; } + return 0; // clang-format on } /// Returns the head of the transfer chain; NULL on OOM. @@ -559,7 +586,7 @@ static tx_frame_t* tx_spool(const udpard_tx_mem_resources_t memory, if (NULL == tail) { while (head != NULL) { tx_frame_t* const next = head->next; - mem_free(memory.payload, tx_frame_size((head == tail) ? progress : mtu), head); + mem_free(memory.payload, tx_frame_object_size((head == tail) ? progress : mtu), head); head = next; } break; @@ -587,7 +614,42 @@ static udpard_us_t tx_ack_timeout(const udpard_us_t baseline, const udpard_prio_ return baseline * (1L << smaller((size_t)prio + retries, UDPARD_TX_RETRY_MAX)); // NOLINT(*-signed-bitwise) } +static void tx_insert(udpard_tx_t* const tx, tx_transfer_t* const tr, const udpard_us_t now) +{ + const bool ready = now >= tr->ready_at; + if (ready) { + if (cavl2_is_inserted(tx->index_readiness, &tr->index_readiness)) { + cavl2_remove(&tx->index_readiness, &tr->index_readiness); + } + if (!cavl2_is_inserted(tx->index_priority, &tr->index_priority)) { + (void)cavl2_find_or_insert( + &tx->index_priority, &tr->priority, tx_cavl_compare_priority, &tr->index_priority, cavl2_trivial_factory); + } + } else { + if (cavl2_is_inserted(tx->index_priority, &tr->index_priority)) { + cavl2_remove(&tx->index_priority, &tr->index_priority); + } + if (!cavl2_is_inserted(tx->index_readiness, &tr->index_readiness)) { + (void)cavl2_find_or_insert(&tx->index_readiness, + &tr->ready_at, + tx_cavl_compare_readiness, + &tr->index_readiness, + cavl2_trivial_factory); + } + } + if (!cavl2_is_inserted(tx->index_transfer, &tr->index_transfer)) { + const tx_transfer_key_t key = { .topic_hash = tr->topic_hash, .transfer_id = tr->transfer_id }; + (void)cavl2_find_or_insert( + &tx->index_transfer, &key, tx_cavl_compare_transfer, &tr->index_transfer, cavl2_trivial_factory); + } + if (!cavl2_is_inserted(tx->index_deadline, &tr->index_deadline)) { + (void)cavl2_find_or_insert( + &tx->index_deadline, &tr->deadline, tx_cavl_compare_deadline, &tr->index_deadline, cavl2_trivial_factory); + } +} + static uint32_t tx_push(udpard_tx_t* const tx, + const udpard_us_t now, const udpard_us_t deadline, const meta_t meta, const udpard_udpip_ep_t endpoint, @@ -607,7 +669,7 @@ static uint32_t tx_push(udpard_tx_t* const tx, if (tr != NULL) { mem_zero(sizeof(*tr), tr); tr->retries = 0; - tr->next_attempt_at = BIG_BANG; // TODO: we can implement time-triggered comms here. + tr->ready_at = BIG_BANG; // We can implement time-triggered comms here. tr->mtu = mtu; tr->mtu_last = mtu_last; tr->topic_hash = meta.topic_hash; @@ -619,8 +681,7 @@ static uint32_t tx_push(udpard_tx_t* const tx, tr->user_transfer_reference = user_transfer_reference; tr->head = tr->cursor = tx_spool(tx->memory, mtu, meta, payload); if (tr->head != NULL) { - // TODO: insert - // Finalize + tx_insert(tx, tr, now); tx->queue_size += n_frames; UDPARD_ASSERT(tx->queue_size <= tx->queue_capacity); out = (uint32_t)n_frames; diff --git a/libudpard/udpard.h b/libudpard/udpard.h index b059266..01bcbf4 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -418,9 +418,11 @@ struct udpard_tx_t uint64_t errors_capacity; ///< A transfer could not be enqueued due to queue capacity limit. uint64_t errors_expiration; ///< A frame had to be dropped due to premature deadline expiration. - /// Internal use only, do not modify! - udpard_tree_t* index_order; ///< Most urgent on the left, then according to the insertion order. - udpard_tree_t* index_deadline; ///< Soonest on the left, then according to the insertion order. + /// Internal use only, do not modify! See tx_transfer_t for details. + udpard_tree_t* index_priority; + udpard_tree_t* index_readiness; + udpard_tree_t* index_deadline; + udpard_tree_t* index_transfer; /// Opaque pointer for the application use only. Not accessed by the library. void* user; From f379a5e75516366eb1a094fdebc6cd07867500e8 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Fri, 26 Dec 2025 17:58:07 +0200 Subject: [PATCH 07/42] advance --- libudpard/udpard.c | 344 +++++++++++++++++++++++++++++---------------- libudpard/udpard.h | 72 +++++----- 2 files changed, 259 insertions(+), 157 deletions(-) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index e5fc391..357a3e6 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -48,7 +48,7 @@ typedef unsigned char byte_t; ///< For compatibility with platforms where byte s /// The maximum number of incoming transfers that can be in the state of incomplete reassembly simultaneously. /// Additional transfers will replace the oldest ones. /// This number should normally be at least as large as there are priority levels. More is fine but rarely useful. -#define RX_SLOT_COUNT (UDPARD_PRIORITY_MAX + 1U) +#define RX_SLOT_COUNT UDPARD_PRIORITY_COUNT /// The number of most recent transfers to keep in the history for ACK retransmission and duplicate detection. /// Should be a power of two to allow replacement of modulo operation with a bitwise AND. @@ -487,28 +487,32 @@ typedef struct } tx_transfer_key_t; /// The transmission scheduler maintains several indexes for the transfers in the pipeline. -/// All index operations are logarithmic in the number of scheduled transfers. -/// The priority index only contains transfers that are ready for transmission (now>=ready_at). -/// The readiness index contains only transfers that are postponed (now=deadline, this is the last attempt; frames can be freed as they go out. /// All frames except for the last one share the same MTU, so there's no use keeping dedicated size per frame. size_t mtu; @@ -522,6 +526,8 @@ typedef struct tx_transfer_t udpard_prio_t priority; udpard_udpip_ep_t destination; void* user_transfer_reference; + + void (*feedback)(udpard_tx_t*, udpard_tx_feedback_t); } tx_transfer_t; static bool tx_validate_mem_resources(const udpard_tx_mem_resources_t memory) @@ -530,15 +536,37 @@ static bool tx_validate_mem_resources(const udpard_tx_mem_resources_t memory) (memory.payload.alloc != NULL) && (memory.payload.free != NULL); } -static int32_t tx_cavl_compare_priority(const void* const user, const udpard_tree_t* const node) +static void tx_transfer_free_payload(const udpard_tx_mem_resources_t mem, tx_transfer_t* const tr) { - const udpard_prio_t key = *(const udpard_prio_t*)user; - const tx_transfer_t* const tr = CAVL2_TO_OWNER(node, tx_transfer_t, index_priority); - return (key >= tr->priority) ? +1 : -1; // higher prio is numerically less + UDPARD_ASSERT(tr != NULL); + tx_frame_t* frame = tr->head; + while (frame != NULL) { + tx_frame_t* const next = frame->next; + const size_t mtu = (frame->next == NULL) ? tr->mtu_last : tr->mtu; + mem_free(mem.payload, tx_frame_object_size(mtu), frame); + frame = next; + } + tr->head = NULL; + tr->cursor = NULL; } -static int32_t tx_cavl_compare_readiness(const void* const user, const udpard_tree_t* const node) + +static void tx_transfer_free(udpard_tx_t* const tx, tx_transfer_t* const tr) +{ + UDPARD_ASSERT(tr != NULL); + tx_transfer_free_payload(tx->memory, tr); + // Remove the transfer from all indexes. + delist(&tx->queue[tr->priority], &tr->queue); + if (cavl2_is_inserted(tx->index_staged, &tr->index_staged)) { + cavl2_remove(&tx->index_staged, &tr->index_staged); + } + cavl2_remove(&tx->index_deadline, &tr->index_deadline); + cavl2_remove(&tx->index_transfer, &tr->index_transfer); + mem_free(tx->memory.transfer, sizeof(tx_transfer_t), tr); +} + +static int32_t tx_cavl_compare_staged(const void* const user, const udpard_tree_t* const node) { - return ((*(const udpard_us_t*)user) >= CAVL2_TO_OWNER(node, tx_transfer_t, index_readiness)->ready_at) ? +1 : -1; + return ((*(const udpard_us_t*)user) >= CAVL2_TO_OWNER(node, tx_transfer_t, index_staged)->retry_at) ? +1 : -1; } static int32_t tx_cavl_compare_deadline(const void* const user, const udpard_tree_t* const node) { @@ -555,6 +583,13 @@ static int32_t tx_cavl_compare_transfer(const void* const user, const udpard_tre return 0; // clang-format on } +static tx_transfer_t* tx_transfer_find(udpard_tx_t* const tx, const uint64_t topic_hash, const uint64_t transfer_id) +{ + const tx_transfer_key_t key = { .topic_hash = topic_hash, .transfer_id = transfer_id }; + return CAVL2_TO_OWNER( + cavl2_find(tx->index_transfer, &key, &tx_cavl_compare_transfer), tx_transfer_t, index_transfer); +} + /// Returns the head of the transfer chain; NULL on OOM. static tx_frame_t* tx_spool(const udpard_tx_mem_resources_t memory, const size_t mtu, @@ -608,44 +643,9 @@ static tx_frame_t* tx_spool(const udpard_tx_mem_resources_t memory, } /// Derives the ack timeout for an outgoing transfer. -/// The number of retries is initially zero when the transfer is sent for the first time. -static udpard_us_t tx_ack_timeout(const udpard_us_t baseline, const udpard_prio_t prio, const uint_fast8_t retries) -{ - return baseline * (1L << smaller((size_t)prio + retries, UDPARD_TX_RETRY_MAX)); // NOLINT(*-signed-bitwise) -} - -static void tx_insert(udpard_tx_t* const tx, tx_transfer_t* const tr, const udpard_us_t now) +static udpard_us_t tx_ack_timeout(const udpard_us_t baseline, const udpard_prio_t prio, const uint_fast8_t attempts) { - const bool ready = now >= tr->ready_at; - if (ready) { - if (cavl2_is_inserted(tx->index_readiness, &tr->index_readiness)) { - cavl2_remove(&tx->index_readiness, &tr->index_readiness); - } - if (!cavl2_is_inserted(tx->index_priority, &tr->index_priority)) { - (void)cavl2_find_or_insert( - &tx->index_priority, &tr->priority, tx_cavl_compare_priority, &tr->index_priority, cavl2_trivial_factory); - } - } else { - if (cavl2_is_inserted(tx->index_priority, &tr->index_priority)) { - cavl2_remove(&tx->index_priority, &tr->index_priority); - } - if (!cavl2_is_inserted(tx->index_readiness, &tr->index_readiness)) { - (void)cavl2_find_or_insert(&tx->index_readiness, - &tr->ready_at, - tx_cavl_compare_readiness, - &tr->index_readiness, - cavl2_trivial_factory); - } - } - if (!cavl2_is_inserted(tx->index_transfer, &tr->index_transfer)) { - const tx_transfer_key_t key = { .topic_hash = tr->topic_hash, .transfer_id = tr->transfer_id }; - (void)cavl2_find_or_insert( - &tx->index_transfer, &key, tx_cavl_compare_transfer, &tr->index_transfer, cavl2_trivial_factory); - } - if (!cavl2_is_inserted(tx->index_deadline, &tr->index_deadline)) { - (void)cavl2_find_or_insert( - &tx->index_deadline, &tr->deadline, tx_cavl_compare_deadline, &tr->index_deadline, cavl2_trivial_factory); - } + return baseline * (1L << smaller((size_t)prio + attempts, 62)); // NOLINT(*-signed-bitwise) } static uint32_t tx_push(udpard_tx_t* const tx, @@ -654,22 +654,26 @@ static uint32_t tx_push(udpard_tx_t* const tx, const meta_t meta, const udpard_udpip_ep_t endpoint, const udpard_bytes_t payload, - void* const user_transfer_reference) + void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), + void* const user_transfer_reference) { + UDPARD_ASSERT(now <= deadline); UDPARD_ASSERT(tx != NULL); uint32_t out = 0; // The number of frames enqueued; zero on error (error counters incremented). const size_t payload_size = payload.size; const size_t mtu = larger(tx->mtu, UDPARD_MTU_MIN); const size_t mtu_last = ((payload_size % mtu) != 0U) ? (payload_size % mtu) : mtu; const size_t n_frames = larger(1, (payload_size + mtu - 1U) / mtu); - if ((tx->queue_size + n_frames) > tx->queue_capacity) { + if ((tx->enqueued_frames_count + n_frames) > tx->enqueued_frames_limit) { tx->errors_capacity++; } else { tx_transfer_t* const tr = mem_alloc(tx->memory.transfer, sizeof(tx_transfer_t)); if (tr != NULL) { mem_zero(sizeof(*tr), tr); - tr->retries = 0; - tr->ready_at = BIG_BANG; // We can implement time-triggered comms here. + tr->attempts = 0; + tr->retry_at = meta.flag_ack // + ? (now + tx_ack_timeout(tx->ack_baseline_timeout, meta.priority, 0)) + : HEAT_DEATH; tr->mtu = mtu; tr->mtu_last = mtu_last; tr->topic_hash = meta.topic_hash; @@ -679,11 +683,24 @@ static uint32_t tx_push(udpard_tx_t* const tx, tr->priority = meta.priority; tr->destination = endpoint; tr->user_transfer_reference = user_transfer_reference; + tr->feedback = feedback; tr->head = tr->cursor = tx_spool(tx->memory, mtu, meta, payload); if (tr->head != NULL) { - tx_insert(tx, tr, now); - tx->queue_size += n_frames; - UDPARD_ASSERT(tx->queue_size <= tx->queue_capacity); + // Schedule the transfer for transmission. + enlist_head(&tx->queue[tr->priority], &tr->queue); + const tx_transfer_key_t key = { .topic_hash = tr->topic_hash, .transfer_id = tr->transfer_id }; + (void)cavl2_find_or_insert(&tx->index_transfer, // + &key, + tx_cavl_compare_transfer, + &tr->index_transfer, + cavl2_trivial_factory); + (void)cavl2_find_or_insert(&tx->index_deadline, + &tr->deadline, + tx_cavl_compare_deadline, + &tr->index_deadline, + cavl2_trivial_factory); + tx->enqueued_frames_count += n_frames; + UDPARD_ASSERT(tx->enqueued_frames_count <= tx->enqueued_frames_limit); out = (uint32_t)n_frames; } else { // The queue is large enough but we ran out of heap memory. tx->errors_oom++; @@ -696,33 +713,6 @@ static uint32_t tx_push(udpard_tx_t* const tx, return out; } -static uint64_t tx_purge_expired(udpard_tx_t* const self, const udpard_us_t now) -{ - uint64_t count = 0; - for (udpard_tree_t* p = cavl2_min(self->index_deadline); p != NULL;) { - tx_frame_t* const item = CAVL2_TO_OWNER(p, tx_frame_t, index_deadline); - if (item->deadline >= now) { - break; - } - udpard_tree_t* const next = cavl2_next_greater(p); // Get next before removing current node from tree. - // Remove from both indices. - cavl2_remove(&self->index_deadline, &item->index_deadline); - cavl2_remove(&self->index_order, &item->index_order); - // Free the entire transfer chain. - tx_frame_t* current = item; - while (current != NULL) { - tx_frame_t* const next_in_transfer = current->next; - mem_free(self->memory.payload, current->datagram_payload.size, current->datagram_payload.data); - mem_free(self->memory.fragment, sizeof(tx_frame_t), current); - current = next_in_transfer; - count++; - self->queue_size--; - } - p = next; - } - return count; -} - /// Handle an ACK received from a remote node. /// This is where we dequeue pending transmissions and invoke the feedback callback. static void tx_receive_ack(udpard_rx_t* const rx, @@ -782,24 +772,29 @@ static void tx_send_ack(udpard_rx_t* const rx, bool udpard_tx_new(udpard_tx_t* const self, const uint64_t local_uid, - const size_t queue_capacity, + const size_t enqueued_frames_limit, const udpard_tx_mem_resources_t memory, const udpard_tx_vtable_t* const vtable) { const bool ok = (NULL != self) && (local_uid != 0) && tx_validate_mem_resources(memory) && (vtable != NULL) && - (vtable->eject != NULL) && (vtable->feedback != NULL); + (vtable->eject != NULL); if (ok) { mem_zero(sizeof(*self), self); - self->vtable = vtable; - self->local_uid = local_uid; - self->queue_capacity = queue_capacity; - self->mtu = UDPARD_MTU_DEFAULT; - self->ack_baseline_timeout = UDPARD_TX_ACK_BASELINE_TIMEOUT_DEFAULT_us; - self->memory = memory; - self->queue_size = 0; - self->index_order = NULL; - self->index_deadline = NULL; - self->user = NULL; + self->vtable = vtable; + self->local_uid = local_uid; + self->mtu = UDPARD_MTU_DEFAULT; + self->ack_baseline_timeout = UDPARD_TX_ACK_BASELINE_TIMEOUT_DEFAULT_us; + self->enqueued_frames_limit = enqueued_frames_limit; + self->enqueued_frames_count = 0; + self->memory = memory; + for (size_t i = 0; i < UDPARD_PRIORITY_COUNT; i++) { + self->queue[i].head = NULL; + self->queue[i].tail = NULL; + } + self->index_staged = NULL; + self->index_deadline = NULL; + self->index_transfer = NULL; + self->user = NULL; } return ok; } @@ -812,45 +807,158 @@ uint32_t udpard_tx_push(udpard_tx_t* const self, const udpard_udpip_ep_t remote_ep, const uint64_t transfer_id, const udpard_bytes_t payload, - const bool reliable, - void* const user_transfer_reference) + void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), + void* const user_transfer_reference) { uint32_t out = 0; const bool ok = (self != NULL) && (deadline >= now) && (now >= 0) && (self->local_uid != 0) && udpard_is_valid_endpoint(remote_ep) && (priority <= UDPARD_PRIORITY_MAX) && - ((payload.data != NULL) || (payload.size == 0U)); + ((payload.data != NULL) || (payload.size == 0U)) && + (tx_transfer_find(self, topic_hash, transfer_id) == NULL); if (ok) { - udpard_tx_poll(self, now); // Free up expired transfers before attempting to enqueue a new one. + // Before attempting to enqueue a new transfer, we need to update the transmission scheduler. + // It may release some items from the tx queue, and it may also promote some staged transfers to the queue. + udpard_tx_poll(self, now); const meta_t meta = { .priority = priority, - .flag_ack = reliable, + .flag_ack = feedback != NULL, .transfer_payload_size = (uint32_t)payload.size, .transfer_id = transfer_id, .sender_uid = self->local_uid, .topic_hash = topic_hash, }; - out = tx_push(self, deadline, meta, remote_ep, payload, user_transfer_reference); + out = tx_push(self, now, deadline, meta, remote_ep, payload, feedback, user_transfer_reference); } return out; } -void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now) +static void tx_purge_expired(udpard_tx_t* const self, const udpard_us_t now) { - if ((self != NULL) && (now >= 0)) { - self->errors_expiration += tx_purge_expired(self, now); - while (self->queue_size > 0) { - // TODO fetch the next scheduled frame and invoke the eject callback - break; // Remove this when implemented + while (true) { // we can use next_greater instead of doing min search every time + tx_transfer_t* const tr = CAVL2_TO_OWNER(cavl2_min(self->index_deadline), tx_transfer_t, index_deadline); + if ((tr != NULL) && (now > tr->deadline)) { + const udpard_tx_feedback_t fb = { + .topic_hash = tr->topic_hash, + .transfer_id = tr->transfer_id, + .remote_ep = tr->destination, + .user_transfer_reference = tr->user_transfer_reference, + .attempts = tr->attempts, + .success = false, + }; + tx_transfer_free_payload(self->memory, tr); // do this early to release memory before callback + if (tr->feedback != NULL) { + tr->feedback(self, fb); + } + tx_transfer_free(self, tr); + self->errors_expiration++; + } else { + break; + } + } +} + +static void tx_promote_staged(udpard_tx_t* const self, const udpard_us_t now) +{ + while (true) { // we can use next_greater instead of doing min search every time + tx_transfer_t* const tr = CAVL2_TO_OWNER(cavl2_min(self->index_staged), tx_transfer_t, index_staged); + if ((tr != NULL) && (now >= tr->retry_at)) { + UDPARD_ASSERT(tr->cursor != NULL); // cannot stage without payload, doesn't make sense + // Update the state for the next retransmission. + tr->retry_at += tx_ack_timeout(self->ack_baseline_timeout, tr->priority, tr->attempts); + UDPARD_ASSERT(tr->cursor == tr->head); + // Remove from the staged index and add to the transmission queue. + enlist_head(&self->queue[tr->priority], &tr->queue); + cavl2_remove(&self->index_staged, &tr->index_staged); + } else { + break; } } } +static void tx_eject_pending(udpard_tx_t* const self, const udpard_us_t now) +{ + while (true) { + // Find the highest-priority pending transfer. + tx_transfer_t* tr = NULL; + for (size_t prio = 0; prio < UDPARD_PRIORITY_COUNT; prio++) { // dear compiler, please unroll + tx_transfer_t* const candidate = LIST_TAIL(self->queue[prio], tx_transfer_t, queue); + if (candidate != NULL) { + tr = candidate; + break; + } + } + if (tr == NULL) { + break; // No pending transfers at the moment. Find something else to do. + } + UDPARD_ASSERT(!cavl2_is_inserted(self->index_staged, &tr->index_staged)); + UDPARD_ASSERT(tr->cursor != NULL); // cannot be pending without payload, doesn't make sense + + // Compute the auxiliary states that will guide the ejection. + tx_frame_t* const frame = tr->cursor; + tx_frame_t* const frame_next = frame->next; + const bool last_attempt = tr->deadline <= tr->retry_at; + const bool last_frame = frame_next == NULL; // if not last attempt we will have to rewind to head + const size_t frame_size = last_frame ? tr->mtu_last : tr->mtu; + // Transfer ownership to the application if no further attempts will be made to reduce queue/memory pressure. + const udpard_bytes_mut_t frame_origin = { .size = last_attempt ? tx_frame_object_size(frame_size) : 0U, + .data = last_attempt ? frame : NULL }; + + // Eject the frame. + const udpard_tx_ejection_t ejection = { + .now = now, + .deadline = tr->deadline, + .dscp = self->dscp_value_per_priority[tr->priority], + .destination = tr->destination, + .datagram_view = { .size = HEADER_SIZE_BYTES + frame_size, .data = frame->data }, + .datagram_origin = frame_origin, + .user_transfer_reference = tr->user_transfer_reference, + }; + if (!self->vtable->eject(self, ejection)) { // The easy case -- no progress was made at this time; + break; // don't change anything, just try again later as-is + } + + // Frame ejected successfully. Update the transfer state to get ready for the next frame. + if (last_attempt) { // no need to keep frames that we will no longer use; free early to reduce pressure + UDPARD_ASSERT(tr->head == tr->cursor); // They go together on the last attempt. + tr->head = frame_next; + self->enqueued_frames_count--; // Ownership transferred to the application. + } + + // Finalize the transmission if this was the last frame of the transfer. + if (last_frame) { + tr->cursor = tr->head; + tr->attempts++; + delist(&self->queue[tr->priority], &tr->queue); // no longer pending for transmission + if (last_attempt) { + if (tr->feedback == NULL) { // Best-effort transfers are removed immediately. + tx_transfer_free(self, tr); + } + // If this is the last attempt of a reliable transfer, it will wait for ack or expiration. + } else { // Reinsert into the staged index for later retransmission if not acknowledged. + cavl2_find_or_insert( + &self->index_staged, &tr->retry_at, tx_cavl_compare_staged, &tr->index_staged, cavl2_trivial_factory); + } + } else { + tr->cursor = frame_next; + } + } +} + +void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now) +{ + if ((self != NULL) && (now >= 0)) { // This is the main scheduler state machine update tick. + tx_purge_expired(self, now); // This may free up some memory and some queue slots. + tx_promote_staged(self, now); // This may add some new transfers to the queue. + tx_eject_pending(self, now); // The queue is now up to date and we can try to eject some frames. + } +} + void udpard_tx_free(udpard_tx_t* const self) { if (self != NULL) { - // TODO: do this for all items in the queue: - // mem_free(memory.payload, item->datagram_payload.size, item->datagram_payload.data); - // mem_free(memory.fragment, sizeof(tx_frame_t), item); + while (self->index_transfer != NULL) { + tx_transfer_free(self, (tx_transfer_t*)self->index_transfer); + } } } diff --git a/libudpard/udpard.h b/libudpard/udpard.h index 01bcbf4..4c22974 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -95,15 +95,13 @@ typedef int64_t udpard_us_t; /// See udpard_tx_t::ack_baseline_timeout. #define UDPARD_TX_ACK_BASELINE_TIMEOUT_DEFAULT_us 16000LL -/// The maximum number of transmission attempts for a transfer is capped at this value irrespective of other settings. -#define UDPARD_TX_RETRY_MAX 31U - /// The subject-ID only affects the formation of the multicast UDP/IP endpoint address. /// In IPv4 networks, it is limited to 23 bits only due to the limited MAC multicast address space. /// In IPv6 networks, 32 bits are supported. #define UDPARD_IPv4_SUBJECT_ID_MAX 0x7FFFFFUL -#define UDPARD_PRIORITY_MAX 7U +#define UDPARD_PRIORITY_MAX 7U +#define UDPARD_PRIORITY_COUNT (UDPARD_PRIORITY_MAX + 1U) typedef enum udpard_prio_t { @@ -314,24 +312,14 @@ typedef struct udpard_tx_mem_resources_t typedef struct udpard_tx_feedback_t { uint64_t topic_hash; - uint32_t transfer_id; + uint64_t transfer_id; udpard_udpip_ep_t remote_ep; + void* user_transfer_reference; ///< This is the same pointer that was passed to udpard_tx_push(). - uint_fast8_t retries; ///< The number of attempts equals retries plus one. - bool success; ///< False if no ack was received from the remote end before deadline expiration. - - /// This is the same pointer that was passed to udpard_tx_push(). - void* user_transfer_reference; + uint_fast8_t attempts; ///< Cannot overflow due to exponential backoff. 0 if timed out before first attempt. + bool success; ///< False if no ack was received from the remote end before deadline expiration. } udpard_tx_feedback_t; -/// The TX frame ejection handler returns one of these results to guide the udpard_tx_poll() logic. -typedef enum udpard_tx_eject_result_t -{ - udpard_tx_eject_success, ///< Frame submitted to NIC/socket successfully and can be removed from the TX queue. - udpard_tx_eject_blocked, ///< The NIC/socket is currently not ready to accept new frames; try again later. - udpard_tx_eject_failed, ///< An unrecoverable error occurred while submitting the frame; drop it from the TX queue. -} udpard_tx_eject_result_t; - typedef struct udpard_tx_ejection_t { udpard_us_t now; @@ -343,8 +331,8 @@ typedef struct udpard_tx_ejection_t uint_fast8_t dscp; ///< Set the DSCP field of the outgoing packet to this. udpard_udpip_ep_t destination; ///< Unicast or multicast UDP/IP endpoint. - /// If the result is udpard_tx_eject_success, the application is responsible for freeing the datagram_origin.data - /// using self->memory.payload.free() at some point in the future (either within the callback or later), + /// If the ejection handler returns success, the application is responsible for freeing the datagram_origin.data + /// using udpard_tx_t::memory.payload.free() at some point in the future (either within the callback or later), /// unless datagram_origin.data is NULL, in which case the library will retain the ownership. /// It may help to know that the view is a small fixed offset greater than the origin, /// so both may not have to be kept, depending on the implementation. @@ -358,12 +346,7 @@ typedef struct udpard_tx_ejection_t typedef struct udpard_tx_vtable_t { /// Invoked from udpard_tx_poll() to push outgoing UDP datagrams into the socket/NIC driver. - udpard_tx_eject_result_t (*eject)(udpard_tx_t*, udpard_tx_ejection_t); - - /// Invoked from udpard_tx_poll() to report the result of reliable transfer transmission attempts. - /// This is ALWAYS invoked EXACTLY ONCE per reliable transfer pushed via udpard_tx_push() successfully; - /// this is NOT invoked for best-effort (non-reliable) transfers. - void (*feedback)(udpard_tx_t*, udpard_tx_feedback_t); + bool (*eject)(udpard_tx_t*, udpard_tx_ejection_t); } udpard_tx_vtable_t; /// The transmission pipeline is a prioritized transmission queue that keeps UDP datagrams (aka transport frames) @@ -390,10 +373,6 @@ struct udpard_tx_t /// The globally unique identifier of the local node. Must not change after initialization. uint64_t local_uid; - /// The maximum number of UDP datagrams this instance is allowed to enqueue. - /// The purpose of this limitation is to ensure that a blocked queue does not exhaust the memory. - size_t queue_capacity; - /// The maximum number of Cyphal transfer payload bytes per UDP datagram. /// The Cyphal/UDP header is added to this value to obtain the total UDP datagram payload size. See UDPARD_MTU_*. /// The value can be changed arbitrarily between enqueue operations as long as it is at least UDPARD_MTU_MIN. @@ -405,12 +384,17 @@ struct udpard_tx_t /// Optional user-managed mapping from the Cyphal priority level in [0,7] (highest priority at index 0) /// to the IP DSCP field value for use by the application when transmitting. By default, all entries are zero. - uint_least8_t dscp_value_per_priority[UDPARD_PRIORITY_MAX + 1U]; + uint_least8_t dscp_value_per_priority[UDPARD_PRIORITY_COUNT]; - udpard_tx_mem_resources_t memory; + /// The maximum number of UDP datagrams this instance is allowed to enqueue, irrespective of the transfer count. + /// At worst, there may be one datagram per transfer, more for multi-frame transfers. + /// The purpose of this limitation is to ensure that a blocked queue does not exhaust the memory. + size_t enqueued_frames_limit; /// The number of frames that are currently contained in the queue, initially zero. READ-ONLY! - size_t queue_size; + size_t enqueued_frames_count; + + udpard_tx_mem_resources_t memory; /// Error counters incremented automatically when the corresponding error condition occurs. /// These counters are never decremented by the library but they can be reset by the application if needed. @@ -419,8 +403,8 @@ struct udpard_tx_t uint64_t errors_expiration; ///< A frame had to be dropped due to premature deadline expiration. /// Internal use only, do not modify! See tx_transfer_t for details. - udpard_tree_t* index_priority; - udpard_tree_t* index_readiness; + udpard_list_t queue[UDPARD_PRIORITY_COUNT]; + udpard_tree_t* index_staged; udpard_tree_t* index_deadline; udpard_tree_t* index_transfer; @@ -434,7 +418,7 @@ struct udpard_tx_t /// True on success, false if any of the arguments are invalid. bool udpard_tx_new(udpard_tx_t* const self, const uint64_t local_uid, - const size_t queue_capacity, + const size_t enqueued_frames_limit, const udpard_tx_mem_resources_t memory, const udpard_tx_vtable_t* const vtable); @@ -461,6 +445,16 @@ bool udpard_tx_new(udpard_tx_t* const self, /// while invocations with invalid arguments just return zero without modifying the queue state. In all cases, /// either all frames of the transfer are enqueued successfully or none are. /// +/// An attempt to push a transfer with a (topic hash, transfer-ID) pair that is already enqueued will fail. +/// +/// The callback is invoked from udpard_tx_poll() to report the result of reliable transfer transmission attempts. +/// This is ALWAYS invoked EXACTLY ONCE per reliable transfer pushed via udpard_tx_push() successfully. +/// Set the callback to NULL for best-effort (non-acknowledged) transfers. +/// +/// Reliable transfers will keep retransmitting until either an acknowledgment is received from the remote, +/// or the deadline expires. The number of retransmissions cannot be limited directly. Each subsequent +/// retransmission timeout is doubled compared to the previous one (exponential backoff). +/// /// The memory allocation requirement is two allocations per datagram: /// a single-frame transfer takes two allocations; a multi-frame transfer of N frames takes N*2 allocations. /// In each pair of allocations: @@ -479,8 +473,8 @@ uint32_t udpard_tx_push(udpard_tx_t* const self, const udpard_udpip_ep_t remote_ep, const uint64_t transfer_id, const udpard_bytes_t payload, - const bool reliable, // Will keep retransmitting until acked or deadline reached. - void* const user_transfer_reference); + void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), // NULL if best-effort. + void* const user_transfer_reference); /// This should be invoked whenever the socket/NIC of this queue becomes ready to accept new datagrams for transmission. /// It is fine to also invoke it periodically unconditionally to drive the transmission process. @@ -489,7 +483,7 @@ uint32_t udpard_tx_push(udpard_tx_t* const self, /// The function may deallocate memory. The time complexity is logarithmic in the number of enqueued transfers. void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now); -/// Drops all enqueued items; afterward, the instance is safe to discard. +/// Drops all enqueued items; afterward, the instance is safe to discard. Callbacks will not be invoked. void udpard_tx_free(udpard_tx_t* const self); // ===================================================================================================================== From 7f8a3b0bc45581b15938154fb17303956fd00d85 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Fri, 26 Dec 2025 18:16:15 +0200 Subject: [PATCH 08/42] tx_receive_ack --- libudpard/udpard.c | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index 357a3e6..5464f48 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -715,16 +715,32 @@ static uint32_t tx_push(udpard_tx_t* const tx, /// Handle an ACK received from a remote node. /// This is where we dequeue pending transmissions and invoke the feedback callback. +/// Acks for non-reliable transfers are ignored. static void tx_receive_ack(udpard_rx_t* const rx, const uint64_t topic_hash, const uint64_t transfer_id, const udpard_remote_t remote) { - (void)rx; - (void)topic_hash; - (void)transfer_id; (void)remote; - // TODO: find the transfer in the TX queue by topic and transfer-ID and remove it; invoke the feedback callback. + for (uint_fast8_t i = 0; i < UDPARD_NETWORK_INTERFACE_COUNT_MAX; i++) { + udpard_tx_t* const tx = rx->tx[i]; + if (tx != NULL) { + tx_transfer_t* const tr = tx_transfer_find(tx, topic_hash, transfer_id); + if ((tr != NULL) && (tr->feedback != NULL)) { // don't match non-reliable transfers + const udpard_tx_feedback_t fb = { + .topic_hash = tr->topic_hash, + .transfer_id = tr->transfer_id, + .remote_ep = tr->destination, + .user_transfer_reference = tr->user_transfer_reference, + .attempts = tr->attempts, + .success = true, + }; + tx_transfer_free_payload(tx->memory, tr); // do this early to release memory before callback + tr->feedback(tx, fb); + tx_transfer_free(tx, tr); + } + } + } } /// Generate an ack transfer for the specified remote transfer. From 98b94bd5c40df84b7dfeefc1b61755eb96421e32 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Fri, 26 Dec 2025 18:18:43 +0200 Subject: [PATCH 09/42] remove old intrusive tx tests --- tests/src/test_intrusive_tx.c | 859 +--------------------------------- 1 file changed, 5 insertions(+), 854 deletions(-) diff --git a/tests/src/test_intrusive_tx.c b/tests/src/test_intrusive_tx.c index 1d912ad..a4b48d2 100644 --- a/tests/src/test_intrusive_tx.c +++ b/tests/src/test_intrusive_tx.c @@ -7,26 +7,13 @@ #include "helpers.h" #include -static const char ethereal_strength[] = - "All was silent except for the howl of the wind against the antenna. Ye watched as the remaining birds in the " - "flock gradually settled back into the forest. She stared at the antenna and thought it looked like an enormous " - "hand stretched open toward the sky, possessing an ethereal strength."; -static const size_t ethereal_strength_size = sizeof(ethereal_strength) - 1; - -static const char detail_of_the_cosmos[] = - "For us, the dark forest state is all-important, but it's just a detail of the cosmos."; -static const size_t detail_of_the_cosmos_size = sizeof(detail_of_the_cosmos) - 1; - -static const char interstellar_war[] = "You have not seen what a true interstellar war is like."; -static const size_t interstellar_war_size = sizeof(interstellar_war) - 1; - -typedef struct -{ - byte_t data[HEADER_SIZE_BYTES]; -} header_buffer_t; - static void test_tx_serialize_header(void) { + typedef struct + { + byte_t data[HEADER_SIZE_BYTES]; + } header_buffer_t; + // Test case 1: Basic header serialization { header_buffer_t buffer; @@ -60,827 +47,6 @@ static void test_tx_serialize_header(void) } } -static void test_tx_spool_empty(void) -{ - instrumented_allocator_t alloc; - instrumented_allocator_new(&alloc); - const udpard_tx_mem_resources_t mem = { - .fragment = instrumented_allocator_make_resource(&alloc), - .payload = instrumented_allocator_make_resource(&alloc), - }; - char user_transfer_referent = '\0'; - const meta_t meta = { - .priority = udpard_prio_fast, - .flag_ack = false, - .transfer_payload_size = 0, - .transfer_id = 0xBADC0FFEE0DDF00DULL, - .sender_uid = 0x0123456789ABCDEFULL, - .topic_hash = 0xFEDCBA9876543210ULL, - }; - const tx_chain_t chain = tx_spool(mem, - 30, - 1234567890, - meta, - (udpard_udpip_ep_t){ .ip = 0x0A0B0C0D, .port = 0x1234 }, - (udpard_bytes_t){ .size = 0, .data = "" }, - &user_transfer_referent); - TEST_ASSERT_EQUAL(1 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(sizeof(udpard_tx_item_t) + HEADER_SIZE_BYTES, alloc.allocated_bytes); - TEST_ASSERT_EQUAL(1, chain.count); - TEST_ASSERT_EQUAL(chain.head, chain.tail); - TEST_ASSERT_EQUAL(NULL, chain.head->next_in_transfer); - TEST_ASSERT_EQUAL(1234567890, chain.head->deadline); - TEST_ASSERT_EQUAL(udpard_prio_fast, chain.head->priority); - TEST_ASSERT_EQUAL(0x0A0B0C0D, chain.head->destination.ip); - TEST_ASSERT_EQUAL(0x1234, chain.head->destination.port); - TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES, chain.head->datagram_payload.size); - TEST_ASSERT_EQUAL(&user_transfer_referent, chain.head->user_transfer_reference); - udpard_tx_free(mem, chain.head); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); -} - -static void test_tx_spool_single_max_mtu(void) -{ - instrumented_allocator_t alloc; - instrumented_allocator_new(&alloc); - const udpard_tx_mem_resources_t mem = { - .fragment = instrumented_allocator_make_resource(&alloc), - .payload = instrumented_allocator_make_resource(&alloc), - }; - char user_transfer_referent = '\0'; - const meta_t meta = { - .priority = udpard_prio_slow, - .flag_ack = false, - .transfer_payload_size = (uint32_t)detail_of_the_cosmos_size, - .transfer_id = 0x0123456789ABCDEFULL, - .sender_uid = 0xFEDCBA9876543210ULL, - .topic_hash = 0x1111111111111111ULL, - }; - const tx_chain_t chain = - tx_spool(mem, - detail_of_the_cosmos_size, - 1234567890, - meta, - (udpard_udpip_ep_t){ .ip = 0x0A0B0C00, .port = 7474 }, - (udpard_bytes_t){ .size = detail_of_the_cosmos_size, .data = detail_of_the_cosmos }, - &user_transfer_referent); - TEST_ASSERT_EQUAL(1 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(sizeof(udpard_tx_item_t) + HEADER_SIZE_BYTES + detail_of_the_cosmos_size, alloc.allocated_bytes); - TEST_ASSERT_EQUAL(1, chain.count); - TEST_ASSERT_EQUAL(chain.head, chain.tail); - TEST_ASSERT_EQUAL(NULL, chain.head->next_in_transfer); - TEST_ASSERT_EQUAL(1234567890, chain.head->deadline); - TEST_ASSERT_EQUAL(udpard_prio_slow, chain.head->priority); - TEST_ASSERT_EQUAL(0x0A0B0C00, chain.head->destination.ip); - TEST_ASSERT_EQUAL(7474, chain.head->destination.port); - TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES + detail_of_the_cosmos_size, chain.head->datagram_payload.size); - TEST_ASSERT_EQUAL(&user_transfer_referent, chain.head->user_transfer_reference); - // Verify payload - const byte_t* payload_ptr = (const byte_t*)chain.head->datagram_payload.data + HEADER_SIZE_BYTES; - TEST_ASSERT_EQUAL(0, memcmp(detail_of_the_cosmos, payload_ptr, detail_of_the_cosmos_size)); - udpard_tx_free(mem, chain.head); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); -} - -static void test_tx_spool_single_frame_default_mtu(void) -{ - instrumented_allocator_t alloc; - instrumented_allocator_new(&alloc); - const udpard_tx_mem_resources_t mem = { - .fragment = instrumented_allocator_make_resource(&alloc), - .payload = instrumented_allocator_make_resource(&alloc), - }; - const size_t max_single_frame = UDPARD_MTU_DEFAULT; - const byte_t payload[UDPARD_MTU_DEFAULT + 1] = { 0 }; - const meta_t meta = { - .priority = udpard_prio_slow, - .flag_ack = false, - .transfer_payload_size = (uint32_t)max_single_frame, - .transfer_id = 0x0123456789ABCDEFULL, - .sender_uid = 0xAAAAAAAAAAAAAAAAULL, - .topic_hash = 0xBBBBBBBBBBBBBBBBULL, - }; - // Test: max_single_frame bytes fit in a single frame with the default MTU - { - const tx_chain_t chain = tx_spool(mem, - UDPARD_MTU_DEFAULT, - 1234567890, - meta, - (udpard_udpip_ep_t){ .ip = 0x0A0B0C00, .port = 7474 }, - (udpard_bytes_t){ .size = max_single_frame, .data = payload }, - NULL); - TEST_ASSERT_EQUAL(1 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(sizeof(udpard_tx_item_t) + HEADER_SIZE_BYTES + max_single_frame, alloc.allocated_bytes); - TEST_ASSERT_EQUAL(1, chain.count); - TEST_ASSERT_EQUAL(chain.head, chain.tail); - TEST_ASSERT_EQUAL(NULL, chain.head->next_in_transfer); - udpard_tx_free(mem, chain.head); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); - } - // Test: Increase the payload by 1 byte and ensure it spills over - { - meta_t meta2 = meta; - meta2.transfer_payload_size = (uint32_t)(max_single_frame + 1); - const tx_chain_t chain = tx_spool(mem, - UDPARD_MTU_DEFAULT, - 1234567890, - meta2, - (udpard_udpip_ep_t){ .ip = 0x0A0B0C00, .port = 7474 }, - (udpard_bytes_t){ .size = max_single_frame + 1, .data = payload }, - NULL); - TEST_ASSERT_EQUAL(2 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(((sizeof(udpard_tx_item_t) + HEADER_SIZE_BYTES) * 2) + max_single_frame + 1, - alloc.allocated_bytes); - TEST_ASSERT_EQUAL(2, chain.count); - TEST_ASSERT_NOT_EQUAL(chain.head, chain.tail); - TEST_ASSERT_EQUAL(chain.tail, chain.head->next_in_transfer); - TEST_ASSERT_EQUAL(NULL, chain.tail->next_in_transfer); - udpard_tx_free(mem, chain.head); - udpard_tx_free(mem, chain.tail); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); - } -} - -static void test_tx_spool_three_frames(void) -{ - instrumented_allocator_t alloc; - instrumented_allocator_new(&alloc); - const udpard_tx_mem_resources_t mem = { - .fragment = instrumented_allocator_make_resource(&alloc), - .payload = instrumented_allocator_make_resource(&alloc), - }; - char user_transfer_referent = '\0'; - const meta_t meta = { - .priority = udpard_prio_nominal, - .flag_ack = false, - .transfer_payload_size = (uint32_t)ethereal_strength_size, - .transfer_id = 0x0123456789ABCDEFULL, - .sender_uid = 0x1111111111111111ULL, - .topic_hash = 0x2222222222222222ULL, - }; - const size_t mtu = (ethereal_strength_size + 2U) / 3U; // Force payload split into three frames - const tx_chain_t chain = tx_spool(mem, - mtu, - 223574680, - meta, - (udpard_udpip_ep_t){ .ip = 0xBABADEDA, .port = 0xD0ED }, - (udpard_bytes_t){ .size = ethereal_strength_size, .data = ethereal_strength }, - &user_transfer_referent); - TEST_ASSERT_EQUAL(3 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL((3 * (sizeof(udpard_tx_item_t) + HEADER_SIZE_BYTES)) + ethereal_strength_size, - alloc.allocated_bytes); - TEST_ASSERT_EQUAL(3, chain.count); - udpard_tx_item_t* const first = chain.head; - TEST_ASSERT_NOT_EQUAL(NULL, first); - udpard_tx_item_t* const second = first->next_in_transfer; - TEST_ASSERT_NOT_EQUAL(NULL, second); - udpard_tx_item_t* const third = second->next_in_transfer; - TEST_ASSERT_NOT_EQUAL(NULL, third); - TEST_ASSERT_EQUAL(NULL, third->next_in_transfer); - TEST_ASSERT_EQUAL(chain.tail, third); - // Verify first frame - TEST_ASSERT_EQUAL(223574680, first->deadline); - TEST_ASSERT_EQUAL(udpard_prio_nominal, first->priority); - TEST_ASSERT_EQUAL(0xBABADEDA, first->destination.ip); - TEST_ASSERT_EQUAL(0xD0ED, first->destination.port); - TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES + mtu, first->datagram_payload.size); - TEST_ASSERT_EQUAL(0, - memcmp(ethereal_strength, (const byte_t*)first->datagram_payload.data + HEADER_SIZE_BYTES, mtu)); - TEST_ASSERT_EQUAL(&user_transfer_referent, first->user_transfer_reference); - // Verify second frame - TEST_ASSERT_EQUAL(223574680, second->deadline); - TEST_ASSERT_EQUAL(udpard_prio_nominal, second->priority); - TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES + mtu, second->datagram_payload.size); - TEST_ASSERT_EQUAL( - 0, memcmp(ethereal_strength + mtu, (const byte_t*)second->datagram_payload.data + HEADER_SIZE_BYTES, mtu)); - TEST_ASSERT_EQUAL(&user_transfer_referent, second->user_transfer_reference); - // Verify third frame (contains remainder) - TEST_ASSERT_EQUAL(223574680, third->deadline); - TEST_ASSERT_EQUAL(udpard_prio_nominal, third->priority); - const size_t third_payload_size = ethereal_strength_size - (2 * mtu); - TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES + third_payload_size, third->datagram_payload.size); - TEST_ASSERT_EQUAL(0, - memcmp(ethereal_strength + (2 * mtu), - (const byte_t*)third->datagram_payload.data + HEADER_SIZE_BYTES, - third_payload_size)); - TEST_ASSERT_EQUAL(&user_transfer_referent, third->user_transfer_reference); - udpard_tx_free(mem, first); - udpard_tx_free(mem, second); - udpard_tx_free(mem, third); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); -} - -static void test_tx_push_peek_pop_free(void) -{ - instrumented_allocator_t alloc; - instrumented_allocator_new(&alloc); - const udpard_tx_mem_resources_t mem = { - .fragment = instrumented_allocator_make_resource(&alloc), - .payload = instrumented_allocator_make_resource(&alloc), - }; - udpard_tx_t tx; - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x0123456789ABCDEFULL, 10, mem)); - // Use default MTU. Create a payload that will span 3 frames. - // With MTU=1384 (default), we need payload > 2768 bytes to get 3 frames. - // Use a simple repeated pattern. - const size_t test_payload_size = 2800; - byte_t* test_payload = malloc(test_payload_size); - TEST_ASSERT_NOT_NULL(test_payload); - for (size_t i = 0; i < test_payload_size; i++) { - test_payload[i] = (byte_t)(i & 0xFFU); - } - char user_transfer_referent = '\0'; - const meta_t meta = { - .priority = udpard_prio_nominal, - .flag_ack = false, - .transfer_payload_size = (uint32_t)test_payload_size, - .transfer_id = 0x0123456789ABCDEFULL, - .sender_uid = 0x0123456789ABCDEFULL, - .topic_hash = 0xBBBBBBBBBBBBBBBBULL, - }; - const uint32_t enqueued = tx_push(&tx, - 1234567890U, - meta, - (udpard_udpip_ep_t){ .ip = 0xBABADEDA, .port = 0xD0ED }, - (udpard_bytes_t){ .size = test_payload_size, .data = test_payload }, - &user_transfer_referent); - free(test_payload); - TEST_ASSERT_EQUAL(3, enqueued); - TEST_ASSERT_EQUAL(3 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(3, tx.queue_size); - // Peek and pop first frame - udpard_tx_item_t* frame = udpard_tx_peek(&tx, 0); - TEST_ASSERT_NOT_EQUAL(NULL, frame); - TEST_ASSERT_NOT_EQUAL(NULL, frame->next_in_transfer); - TEST_ASSERT_EQUAL(1234567890U, frame->deadline); - TEST_ASSERT_EQUAL(udpard_prio_nominal, frame->priority); - TEST_ASSERT_EQUAL(0xBABADEDA, frame->destination.ip); - TEST_ASSERT_EQUAL(0xD0ED, frame->destination.port); - TEST_ASSERT_EQUAL(&user_transfer_referent, frame->user_transfer_reference); - udpard_tx_pop(&tx, frame); - udpard_tx_free(tx.memory, frame); - TEST_ASSERT_EQUAL(2 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(2, tx.queue_size); - // Peek and pop second frame - frame = udpard_tx_peek(&tx, 0); - TEST_ASSERT_NOT_EQUAL(NULL, frame); - TEST_ASSERT_NOT_EQUAL(NULL, frame->next_in_transfer); - udpard_tx_pop(&tx, frame); - udpard_tx_free(tx.memory, frame); - TEST_ASSERT_EQUAL(1 * 2ULL, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(1, tx.queue_size); - // Peek and pop third frame - frame = udpard_tx_peek(&tx, 0); - TEST_ASSERT_NOT_EQUAL(NULL, frame); - TEST_ASSERT_EQUAL(NULL, frame->next_in_transfer); - udpard_tx_pop(&tx, frame); - udpard_tx_free(tx.memory, frame); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(0, tx.queue_size); - TEST_ASSERT_EQUAL(NULL, udpard_tx_peek(&tx, 0)); -} - -static void test_tx_push_prioritization(void) -{ - instrumented_allocator_t alloc; - instrumented_allocator_new(&alloc); - const udpard_tx_mem_resources_t mem = { - .fragment = instrumented_allocator_make_resource(&alloc), - .payload = instrumented_allocator_make_resource(&alloc), - }; - udpard_tx_t tx; - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x0123456789ABCDEFULL, 10, mem)); - // Use default MTU (respects UDPARD_MTU_MIN). Create payloads that span multiple frames. - const size_t large_payload_size = 2800; // 3 frames at default MTU - const size_t small_payload_size = 100; // 1 frame - byte_t* large_payload = malloc(large_payload_size); - TEST_ASSERT_NOT_NULL(large_payload); - for (size_t i = 0; i < large_payload_size; i++) { - large_payload[i] = (byte_t)(i & 0xFFU); - } - // Push transfer A at nominal priority (3 frames) - const meta_t meta_a = { - .priority = udpard_prio_nominal, - .flag_ack = false, - .transfer_payload_size = (uint32_t)large_payload_size, - .transfer_id = 5000, - .sender_uid = 0x0123456789ABCDEFULL, - .topic_hash = 0xAAAAAAAAAAAAAAAAULL, - }; - TEST_ASSERT_EQUAL(3, - tx_push(&tx, - 0, - meta_a, - (udpard_udpip_ep_t){ .ip = 0xAAAAAAAA, .port = 0xAAAA }, - (udpard_bytes_t){ .size = large_payload_size, .data = large_payload }, - NULL)); - TEST_ASSERT_EQUAL(3, tx.queue_size); - udpard_tx_item_t* frame = udpard_tx_peek(&tx, 0); - TEST_ASSERT_NOT_EQUAL(NULL, frame); - TEST_ASSERT_EQUAL(0xAAAAAAAA, frame->destination.ip); - // Push transfer B at higher priority (single frame) - TEST_ASSERT_EQUAL(1, - tx_push(&tx, - 0, - (meta_t){ - .priority = udpard_prio_high, - .flag_ack = false, - .transfer_payload_size = (uint32_t)small_payload_size, - .transfer_id = 100000, - .sender_uid = 0x0123456789ABCDEFULL, - .topic_hash = 0xBBBBBBBBBBBBBBBBULL, - }, - (udpard_udpip_ep_t){ .ip = 0xBBBBBBBB, .port = 0xBBBB }, - (udpard_bytes_t){ .size = small_payload_size, .data = large_payload }, - NULL)); - TEST_ASSERT_EQUAL(4, tx.queue_size); - frame = udpard_tx_peek(&tx, 0); - TEST_ASSERT_NOT_EQUAL(NULL, frame); - TEST_ASSERT_EQUAL(0xBBBBBBBB, frame->destination.ip); // B should be first now - // Push transfer C at lower priority (single frame) - TEST_ASSERT_EQUAL(1, - tx_push(&tx, - 1002, - (meta_t){ - .priority = udpard_prio_low, - .flag_ack = false, - .transfer_payload_size = (uint32_t)small_payload_size, - .transfer_id = 10000, - .sender_uid = 0x0123456789ABCDEFULL, - .topic_hash = 0xCCCCCCCCCCCCCCCCULL, - }, - (udpard_udpip_ep_t){ .ip = 0xCCCCCCCC, .port = 0xCCCC }, - (udpard_bytes_t){ .size = small_payload_size, .data = large_payload }, - NULL)); - TEST_ASSERT_EQUAL(5, tx.queue_size); - // Push transfer D at same low priority (should go after C due to FIFO) - TEST_ASSERT_EQUAL(1, - tx_push(&tx, - 1003, - (meta_t){ - .priority = udpard_prio_low, - .flag_ack = false, - .transfer_payload_size = (uint32_t)small_payload_size, - .transfer_id = 10001, - .sender_uid = 0x0123456789ABCDEFULL, - .topic_hash = 0xDDDDDDDDDDDDDDDDULL, - }, - (udpard_udpip_ep_t){ .ip = 0xDDDDDDDD, .port = 0xDDDD }, - (udpard_bytes_t){ .size = small_payload_size, .data = large_payload }, - NULL)); - TEST_ASSERT_EQUAL(6, tx.queue_size); - // Push transfer E at even higher priority (single frame) - TEST_ASSERT_EQUAL(1, - tx_push(&tx, - 1003, - (meta_t){ - .priority = udpard_prio_fast, - .flag_ack = false, - .transfer_payload_size = (uint32_t)small_payload_size, - .transfer_id = 1000, - .sender_uid = 0x0123456789ABCDEFULL, - .topic_hash = 0xEEEEEEEEEEEEEEEEULL, - }, - (udpard_udpip_ep_t){ .ip = 0xEEEEEEEE, .port = 0xEEEE }, - (udpard_bytes_t){ .size = small_payload_size, .data = large_payload }, - NULL)); - TEST_ASSERT_EQUAL(7, tx.queue_size); - frame = udpard_tx_peek(&tx, 0); - TEST_ASSERT_NOT_EQUAL(NULL, frame); - TEST_ASSERT_EQUAL(0xEEEEEEEE, frame->destination.ip); // E should be first - // Now unwind the queue and verify order: E, B, A (3 frames), C, D, E - udpard_tx_pop(&tx, frame); - udpard_tx_free(tx.memory, frame); - TEST_ASSERT_EQUAL(6, tx.queue_size); - // B - frame = udpard_tx_peek(&tx, 0); - TEST_ASSERT_EQUAL(0xBBBBBBBB, frame->destination.ip); - udpard_tx_pop(&tx, frame); - udpard_tx_free(tx.memory, frame); - TEST_ASSERT_EQUAL(5, tx.queue_size); - // A1 - frame = udpard_tx_peek(&tx, 0); - TEST_ASSERT_EQUAL(0xAAAAAAAA, frame->destination.ip); - udpard_tx_pop(&tx, frame); - udpard_tx_free(tx.memory, frame); - TEST_ASSERT_EQUAL(4, tx.queue_size); - // A2 - frame = udpard_tx_peek(&tx, 0); - TEST_ASSERT_EQUAL(0xAAAAAAAA, frame->destination.ip); - udpard_tx_pop(&tx, frame); - udpard_tx_free(tx.memory, frame); - TEST_ASSERT_EQUAL(3, tx.queue_size); - // A3 - frame = udpard_tx_peek(&tx, 0); - TEST_ASSERT_EQUAL(0xAAAAAAAA, frame->destination.ip); - udpard_tx_pop(&tx, frame); - udpard_tx_free(tx.memory, frame); - TEST_ASSERT_EQUAL(2, tx.queue_size); - // C - frame = udpard_tx_peek(&tx, 0); - TEST_ASSERT_EQUAL(0xCCCCCCCC, frame->destination.ip); - udpard_tx_pop(&tx, frame); - udpard_tx_free(tx.memory, frame); - TEST_ASSERT_EQUAL(1, tx.queue_size); - // D - frame = udpard_tx_peek(&tx, 0); - TEST_ASSERT_EQUAL(0xDDDDDDDD, frame->destination.ip); - udpard_tx_pop(&tx, frame); - udpard_tx_free(tx.memory, frame); - TEST_ASSERT_EQUAL(0, tx.queue_size); - TEST_ASSERT_EQUAL(NULL, udpard_tx_peek(&tx, 0)); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); - free(large_payload); -} - -static void test_tx_push_capacity_limit(void) -{ - instrumented_allocator_t alloc; - instrumented_allocator_new(&alloc); - const udpard_tx_mem_resources_t mem = { - .fragment = instrumented_allocator_make_resource(&alloc), - .payload = instrumented_allocator_make_resource(&alloc), - }; - udpard_tx_t tx; - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x0123456789ABCDEFULL, 2, mem)); // Capacity of only 2 frames - // Use default MTU. Create payload that will span 3 frames (exceeds capacity of 2). - const size_t test_payload_size = 2800; - byte_t* test_payload = malloc(test_payload_size); - TEST_ASSERT_NOT_NULL(test_payload); - for (size_t i = 0; i < test_payload_size; i++) { - test_payload[i] = (byte_t)(i & 0xFFU); - } - const meta_t meta = { - .priority = udpard_prio_nominal, - .flag_ack = false, - .transfer_payload_size = (uint32_t)test_payload_size, - .transfer_id = 0x0123456789ABCDEFULL, - .sender_uid = 0x0123456789ABCDEFULL, - .topic_hash = 0xBBBBBBBBBBBBBBBBULL, - }; - // Try to push a transfer that would exceed capacity (3 frames > capacity of 2) - const uint32_t enqueued = tx_push(&tx, - 1234567890U, - meta, - (udpard_udpip_ep_t){ .ip = 0xBABADEDA, .port = 0xD0ED }, - (udpard_bytes_t){ .size = test_payload_size, .data = test_payload }, - NULL); - - TEST_ASSERT_EQUAL(0, enqueued); // Should fail - TEST_ASSERT_EQUAL(1, tx.errors_capacity); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc.allocated_bytes); - TEST_ASSERT_EQUAL(0, tx.queue_size); - free(test_payload); -} - -static void test_tx_push_oom(void) -{ - instrumented_allocator_t alloc; - instrumented_allocator_new(&alloc); - const udpard_tx_mem_resources_t mem = { - .fragment = instrumented_allocator_make_resource(&alloc), - .payload = instrumented_allocator_make_resource(&alloc), - }; - udpard_tx_t tx; - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x0123456789ABCDEFULL, 10000, mem)); - tx.mtu = (ethereal_strength_size + 2U) / 3U; - const meta_t meta = { - .priority = udpard_prio_nominal, - .flag_ack = false, - .transfer_payload_size = (uint32_t)ethereal_strength_size, - .transfer_id = 0x0123456789ABCDEFULL, - .sender_uid = 0x0123456789ABCDEFULL, - .topic_hash = 0xBBBBBBBBBBBBBBBBULL, - }; - alloc.limit_bytes = ethereal_strength_size; // Not enough for overheads - const uint32_t enqueued = tx_push(&tx, - 1234567890U, - meta, - (udpard_udpip_ep_t){ .ip = 0xBABADEDA, .port = 0xD0ED }, - (udpard_bytes_t){ .size = ethereal_strength_size, .data = ethereal_strength }, - NULL); - TEST_ASSERT_EQUAL(0, enqueued); - TEST_ASSERT_EQUAL(1, tx.errors_oom); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc.allocated_bytes); - TEST_ASSERT_EQUAL(0, tx.queue_size); -} - -static void test_tx_push_payload_oom(void) -{ - instrumented_allocator_t alloc; - instrumented_allocator_new(&alloc); - const udpard_tx_mem_resources_t mem = { - .fragment = instrumented_allocator_make_resource(&alloc), - .payload = instrumented_allocator_make_resource(&alloc), - }; - udpard_tx_t tx; - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x0123456789ABCDEFULL, 10000, mem)); - tx.mtu = ethereal_strength_size; - const meta_t meta = { - .priority = udpard_prio_nominal, - .flag_ack = false, - .transfer_payload_size = (uint32_t)ethereal_strength_size, - .transfer_id = 0x0123456789ABCDEFULL, - .sender_uid = 0x0123456789ABCDEFULL, - .topic_hash = 0xBBBBBBBBBBBBBBBBULL, - }; - // There is memory for the item, but 1 byte short for payload - alloc.limit_bytes = sizeof(udpard_tx_item_t) + (HEADER_SIZE_BYTES + ethereal_strength_size - 1); - const uint32_t enqueued = tx_push(&tx, - 1234567890U, - meta, - (udpard_udpip_ep_t){ .ip = 0xBABADEDA, .port = 0xD0ED }, - (udpard_bytes_t){ .size = ethereal_strength_size, .data = ethereal_strength }, - NULL); - TEST_ASSERT_EQUAL(0, enqueued); - TEST_ASSERT_EQUAL(1, tx.errors_oom); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc.allocated_bytes); - TEST_ASSERT_EQUAL(0, tx.queue_size); -} - -static void test_tx_push_oom_mid_transfer(void) -{ - instrumented_allocator_t alloc; - instrumented_allocator_new(&alloc); - const udpard_tx_mem_resources_t mem = { - .fragment = instrumented_allocator_make_resource(&alloc), - .payload = instrumented_allocator_make_resource(&alloc), - }; - udpard_tx_t tx; - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x0123456789ABCDEFULL, 10000, mem)); - // Create a transfer that requires multiple frames - use large payload to exceed UDPARD_MTU_MIN (460) - // Use a 1000-byte payload which will require 3 frames at MTU=460 - static const byte_t large_payload[1000] = { 0 }; - tx.mtu = 460U; // Use minimum MTU to ensure multi-frame transfer - const meta_t meta = { .priority = udpard_prio_nominal, - .flag_ack = false, - .transfer_payload_size = 1000U, - .transfer_id = 0x0123456789ABCDEFULL, - .sender_uid = 0x0123456789ABCDEFULL, - .topic_hash = 0xBBBBBBBBBBBBBBBBULL }; - // With MTU=460 and payload=1000: frame 0 has 460 bytes, frame 1 has 460 bytes, frame 2 has 80 bytes - // Allow first frame completely (item + payload), then fail on second frame's item allocation - // This triggers OOM during multi-frame transfer, causing rollback of the first frame - const size_t first_frame_payload_size = tx.mtu + HEADER_SIZE_BYTES; - const size_t first_frame_total = sizeof(udpard_tx_item_t) + first_frame_payload_size; - alloc.limit_bytes = first_frame_total; // Second frame's item allocation will exceed this limit - - const uint32_t enqueued = tx_push(&tx, - 1234567890U, - meta, - (udpard_udpip_ep_t){ .ip = 0xBABADEDA, .port = 0xD0ED }, - (udpard_bytes_t){ .size = 1000, .data = large_payload }, - NULL); - - // The entire transfer should fail and be rolled back - TEST_ASSERT_EQUAL(0, enqueued); - TEST_ASSERT_EQUAL(1, tx.errors_oom); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); // All memory should be freed after rollback - TEST_ASSERT_EQUAL(0, alloc.allocated_bytes); - TEST_ASSERT_EQUAL(0, tx.queue_size); -} - -static void test_tx_publish(void) -{ - instrumented_allocator_t alloc; - instrumented_allocator_new(&alloc); - const udpard_tx_mem_resources_t mem = { - .fragment = instrumented_allocator_make_resource(&alloc), - .payload = instrumented_allocator_make_resource(&alloc), - }; - udpard_tx_t tx; - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x0123456789ABCDEFULL, 10, mem)); - const uint32_t enqueued = - udpard_tx_push(&tx, - 1000000, // now - 2000000, // deadline - udpard_prio_nominal, // priority - 0x1122334455667788ULL, // topic_hash - udpard_make_subject_endpoint(123), - 0xBADC0FFEE0DDF00DULL, // transfer_id - (udpard_bytes_t){ .size = detail_of_the_cosmos_size, .data = detail_of_the_cosmos }, - false, // ack_required - NULL); - TEST_ASSERT_EQUAL(1, enqueued); - TEST_ASSERT_EQUAL(1, tx.queue_size); - udpard_tx_item_t* frame = udpard_tx_peek(&tx, 1000000); - TEST_ASSERT_NOT_EQUAL(NULL, frame); - TEST_ASSERT_EQUAL(2000000, frame->deadline); - TEST_ASSERT_EQUAL(udpard_prio_nominal, frame->priority); - // Verify the destination is the correct multicast endpoint - const udpard_udpip_ep_t expected_ep = udpard_make_subject_endpoint(123); - TEST_ASSERT_EQUAL(expected_ep.ip, frame->destination.ip); - TEST_ASSERT_EQUAL(expected_ep.port, frame->destination.port); - udpard_tx_pop(&tx, frame); - udpard_tx_free(tx.memory, frame); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); -} - -static void test_tx_p2p(void) -{ - instrumented_allocator_t alloc; - instrumented_allocator_new(&alloc); - const udpard_tx_mem_resources_t mem = { - .fragment = instrumented_allocator_make_resource(&alloc), - .payload = instrumented_allocator_make_resource(&alloc), - }; - udpard_tx_t tx; - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x0123456789ABCDEFULL, 10, mem)); - const uint32_t enqueued = - udpard_tx_push(&tx, - 1000000, // now - 2000000, // deadline - udpard_prio_high, // priority - 0xFEDCBA9876543210ULL, // remote_uid - (udpard_udpip_ep_t){ .ip = 0xC0A80101, .port = 9999 }, - 0x0BADC0DE0BADC0DEULL, // transfer_id - (udpard_bytes_t){ .size = interstellar_war_size, .data = interstellar_war }, - true, // ack_required - NULL); - TEST_ASSERT_EQUAL(1, enqueued); - TEST_ASSERT_EQUAL(1, tx.queue_size); - udpard_tx_item_t* frame = udpard_tx_peek(&tx, 1000000); - TEST_ASSERT_NOT_EQUAL(NULL, frame); - TEST_ASSERT_EQUAL(2000000, frame->deadline); - TEST_ASSERT_EQUAL(udpard_prio_high, frame->priority); - TEST_ASSERT_EQUAL(0xC0A80101, frame->destination.ip); - TEST_ASSERT_EQUAL(9999, frame->destination.port); - udpard_tx_pop(&tx, frame); - udpard_tx_free(tx.memory, frame); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); -} - -static void test_tx_deadline_expiration(void) -{ - instrumented_allocator_t alloc; - instrumented_allocator_new(&alloc); - const udpard_tx_mem_resources_t mem = { - .fragment = instrumented_allocator_make_resource(&alloc), - .payload = instrumented_allocator_make_resource(&alloc), - }; - udpard_tx_t tx; - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x0123456789ABCDEFULL, 10, mem)); - // Use default MTU. Create payload for 3 frames. - const size_t test_payload_size = 2800; - byte_t* test_payload = malloc(test_payload_size); - TEST_ASSERT_NOT_NULL(test_payload); - for (size_t i = 0; i < test_payload_size; i++) { - test_payload[i] = (byte_t)(i & 0xFFU); - } - // Push a transfer with a deadline in the past - const meta_t meta = { - .priority = udpard_prio_nominal, - .flag_ack = false, - .transfer_payload_size = (uint32_t)test_payload_size, - .transfer_id = 0x0123456789ABCDEFULL, - .sender_uid = 0x0123456789ABCDEFULL, - .topic_hash = 0xBBBBBBBBBBBBBBBBULL, - }; - const uint32_t enqueued = tx_push(&tx, - 1000000, // deadline in the past - meta, - (udpard_udpip_ep_t){ .ip = 0xBABADEDA, .port = 0xD0ED }, - (udpard_bytes_t){ .size = test_payload_size, .data = test_payload }, - NULL); - TEST_ASSERT_EQUAL(3, enqueued); - TEST_ASSERT_EQUAL(3, tx.queue_size); - // Try to peek with current time much later - const udpard_tx_item_t* const frame = udpard_tx_peek(&tx, 2000000); - TEST_ASSERT_EQUAL(NULL, frame); // Should be purged - TEST_ASSERT_EQUAL(0, tx.queue_size); - TEST_ASSERT_EQUAL(3, tx.errors_expiration); // All 3 frames expired - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); - free(test_payload); -} - -static void test_tx_deadline_at_current_time(void) -{ - instrumented_allocator_t alloc; - instrumented_allocator_new(&alloc); - const udpard_tx_mem_resources_t mem = { - .fragment = instrumented_allocator_make_resource(&alloc), - .payload = instrumented_allocator_make_resource(&alloc), - }; - udpard_tx_t tx; - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x0123456789ABCDEFULL, 10, mem)); - const size_t test_payload_size = 100; - byte_t test_payload[100]; - for (size_t i = 0; i < test_payload_size; i++) { - test_payload[i] = (byte_t)(i & 0xFFU); - } - // Test 1: Try to publish with deadline < now (should be rejected) - uint32_t enqueued = udpard_tx_push(&tx, - 1000000, // now - 999999, // deadline in the past - udpard_prio_nominal, - 0x1122334455667788ULL, - udpard_make_subject_endpoint(123), - 0xBADC0FFEE0DDF00DULL, - (udpard_bytes_t){ .size = test_payload_size, .data = test_payload }, - false, - NULL); - TEST_ASSERT_EQUAL(0, enqueued); // Should return 0 (rejected) - TEST_ASSERT_EQUAL(0, tx.queue_size); // Nothing enqueued - // Test 2: Try to publish with deadline == now (should be accepted, as deadline >= now) - enqueued = udpard_tx_push(&tx, - 1000000, // now - 1000000, // deadline equals now - udpard_prio_nominal, - 0x1122334455667788ULL, - udpard_make_subject_endpoint(123), - 0xBADC0FFEE0DDF00DULL, - (udpard_bytes_t){ .size = test_payload_size, .data = test_payload }, - false, - NULL); - TEST_ASSERT_EQUAL(1, enqueued); // Should succeed - TEST_ASSERT_EQUAL(1, tx.queue_size); // One frame enqueued - // Test 3: Try p2p with deadline < now (should be rejected) - enqueued = udpard_tx_push(&tx, - 2000000, // now - 1999999, // deadline in the past - udpard_prio_high, - 0xFEDCBA9876543210ULL, - (udpard_udpip_ep_t){ .ip = 0xC0A80101, .port = 9999 }, - 0x0BADC0DE0BADC0DEULL, - (udpard_bytes_t){ .size = test_payload_size, .data = test_payload }, - false, - NULL); - TEST_ASSERT_EQUAL(0, enqueued); // Should return 0 (rejected) - TEST_ASSERT_EQUAL(1, tx.queue_size); // Still only 1 frame from test 2 - // Clean up - udpard_tx_item_t* frame = udpard_tx_peek(&tx, 0); - while (frame != NULL) { - udpard_tx_item_t* const next = frame->next_in_transfer; - udpard_tx_pop(&tx, frame); - udpard_tx_free(tx.memory, frame); - frame = next; - } - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); -} - -static void test_tx_invalid_params(void) -{ - instrumented_allocator_t alloc; - instrumented_allocator_new(&alloc); - const udpard_tx_mem_resources_t mem = { - .fragment = instrumented_allocator_make_resource(&alloc), - .payload = instrumented_allocator_make_resource(&alloc), - }; - udpard_tx_t tx; - // Test invalid init params - TEST_ASSERT_FALSE(udpard_tx_new(NULL, 0x0123456789ABCDEFULL, 10, mem)); - TEST_ASSERT_FALSE(udpard_tx_new(&tx, 0, 10, mem)); // local_uid cannot be 0 - // Test with invalid memory resources - udpard_tx_mem_resources_t bad_mem = mem; - bad_mem.fragment.alloc = NULL; - TEST_ASSERT_FALSE(udpard_tx_new(&tx, 0x0123456789ABCDEFULL, 10, bad_mem)); - // Valid init - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x0123456789ABCDEFULL, 10, mem)); - // Test publish with NULL self - TEST_ASSERT_EQUAL(0, - udpard_tx_push(NULL, - 1000000, - 2000000, - udpard_prio_nominal, - 0x1122334455667788ULL, - udpard_make_subject_endpoint(123), - 0xBADC0FFEE0DDF00DULL, - (udpard_bytes_t){ .size = 10, .data = "test" }, - false, - NULL)); - // Test publish with invalid priority - // NOLINTNEXTLINE(clang-analyzer-optin.core.EnumCastOutOfRange) - intentionally testing invalid value - const uint_fast8_t invalid_priority = UDPARD_PRIORITY_MAX + 1; - // NOLINTNEXTLINE(clang-analyzer-optin.core.EnumCastOutOfRange) - intentionally testing invalid value - TEST_ASSERT_EQUAL(0, - udpard_tx_push(&tx, - 1000000, - 2000000, - (udpard_prio_t)invalid_priority, - 0x1122334455667788ULL, - udpard_make_subject_endpoint(123), - 0xBADC0FFEE0DDF00DULL, - (udpard_bytes_t){ .size = 10, .data = "test" }, - false, - NULL)); - // Test p2p with invalid params - TEST_ASSERT_EQUAL(0, - udpard_tx_push(&tx, - 1000000, - 2000000, - udpard_prio_high, - 0xFEDCBA9876543210ULL, - (udpard_udpip_ep_t){ .ip = 0, .port = 9999 }, // ip cannot be 0 - 0x0BADC0DE0BADC0DEULL, - (udpard_bytes_t){ .size = 10, .data = "test" }, - false, - NULL)); - TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); -} - void setUp(void) {} void tearDown(void) {} @@ -889,20 +55,5 @@ int main(void) { UNITY_BEGIN(); RUN_TEST(test_tx_serialize_header); - RUN_TEST(test_tx_spool_empty); - RUN_TEST(test_tx_spool_single_max_mtu); - RUN_TEST(test_tx_spool_single_frame_default_mtu); - RUN_TEST(test_tx_spool_three_frames); - RUN_TEST(test_tx_push_peek_pop_free); - RUN_TEST(test_tx_push_prioritization); - RUN_TEST(test_tx_push_capacity_limit); - RUN_TEST(test_tx_push_oom); - RUN_TEST(test_tx_push_payload_oom); - RUN_TEST(test_tx_push_oom_mid_transfer); - RUN_TEST(test_tx_publish); - RUN_TEST(test_tx_p2p); - RUN_TEST(test_tx_deadline_expiration); - RUN_TEST(test_tx_deadline_at_current_time); - RUN_TEST(test_tx_invalid_params); return UNITY_END(); } From ecb57043abf1ea5f512643579c628d6434fcfc27 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Fri, 26 Dec 2025 19:00:25 +0200 Subject: [PATCH 10/42] refcounting --- libudpard/udpard.c | 56 ++++++++++++++++++++++++++++++++++------------ libudpard/udpard.h | 16 +++++++------ 2 files changed, 51 insertions(+), 21 deletions(-) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index 5464f48..24326b4 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -474,12 +474,23 @@ static bool header_deserialize(const udpard_bytes_mut_t dgram_payload, /// but it might create a bit more memory pressure on average. typedef struct tx_frame_t { + size_t refcount; ///< Buffer destroyed when refcount reaches zero. struct tx_frame_t* next; byte_t data[]; } tx_frame_t; static size_t tx_frame_object_size(const size_t mtu) { return sizeof(tx_frame_t) + mtu + HEADER_SIZE_BYTES; } +static udpard_bytes_t tx_frame_view(const tx_frame_t* const frame, const size_t mtu) +{ + return (udpard_bytes_t){ .size = mtu + HEADER_SIZE_BYTES, .data = frame->data }; +} + +static tx_frame_t* tx_frame_from_view(const udpard_bytes_t view) +{ + return (tx_frame_t*)unbias_ptr(view.data, offsetof(tx_frame_t, data)); +} + typedef struct { uint64_t topic_hash; @@ -536,14 +547,14 @@ static bool tx_validate_mem_resources(const udpard_tx_mem_resources_t memory) (memory.payload.alloc != NULL) && (memory.payload.free != NULL); } -static void tx_transfer_free_payload(const udpard_tx_mem_resources_t mem, tx_transfer_t* const tr) +static void tx_transfer_free_payload(udpard_tx_t* const tx, tx_transfer_t* const tr) { UDPARD_ASSERT(tr != NULL); tx_frame_t* frame = tr->head; while (frame != NULL) { tx_frame_t* const next = frame->next; const size_t mtu = (frame->next == NULL) ? tr->mtu_last : tr->mtu; - mem_free(mem.payload, tx_frame_object_size(mtu), frame); + udpard_tx_refcount_dec(tx, tx_frame_view(frame, mtu)); frame = next; } tr->head = NULL; @@ -553,7 +564,7 @@ static void tx_transfer_free_payload(const udpard_tx_mem_resources_t mem, tx_tra static void tx_transfer_free(udpard_tx_t* const tx, tx_transfer_t* const tr) { UDPARD_ASSERT(tr != NULL); - tx_transfer_free_payload(tx->memory, tr); + tx_transfer_free_payload(tx, tr); // Remove the transfer from all indexes. delist(&tx->queue[tr->priority], &tr->queue); if (cavl2_is_inserted(tx->index_staged, &tr->index_staged)) { @@ -627,6 +638,7 @@ static tx_frame_t* tx_spool(const udpard_tx_mem_resources_t memory, break; } // Populate the frame contents. + tail->refcount = 1; tail->next = NULL; const byte_t* const read_ptr = ((const byte_t*)payload.data) + offset; prefix_crc = crc_add(prefix_crc, progress, read_ptr); @@ -735,7 +747,7 @@ static void tx_receive_ack(udpard_rx_t* const rx, .attempts = tr->attempts, .success = true, }; - tx_transfer_free_payload(tx->memory, tr); // do this early to release memory before callback + tx_transfer_free_payload(tx, tr); // do this early to release memory before callback tr->feedback(tx, fb); tx_transfer_free(tx, tr); } @@ -861,7 +873,7 @@ static void tx_purge_expired(udpard_tx_t* const self, const udpard_us_t now) .attempts = tr->attempts, .success = false, }; - tx_transfer_free_payload(self->memory, tr); // do this early to release memory before callback + tx_transfer_free_payload(self, tr); // do this early to release memory before callback if (tr->feedback != NULL) { tr->feedback(self, fb); } @@ -914,10 +926,6 @@ static void tx_eject_pending(udpard_tx_t* const self, const udpard_us_t now) tx_frame_t* const frame_next = frame->next; const bool last_attempt = tr->deadline <= tr->retry_at; const bool last_frame = frame_next == NULL; // if not last attempt we will have to rewind to head - const size_t frame_size = last_frame ? tr->mtu_last : tr->mtu; - // Transfer ownership to the application if no further attempts will be made to reduce queue/memory pressure. - const udpard_bytes_mut_t frame_origin = { .size = last_attempt ? tx_frame_object_size(frame_size) : 0U, - .data = last_attempt ? frame : NULL }; // Eject the frame. const udpard_tx_ejection_t ejection = { @@ -925,8 +933,7 @@ static void tx_eject_pending(udpard_tx_t* const self, const udpard_us_t now) .deadline = tr->deadline, .dscp = self->dscp_value_per_priority[tr->priority], .destination = tr->destination, - .datagram_view = { .size = HEADER_SIZE_BYTES + frame_size, .data = frame->data }, - .datagram_origin = frame_origin, + .datagram = tx_frame_view(frame, last_frame ? tr->mtu_last : tr->mtu), .user_transfer_reference = tr->user_transfer_reference, }; if (!self->vtable->eject(self, ejection)) { // The easy case -- no progress was made at this time; @@ -937,8 +944,10 @@ static void tx_eject_pending(udpard_tx_t* const self, const udpard_us_t now) if (last_attempt) { // no need to keep frames that we will no longer use; free early to reduce pressure UDPARD_ASSERT(tr->head == tr->cursor); // They go together on the last attempt. tr->head = frame_next; - self->enqueued_frames_count--; // Ownership transferred to the application. + udpard_tx_refcount_dec(self, ejection.datagram); + self->enqueued_frames_count--; } + tr->cursor = frame_next; // Finalize the transmission if this was the last frame of the transfer. if (last_frame) { @@ -954,8 +963,6 @@ static void tx_eject_pending(udpard_tx_t* const self, const udpard_us_t now) cavl2_find_or_insert( &self->index_staged, &tr->retry_at, tx_cavl_compare_staged, &tr->index_staged, cavl2_trivial_factory); } - } else { - tr->cursor = frame_next; } } } @@ -969,6 +976,27 @@ void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now) } } +void udpard_tx_refcount_inc(udpard_tx_t* const self, const udpard_bytes_t datagram) +{ + if ((self != NULL) && (datagram.data != NULL)) { + tx_frame_t* const frame = tx_frame_from_view(datagram); + UDPARD_ASSERT(frame->refcount > 0); // NOLINT(*ArrayBound) + frame->refcount++; + } +} + +void udpard_tx_refcount_dec(udpard_tx_t* const self, const udpard_bytes_t datagram) +{ + if ((self != NULL) && (datagram.data != NULL)) { + tx_frame_t* const frame = tx_frame_from_view(datagram); + UDPARD_ASSERT(frame->refcount > 0); // NOLINT(*ArrayBound) + frame->refcount--; + if (frame->refcount == 0U) { + mem_free(self->memory.payload, tx_frame_object_size(datagram.size), frame); + } + } +} + void udpard_tx_free(udpard_tx_t* const self) { if (self != NULL) { diff --git a/libudpard/udpard.h b/libudpard/udpard.h index 4c22974..50fd6dd 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -331,13 +331,10 @@ typedef struct udpard_tx_ejection_t uint_fast8_t dscp; ///< Set the DSCP field of the outgoing packet to this. udpard_udpip_ep_t destination; ///< Unicast or multicast UDP/IP endpoint. - /// If the ejection handler returns success, the application is responsible for freeing the datagram_origin.data - /// using udpard_tx_t::memory.payload.free() at some point in the future (either within the callback or later), - /// unless datagram_origin.data is NULL, in which case the library will retain the ownership. - /// It may help to know that the view is a small fixed offset greater than the origin, - /// so both may not have to be kept, depending on the implementation. - udpard_bytes_t datagram_view; ///< Transmit this; do not free it. - udpard_bytes_mut_t datagram_origin; ///< Free this unless NULL. + /// If the datagram pointer is retained by the application, udpard_tx_refcount_inc() must be invoked on it. + /// When no longer needed (e.g, upon transmission), udpard_tx_refcount_dec() must be invoked. + /// Ref counting is needed because the library may need to retain the buffer for subsequent retransmissions. + udpard_bytes_t datagram; /// This is the same pointer that was passed to udpard_tx_push(). void* user_transfer_reference; @@ -483,6 +480,11 @@ uint32_t udpard_tx_push(udpard_tx_t* const self, /// The function may deallocate memory. The time complexity is logarithmic in the number of enqueued transfers. void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now); +/// When a datagram is ejected and the application opts to keep it, these functions must be used to manage the +/// datagram buffer lifetime. The datagram will be freed once the reference count reaches zero. +void udpard_tx_refcount_inc(udpard_tx_t* const self, const udpard_bytes_t datagram); +void udpard_tx_refcount_dec(udpard_tx_t* const self, const udpard_bytes_t datagram); + /// Drops all enqueued items; afterward, the instance is safe to discard. Callbacks will not be invoked. void udpard_tx_free(udpard_tx_t* const self); From 223d6f098c5bc612dc9336dd8055ee54897bd71e Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Fri, 26 Dec 2025 23:40:12 +0200 Subject: [PATCH 11/42] shared tx queue wip --- .clang-tidy | 1 + libudpard/udpard.c | 339 ++++++++++++++++++++++++++------------------- libudpard/udpard.h | 104 +++++--------- 3 files changed, 230 insertions(+), 214 deletions(-) diff --git a/.clang-tidy b/.clang-tidy index 6a3e3b3..62d2be1 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -34,6 +34,7 @@ Checks: >- -*-use-enum-class, -*-use-trailing-return-type, -*-deprecated-headers, + -*-avoid-c-arrays, CheckOptions: - key: readability-function-cognitive-complexity.Threshold value: '99' diff --git a/libudpard/udpard.c b/libudpard/udpard.c index 24326b4..fce2b2d 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -87,6 +87,13 @@ static int64_t max_i64(const int64_t a, const int64_t b) { return (a > b) ? static udpard_us_t earlier(const udpard_us_t a, const udpard_us_t b) { return min_i64(a, b); } static udpard_us_t later(const udpard_us_t a, const udpard_us_t b) { return max_i64(a, b); } +/// Two memory resources are considered identical if they share the same user pointer and the same allocation function. +/// The deallocation function is intentionally excluded from the comparison. +static bool mem_same(const udpard_mem_resource_t a, const udpard_mem_resource_t b) +{ + return (a.user == b.user) && (a.alloc == b.alloc); +} + static void* mem_alloc(const udpard_mem_resource_t memory, const size_t size) { UDPARD_ASSERT(memory.alloc != NULL); @@ -173,6 +180,16 @@ bool udpard_is_valid_endpoint(const udpard_udpip_ep_t ep) return (ep.port != 0) && (ep.ip != 0) && (ep.ip != UINT32_MAX); } +static bool has_valid_endpoint(const udpard_udpip_ep_t remote_ep[UDPARD_IFACE_COUNT_MAX]) +{ + for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + if (udpard_is_valid_endpoint(remote_ep[i])) { + return true; + } + } + return false; +} + udpard_udpip_ep_t udpard_make_subject_endpoint(const uint32_t subject_id) { return (udpard_udpip_ep_t){ .ip = IPv4_MCAST_PREFIX | (subject_id & UDPARD_IPv4_SUBJECT_ID_MAX), .port = UDP_PORT }; @@ -330,6 +347,12 @@ static uint32_t crc_full(const size_t n_bytes, const void* const data) // --------------------------------------------- LIST CONTAINER --------------------------------------------- +/// True iff the member is in the list. +static bool is_listed(const udpard_list_t* const list, const udpard_list_member_t* const member) +{ + return (member->next != NULL) || (member->prev != NULL) || (list->head == member); +} + /// No effect if not in the list. static void delist(udpard_list_t* const list, udpard_list_member_t* const member) { @@ -469,21 +492,18 @@ static bool header_deserialize(const udpard_bytes_mut_t dgram_payload, // --------------------------------------------- TX PIPELINE --------------------------------------------- // --------------------------------------------------------------------------------------------------------------------- -/// This may be allocated in the NIC DMA region so we keep overheads tight. -/// An alternative solution is to allocate a flex array of void* pointers, one per fragment, directly in tx_transfer_t, -/// but it might create a bit more memory pressure on average. typedef struct tx_frame_t { - size_t refcount; ///< Buffer destroyed when refcount reaches zero. - struct tx_frame_t* next; - byte_t data[]; + size_t refcount; + udpard_mem_deleter_t deleter; + struct tx_frame_t* next; + size_t size; + byte_t data[]; } tx_frame_t; -static size_t tx_frame_object_size(const size_t mtu) { return sizeof(tx_frame_t) + mtu + HEADER_SIZE_BYTES; } - -static udpard_bytes_t tx_frame_view(const tx_frame_t* const frame, const size_t mtu) +static udpard_bytes_t tx_frame_view(const tx_frame_t* const frame) { - return (udpard_bytes_t){ .size = mtu + HEADER_SIZE_BYTES, .data = frame->data }; + return (udpard_bytes_t){ .size = frame->size, .data = frame->data }; } static tx_frame_t* tx_frame_from_view(const udpard_bytes_t view) @@ -491,6 +511,18 @@ static tx_frame_t* tx_frame_from_view(const udpard_bytes_t view) return (tx_frame_t*)unbias_ptr(view.data, offsetof(tx_frame_t, data)); } +static tx_frame_t* tx_frame_new(const udpard_mem_resource_t mem, const size_t data_size) +{ + tx_frame_t* const frame = (tx_frame_t*)mem_alloc(mem, sizeof(tx_frame_t) + data_size); + if (frame != NULL) { + frame->refcount = 1U; + frame->deleter = (udpard_mem_deleter_t){ .user = mem.user, .free = mem.free }; + frame->next = NULL; + frame->size = data_size; + } + return frame; +} + typedef struct { uint64_t topic_hash; @@ -512,30 +544,26 @@ typedef struct tx_transfer_t udpard_tree_t index_staged; ///< Soonest to be ready on the left. Key: retry_at udpard_tree_t index_deadline; ///< Soonest to expire on the left. Key: deadline udpard_tree_t index_transfer; ///< Specific transfer lookup for ack management. Key: tx_transfer_key_t - udpard_list_member_t queue; ///< Listed when ready for transmission. + udpard_list_member_t queue[UDPARD_IFACE_COUNT_MAX]; ///< Listed when ready for transmission. /// We always keep a pointer to the head, plus a cursor that scans the frames during transmission. /// Both are NULL if the payload is destroyed. /// The head points to the first frame unless it is known that no (further) retransmissions are needed, /// in which case the old head is deleted and the head points to the next frame to transmit. - tx_frame_t* head; + tx_frame_t* head[UDPARD_IFACE_COUNT_MAX]; /// Mutable transmission state. All other fields, except for the index handles, are immutable. - tx_frame_t* cursor; - uint_fast8_t attempts; ///< Does not overflow due to exponential backoff. + tx_frame_t* cursor[UDPARD_IFACE_COUNT_MAX]; + uint_fast8_t attempts[UDPARD_IFACE_COUNT_MAX]; ///< Does not overflow due to exponential backoff. udpard_us_t retry_at; ///< If retry_at>=deadline, this is the last attempt; frames can be freed as they go out. - /// All frames except for the last one share the same MTU, so there's no use keeping dedicated size per frame. - size_t mtu; - size_t mtu_last; - /// Constant transfer properties supplied by the client. uint64_t topic_hash; uint64_t transfer_id; udpard_us_t deadline; bool reliable; udpard_prio_t priority; - udpard_udpip_ep_t destination; + udpard_udpip_ep_t destination[UDPARD_IFACE_COUNT_MAX]; void* user_transfer_reference; void (*feedback)(udpard_tx_t*, udpard_tx_feedback_t); @@ -543,30 +571,36 @@ typedef struct tx_transfer_t static bool tx_validate_mem_resources(const udpard_tx_mem_resources_t memory) { - return (memory.transfer.alloc != NULL) && (memory.transfer.free != NULL) && // - (memory.payload.alloc != NULL) && (memory.payload.free != NULL); + for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + if ((memory.payload[i].alloc == NULL) || (memory.payload[i].free == NULL)) { + return false; + } + } + return (memory.transfer.alloc != NULL) && (memory.transfer.free != NULL); } -static void tx_transfer_free_payload(udpard_tx_t* const tx, tx_transfer_t* const tr) +static void tx_transfer_free_payload(tx_transfer_t* const tr) { UDPARD_ASSERT(tr != NULL); - tx_frame_t* frame = tr->head; - while (frame != NULL) { - tx_frame_t* const next = frame->next; - const size_t mtu = (frame->next == NULL) ? tr->mtu_last : tr->mtu; - udpard_tx_refcount_dec(tx, tx_frame_view(frame, mtu)); - frame = next; + for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + const tx_frame_t* frame = tr->head[i]; + while (frame != NULL) { + const tx_frame_t* const next = frame->next; + udpard_tx_refcount_dec(tx_frame_view(frame)); + frame = next; + } + tr->head[i] = NULL; + tr->cursor[i] = NULL; } - tr->head = NULL; - tr->cursor = NULL; } static void tx_transfer_free(udpard_tx_t* const tx, tx_transfer_t* const tr) { UDPARD_ASSERT(tr != NULL); - tx_transfer_free_payload(tx, tr); - // Remove the transfer from all indexes. - delist(&tx->queue[tr->priority], &tr->queue); + tx_transfer_free_payload(tr); + for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + delist(&tx->queue[i][tr->priority], &tr->queue[i]); + } if (cavl2_is_inserted(tx->index_staged, &tr->index_staged)) { cavl2_remove(&tx->index_staged, &tr->index_staged); } @@ -575,6 +609,33 @@ static void tx_transfer_free(udpard_tx_t* const tx, tx_transfer_t* const tr) mem_free(tx->memory.transfer, sizeof(tx_transfer_t), tr); } +/// When the queue is exhausted, finds a transfer to sacrifice using simple heuristics and returns it. +/// The heuristics are subject to review and improvement. +/// Will return NULL if there are no transfers worth sacrificing (no queue space can be reclaimed). +static tx_transfer_t* tx_sacrifice(udpard_tx_t* const tx) +{ + uint16_t max_attempts = 0; + tx_transfer_t* out = NULL; + // Scanning from the earliest deadline, meaning we prefer to sacrifice transfers that are the soonest to expire. + for (tx_transfer_t* tr = CAVL2_TO_OWNER(cavl2_min(tx->index_deadline), tx_transfer_t, index_deadline); tr != NULL; + tr = CAVL2_TO_OWNER(cavl2_next_greater(&tr->index_deadline), tx_transfer_t, index_deadline)) { + uint16_t attempts = 0; + for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + attempts += tr->attempts[i]; + } + if ((attempts > 0) && !tr->reliable) { // Prefer non-reliable transfers that have been transmitted once. + out = tr; + break; + } + if (attempts > max_attempts) { + tr = out; + max_attempts = attempts; + } + return out; + } + return out; +} + static int32_t tx_cavl_compare_staged(const void* const user, const udpard_tree_t* const node) { return ((*(const udpard_us_t*)user) >= CAVL2_TO_OWNER(node, tx_transfer_t, index_staged)->retry_at) ? +1 : -1; @@ -601,11 +662,24 @@ static tx_transfer_t* tx_transfer_find(udpard_tx_t* const tx, const uint64_t top cavl2_find(tx->index_transfer, &key, &tx_cavl_compare_transfer), tx_transfer_t, index_transfer); } +static udpard_tx_feedback_t tx_make_feedback(const tx_transfer_t* const tr, const bool success) +{ + udpard_tx_feedback_t fb = { .topic_hash = tr->topic_hash, + .transfer_id = tr->transfer_id, + .user_transfer_reference = tr->user_transfer_reference, + .success = success }; + for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + fb.remote_ep[i] = tr->destination[i]; + fb.attempts[i] = tr->attempts[i]; + } + return fb; +} + /// Returns the head of the transfer chain; NULL on OOM. -static tx_frame_t* tx_spool(const udpard_tx_mem_resources_t memory, - const size_t mtu, - const meta_t meta, - const udpard_bytes_t payload) +static tx_frame_t* tx_spool(const udpard_mem_resource_t memory, + const size_t mtu, + const meta_t meta, + const udpard_bytes_t payload) { UDPARD_ASSERT(mtu > 0); UDPARD_ASSERT((payload.data != NULL) || (payload.size == 0U)); @@ -620,7 +694,7 @@ static tx_frame_t* tx_spool(const udpard_tx_mem_resources_t memory, // Compute the size of the next frame, allocate it and link it up in the chain. const size_t progress = smaller(payload.size - offset, mtu); { - tx_frame_t* const item = mem_alloc(memory.payload, sizeof(tx_frame_t) + progress + HEADER_SIZE_BYTES); + tx_frame_t* const item = tx_frame_new(memory, progress); if (NULL == head) { head = item; } else { @@ -632,14 +706,12 @@ static tx_frame_t* tx_spool(const udpard_tx_mem_resources_t memory, if (NULL == tail) { while (head != NULL) { tx_frame_t* const next = head->next; - mem_free(memory.payload, tx_frame_object_size((head == tail) ? progress : mtu), head); + mem_free(memory, sizeof(tx_frame_t) + head->size, head); head = next; } break; } // Populate the frame contents. - tail->refcount = 1; - tail->next = NULL; const byte_t* const read_ptr = ((const byte_t*)payload.data) + offset; prefix_crc = crc_add(prefix_crc, progress, read_ptr); byte_t* const write_ptr = @@ -674,8 +746,8 @@ static uint32_t tx_push(udpard_tx_t* const tx, uint32_t out = 0; // The number of frames enqueued; zero on error (error counters incremented). const size_t payload_size = payload.size; const size_t mtu = larger(tx->mtu, UDPARD_MTU_MIN); - const size_t mtu_last = ((payload_size % mtu) != 0U) ? (payload_size % mtu) : mtu; const size_t n_frames = larger(1, (payload_size + mtu - 1U) / mtu); + // TODO: tx_sacrifice() if ((tx->enqueued_frames_count + n_frames) > tx->enqueued_frames_limit) { tx->errors_capacity++; } else { @@ -686,8 +758,6 @@ static uint32_t tx_push(udpard_tx_t* const tx, tr->retry_at = meta.flag_ack // ? (now + tx_ack_timeout(tx->ack_baseline_timeout, meta.priority, 0)) : HEAT_DEATH; - tr->mtu = mtu; - tr->mtu_last = mtu_last; tr->topic_hash = meta.topic_hash; tr->transfer_id = meta.transfer_id; tr->deadline = deadline; @@ -734,23 +804,15 @@ static void tx_receive_ack(udpard_rx_t* const rx, const udpard_remote_t remote) { (void)remote; - for (uint_fast8_t i = 0; i < UDPARD_NETWORK_INTERFACE_COUNT_MAX; i++) { - udpard_tx_t* const tx = rx->tx[i]; - if (tx != NULL) { - tx_transfer_t* const tr = tx_transfer_find(tx, topic_hash, transfer_id); - if ((tr != NULL) && (tr->feedback != NULL)) { // don't match non-reliable transfers - const udpard_tx_feedback_t fb = { - .topic_hash = tr->topic_hash, - .transfer_id = tr->transfer_id, - .remote_ep = tr->destination, - .user_transfer_reference = tr->user_transfer_reference, - .attempts = tr->attempts, - .success = true, - }; - tx_transfer_free_payload(tx, tr); // do this early to release memory before callback - tr->feedback(tx, fb); - tx_transfer_free(tx, tr); + if (rx->tx != NULL) { + tx_transfer_t* const tr = tx_transfer_find(rx->tx, topic_hash, transfer_id); + if ((tr != NULL) && tr->reliable) { + if (tr->feedback != NULL) { + const udpard_tx_feedback_t fb = tx_make_feedback(tr, true); + tx_transfer_free_payload(tr); // do this early to release memory before callback + tr->feedback(rx->tx, fb); } + tx_transfer_free(rx->tx, tr); } } } @@ -763,38 +825,34 @@ static void tx_send_ack(udpard_rx_t* const rx, const uint64_t transfer_id, const udpard_remote_t remote) { - // Compose the ack transfer payload. It simply contains the topic hash and the ID of the acked transfer. - byte_t header[UDPARD_P2P_HEADER_BYTES]; - byte_t* ptr = header; - *ptr++ = P2P_KIND_ACK; - ptr += 7U; // Reserved bytes. - ptr = serialize_u64(ptr, topic_hash); - ptr = serialize_u64(ptr, transfer_id); - UDPARD_ASSERT((ptr - header) == UDPARD_P2P_HEADER_BYTES); - (void)ptr; - const udpard_bytes_t payload = { .size = UDPARD_P2P_HEADER_BYTES, .data = header }; - - // Enqueue the ack transfer. - const uint64_t p2p_transfer_id = rx->p2p_transfer_id++; - for (uint_fast8_t i = 0; i < UDPARD_NETWORK_INTERFACE_COUNT_MAX; i++) { - udpard_tx_t* const tx = rx->tx[i]; - if ((tx != NULL) && udpard_is_valid_endpoint(remote.endpoints[i])) { - // TODO: scan the transmission queue for already pending acks; abort if one is already there. - const uint32_t count = udpard_tx_push(tx, - now, - now + ACK_TX_DEADLINE, - priority, - remote.uid, // this is a P2P transfer - remote.endpoints[i], - p2p_transfer_id, - payload, - false, - NULL); - UDPARD_ASSERT(count <= 1); - if (count != 1) { // ack is always a single-frame transfer, so we get either 0 or 1 - rx->errors_ack_tx[i]++; - } + if (rx->tx != NULL) { + byte_t header[UDPARD_P2P_HEADER_BYTES]; + byte_t* ptr = header; + *ptr++ = P2P_KIND_ACK; + ptr += 7U; // Reserved bytes. + ptr = serialize_u64(ptr, topic_hash); + ptr = serialize_u64(ptr, transfer_id); + UDPARD_ASSERT((ptr - header) == UDPARD_P2P_HEADER_BYTES); + (void)ptr; + const udpard_bytes_t payload = { .size = UDPARD_P2P_HEADER_BYTES, .data = header }; + // TODO: scan the transmission queue for already pending acks; abort if one is already there. + const uint64_t p2p_transfer_id = rx->p2p_transfer_id++; + const uint32_t count = udpard_tx_push(rx->tx, + now, + now + ACK_TX_DEADLINE, + priority, + remote.uid, // this is a P2P transfer + remote.endpoints, + p2p_transfer_id, + payload, + NULL, + NULL); + UDPARD_ASSERT(count <= 1); + if (count != 1) { // ack is always a single-frame transfer, so we get either 0 or 1 + rx->errors_ack_tx++; } + } else { + rx->errors_ack_tx++; } } @@ -810,19 +868,21 @@ bool udpard_tx_new(udpard_tx_t* const self, mem_zero(sizeof(*self), self); self->vtable = vtable; self->local_uid = local_uid; - self->mtu = UDPARD_MTU_DEFAULT; self->ack_baseline_timeout = UDPARD_TX_ACK_BASELINE_TIMEOUT_DEFAULT_us; self->enqueued_frames_limit = enqueued_frames_limit; self->enqueued_frames_count = 0; self->memory = memory; - for (size_t i = 0; i < UDPARD_PRIORITY_COUNT; i++) { - self->queue[i].head = NULL; - self->queue[i].tail = NULL; + self->index_staged = NULL; + self->index_deadline = NULL; + self->index_transfer = NULL; + self->user = NULL; + for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + self->mtu[i] = UDPARD_MTU_DEFAULT; + for (uint_fast8_t p = 0; p < UDPARD_PRIORITY_COUNT; p++) { + self->queue[i][p].head = NULL; + self->queue[i][p].tail = NULL; + } } - self->index_staged = NULL; - self->index_deadline = NULL; - self->index_transfer = NULL; - self->user = NULL; } return ok; } @@ -832,7 +892,7 @@ uint32_t udpard_tx_push(udpard_tx_t* const self, const udpard_us_t deadline, const udpard_prio_t priority, const uint64_t topic_hash, - const udpard_udpip_ep_t remote_ep, + const udpard_udpip_ep_t remote_ep[UDPARD_IFACE_COUNT_MAX], const uint64_t transfer_id, const udpard_bytes_t payload, void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), @@ -840,13 +900,13 @@ uint32_t udpard_tx_push(udpard_tx_t* const self, { uint32_t out = 0; const bool ok = (self != NULL) && (deadline >= now) && (now >= 0) && (self->local_uid != 0) && - udpard_is_valid_endpoint(remote_ep) && (priority <= UDPARD_PRIORITY_MAX) && + has_valid_endpoint(remote_ep) && (priority <= UDPARD_PRIORITY_MAX) && ((payload.data != NULL) || (payload.size == 0U)) && (tx_transfer_find(self, topic_hash, transfer_id) == NULL); if (ok) { // Before attempting to enqueue a new transfer, we need to update the transmission scheduler. // It may release some items from the tx queue, and it may also promote some staged transfers to the queue. - udpard_tx_poll(self, now); + udpard_tx_poll(self, now, UDPARD_IFACE_MASK_ALL); const meta_t meta = { .priority = priority, .flag_ack = feedback != NULL, @@ -865,16 +925,9 @@ static void tx_purge_expired(udpard_tx_t* const self, const udpard_us_t now) while (true) { // we can use next_greater instead of doing min search every time tx_transfer_t* const tr = CAVL2_TO_OWNER(cavl2_min(self->index_deadline), tx_transfer_t, index_deadline); if ((tr != NULL) && (now > tr->deadline)) { - const udpard_tx_feedback_t fb = { - .topic_hash = tr->topic_hash, - .transfer_id = tr->transfer_id, - .remote_ep = tr->destination, - .user_transfer_reference = tr->user_transfer_reference, - .attempts = tr->attempts, - .success = false, - }; - tx_transfer_free_payload(self, tr); // do this early to release memory before callback if (tr->feedback != NULL) { + const udpard_tx_feedback_t fb = tx_make_feedback(tr, false); + tx_transfer_free_payload(tr); // do this early to release memory before callback tr->feedback(self, fb); } tx_transfer_free(self, tr); @@ -903,13 +956,13 @@ static void tx_promote_staged(udpard_tx_t* const self, const udpard_us_t now) } } -static void tx_eject_pending(udpard_tx_t* const self, const udpard_us_t now) +static void tx_eject_pending(udpard_tx_t* const self, const udpard_us_t now, const uint_fast8_t ifindex) { while (true) { // Find the highest-priority pending transfer. tx_transfer_t* tr = NULL; for (size_t prio = 0; prio < UDPARD_PRIORITY_COUNT; prio++) { // dear compiler, please unroll - tx_transfer_t* const candidate = LIST_TAIL(self->queue[prio], tx_transfer_t, queue); + tx_transfer_t* const candidate = LIST_TAIL(self->queue[ifindex][prio], tx_transfer_t, queue); if (candidate != NULL) { tr = candidate; break; @@ -922,7 +975,7 @@ static void tx_eject_pending(udpard_tx_t* const self, const udpard_us_t now) UDPARD_ASSERT(tr->cursor != NULL); // cannot be pending without payload, doesn't make sense // Compute the auxiliary states that will guide the ejection. - tx_frame_t* const frame = tr->cursor; + tx_frame_t* const frame = tr->cursor[ifindex]; tx_frame_t* const frame_next = frame->next; const bool last_attempt = tr->deadline <= tr->retry_at; const bool last_frame = frame_next == NULL; // if not last attempt we will have to rewind to head @@ -932,8 +985,8 @@ static void tx_eject_pending(udpard_tx_t* const self, const udpard_us_t now) .now = now, .deadline = tr->deadline, .dscp = self->dscp_value_per_priority[tr->priority], - .destination = tr->destination, - .datagram = tx_frame_view(frame, last_frame ? tr->mtu_last : tr->mtu), + .destination = tr->destination[ifindex], + .datagram = tx_frame_view(frame), .user_transfer_reference = tr->user_transfer_reference, }; if (!self->vtable->eject(self, ejection)) { // The easy case -- no progress was made at this time; @@ -943,20 +996,20 @@ static void tx_eject_pending(udpard_tx_t* const self, const udpard_us_t now) // Frame ejected successfully. Update the transfer state to get ready for the next frame. if (last_attempt) { // no need to keep frames that we will no longer use; free early to reduce pressure UDPARD_ASSERT(tr->head == tr->cursor); // They go together on the last attempt. - tr->head = frame_next; - udpard_tx_refcount_dec(self, ejection.datagram); + tr->head[ifindex] = frame_next; + udpard_tx_refcount_dec(ejection.datagram); self->enqueued_frames_count--; } - tr->cursor = frame_next; + tr->cursor[ifindex] = frame_next; // Finalize the transmission if this was the last frame of the transfer. if (last_frame) { - tr->cursor = tr->head; - tr->attempts++; - delist(&self->queue[tr->priority], &tr->queue); // no longer pending for transmission + tr->cursor[ifindex] = tr->head[ifindex]; + tr->attempts[ifindex]++; + delist(&self->queue[ifindex][tr->priority], &tr->queue[ifindex]); // no longer pending for transmission if (last_attempt) { - if (tr->feedback == NULL) { // Best-effort transfers are removed immediately. - tx_transfer_free(self, tr); + if (!tr->reliable) { // Best-effort transfers are removed immediately. + tx_transfer_free(self, tr); // We can invoke the feedback callback here if needed. } // If this is the last attempt of a reliable transfer, it will wait for ack or expiration. } else { // Reinsert into the staged index for later retransmission if not acknowledged. @@ -967,32 +1020,36 @@ static void tx_eject_pending(udpard_tx_t* const self, const udpard_us_t now) } } -void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now) +void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now, const uint_fast8_t iface_mask) { if ((self != NULL) && (now >= 0)) { // This is the main scheduler state machine update tick. tx_purge_expired(self, now); // This may free up some memory and some queue slots. tx_promote_staged(self, now); // This may add some new transfers to the queue. - tx_eject_pending(self, now); // The queue is now up to date and we can try to eject some frames. + for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + if ((iface_mask & (1U << i)) != 0U) { + tx_eject_pending(self, now, i); + } + } } } -void udpard_tx_refcount_inc(udpard_tx_t* const self, const udpard_bytes_t datagram) +void udpard_tx_refcount_inc(const udpard_bytes_t tx_payload_view) { - if ((self != NULL) && (datagram.data != NULL)) { - tx_frame_t* const frame = tx_frame_from_view(datagram); + if (tx_payload_view.data != NULL) { + tx_frame_t* const frame = tx_frame_from_view(tx_payload_view); UDPARD_ASSERT(frame->refcount > 0); // NOLINT(*ArrayBound) frame->refcount++; } } -void udpard_tx_refcount_dec(udpard_tx_t* const self, const udpard_bytes_t datagram) +void udpard_tx_refcount_dec(const udpard_bytes_t tx_payload_view) { - if ((self != NULL) && (datagram.data != NULL)) { - tx_frame_t* const frame = tx_frame_from_view(datagram); + if (tx_payload_view.data != NULL) { + tx_frame_t* const frame = tx_frame_from_view(tx_payload_view); UDPARD_ASSERT(frame->refcount > 0); // NOLINT(*ArrayBound) frame->refcount--; if (frame->refcount == 0U) { - mem_free(self->memory.payload, tx_frame_object_size(datagram.size), frame); + frame->deleter.free(frame->deleter.user, sizeof(tx_frame_t) + tx_payload_view.size, frame); } } } @@ -1683,7 +1740,7 @@ static void rx_session_update(rx_session_t* const self, // Update the return path discovery state. // We identify nodes by their UID, allowing them to migrate across interfaces and IP addresses. - UDPARD_ASSERT(ifindex < UDPARD_NETWORK_INTERFACE_COUNT_MAX); + UDPARD_ASSERT(ifindex < UDPARD_IFACE_COUNT_MAX); self->remote.endpoints[ifindex] = src_ep; // Do-once initialization to ensure we don't lose any transfers by choosing the initial transfer-ID poorly. @@ -1863,9 +1920,7 @@ static bool rx_validate_mem_resources(const udpard_rx_mem_resources_t memory) (memory.fragment.alloc != NULL) && (memory.fragment.free != NULL); } -void udpard_rx_new(udpard_rx_t* const self, - udpard_tx_t* const tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX], - const uint64_t p2p_transfer_id_initial) +void udpard_rx_new(udpard_rx_t* const self, udpard_tx_t* const tx, const uint64_t p2p_transfer_id_initial) { UDPARD_ASSERT(self != NULL); mem_zero(sizeof(*self), self); @@ -1874,11 +1929,9 @@ void udpard_rx_new(udpard_rx_t* const self, self->errors_oom = 0; self->errors_frame_malformed = 0; self->errors_transfer_malformed = 0; - for (size_t i = 0; i < UDPARD_NETWORK_INTERFACE_COUNT_MAX; i++) { - self->tx[i] = tx[i]; - } - self->p2p_transfer_id = p2p_transfer_id_initial; - self->user = NULL; + self->tx = tx; + self->p2p_transfer_id = p2p_transfer_id_initial; + self->user = NULL; } void udpard_rx_poll(udpard_rx_t* const self, const udpard_us_t now) @@ -2025,7 +2078,7 @@ bool udpard_rx_port_push(udpard_rx_t* const rx, { const bool ok = (rx != NULL) && (port != NULL) && (timestamp >= 0) && udpard_is_valid_endpoint(source_ep) && (datagram_payload.data != NULL) && (payload_deleter.free != NULL) && - (redundant_iface_index < UDPARD_NETWORK_INTERFACE_COUNT_MAX); + (redundant_iface_index < UDPARD_IFACE_COUNT_MAX); if (ok) { rx_frame_t frame = { 0 }; uint32_t frame_index = 0; diff --git a/libudpard/udpard.h b/libudpard/udpard.h index 50fd6dd..3675efe 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -23,7 +23,8 @@ /// block pool allocators (may be preferable in safety-certified systems). /// If block pool allocators are used, the following block sizes should be served: /// - MTU-sized blocks for the TX and RX pipelines (typically at most 1.5 KB unless jumbo frames are used). -/// - sizeof(udpard_tx_item_t) blocks for the TX pipeline. +/// - sizeof(tx_transfer_t) blocks for the TX pipeline. +/// - sizeof(tx_frame_t) blocks for the TX pipeline. /// - sizeof(rx_session_t) blocks for the RX pipeline. /// - sizeof(udpard_fragment_t) blocks for the RX pipeline. /// @@ -73,7 +74,9 @@ extern "C" #define UDPARD_MTU_MIN 460U /// The library supports at most this many local redundant network interfaces. -#define UDPARD_NETWORK_INTERFACE_COUNT_MAX 3U +#define UDPARD_IFACE_COUNT_MAX 3U + +#define UDPARD_IFACE_MASK_ALL ((1U << UDPARD_IFACE_COUNT_MAX) - 1U) /// All P2P transfers have a fixed prefix, handled by the library transparently for the application, /// defined as follows in DSDL notation: @@ -163,7 +166,7 @@ typedef struct udpard_udpip_ep_t typedef struct udpard_remote_t { uint64_t uid; - udpard_udpip_ep_t endpoints[UDPARD_NETWORK_INTERFACE_COUNT_MAX]; ///< Zeros in unavailable ifaces. + udpard_udpip_ep_t endpoints[UDPARD_IFACE_COUNT_MAX]; ///< Zeros in unavailable ifaces. } udpard_remote_t; /// Returns true if the given UDP/IP endpoint appears to be valid. Zero port or IP are considered invalid. @@ -264,31 +267,6 @@ size_t udpard_fragment_gather(const udpard_fragment_t** cursor, // ================================================= TX PIPELINE ================================================= // ===================================================================================================================== -/// The transmission (TX) pipeline is used to publish messages and send P2P transfers to the network through a -/// particular redundant interface. A Cyphal node with R redundant network interfaces needs to instantiate -/// R transmission pipelines, one per interface, unless the application is not interested in sending data at all. -/// The transmission pipeline contains a prioritized queue of UDP datagrams scheduled for transmission via its -/// network interface. -/// -/// Each transmission pipeline instance requires one socket (or a similar abstraction provided by the underlying -/// UDP/IP stack) that is not connected to any specific remote endpoint (i.e., usable with sendto(), -/// speaking in terms of Berkeley sockets). In the case of redundant interfaces, each socket may need to be configured -/// to emit data through its specific interface (using bind() in Berkeley sockets terminology). -/// -/// Graphically, the transmission pipeline is arranged as follows: -/// -/// +---> udpard_tx_t ---> UDP SOCKET ---> REDUNDANT INTERFACE A -/// | -/// PAYLOAD ---+---> udpard_tx_t ---> UDP SOCKET ---> REDUNDANT INTERFACE B -/// | -/// +---> ... -/// -/// Applications can mark outgoing datagrams with DSCP values derived from the Cyphal transfer priority when sending -/// items pulled from a TX queue. The library itself does not touch the DSCP field but exposes the transfer priority -/// on every enqueued item so the application can apply its own mapping as needed. -/// The maximum transmission unit (MTU) can also be configured separately per TX pipeline instance. -/// Applications that are interested in maximizing their wire compatibility should not change the default MTU setting. - typedef struct udpard_tx_t udpard_tx_t; /// A TX queue uses these memory resources for allocating the enqueued items (UDP datagrams). @@ -305,7 +283,7 @@ typedef struct udpard_tx_mem_resources_t /// The UDP datagram payload buffers are allocated per frame; each buffer is of size at most /// (HEADER_SIZE+MTU+sizeof(void*)) bytes, so a trivial block pool is enough if MTU is known in advance. - udpard_mem_resource_t payload; + udpard_mem_resource_t payload[UDPARD_IFACE_COUNT_MAX]; } udpard_tx_mem_resources_t; /// Outcome notification for a reliable transfer previously scheduled for transmission. @@ -313,27 +291,28 @@ typedef struct udpard_tx_feedback_t { uint64_t topic_hash; uint64_t transfer_id; - udpard_udpip_ep_t remote_ep; + udpard_udpip_ep_t remote_ep[UDPARD_IFACE_COUNT_MAX]; void* user_transfer_reference; ///< This is the same pointer that was passed to udpard_tx_push(). - uint_fast8_t attempts; ///< Cannot overflow due to exponential backoff. 0 if timed out before first attempt. - bool success; ///< False if no ack was received from the remote end before deadline expiration. + uint_fast8_t attempts[UDPARD_IFACE_COUNT_MAX]; ///< 0 if timed out before first attempt. + bool success; ///< False if no ack was received from the remote end before deadline expiration. } udpard_tx_feedback_t; typedef struct udpard_tx_ejection_t { udpard_us_t now; - /// Specifies when the frame should be considered expired and dropped if not yet transmitted; + /// Specifies when the frame should be considered expired and dropped if not yet transmitted by then; /// it is optional to use depending on the implementation of the NIC driver (most traditional drivers ignore it). udpard_us_t deadline; + uint_fast8_t iface_index; ///< The interface index on which the datagram is to be transmitted. + uint_fast8_t dscp; ///< Set the DSCP field of the outgoing packet to this. udpard_udpip_ep_t destination; ///< Unicast or multicast UDP/IP endpoint. /// If the datagram pointer is retained by the application, udpard_tx_refcount_inc() must be invoked on it. /// When no longer needed (e.g, upon transmission), udpard_tx_refcount_dec() must be invoked. - /// Ref counting is needed because the library may need to retain the buffer for subsequent retransmissions. udpard_bytes_t datagram; /// This is the same pointer that was passed to udpard_tx_push(). @@ -346,23 +325,6 @@ typedef struct udpard_tx_vtable_t bool (*eject)(udpard_tx_t*, udpard_tx_ejection_t); } udpard_tx_vtable_t; -/// The transmission pipeline is a prioritized transmission queue that keeps UDP datagrams (aka transport frames) -/// destined for transmission via one network interface. -/// Applications with redundant network interfaces are expected to have one instance of this type per interface. -/// Applications that are not interested in transmission may have zero such instances. -/// -/// All operations are logarithmic in complexity on the number of enqueued items. -/// Once initialized, instances cannot be copied. -/// -/// FUTURE: Eventually we might consider adding another way of arranging the transmission pipeline where the UDP -/// datagrams ready for transmission are not enqueued into the local prioritized queue but instead are sent directly -/// to the network interface driver using a dedicated callback. The callback would accept not just a single -/// chunk of data but a list of chunks to avoid copying the source transfer payload: the header and the payload. -/// The driver would then use some form of vectorized IO or MSG_MORE/UDP_CORK to transmit the data; -/// the advantage of this approach is that up to two data copy operations are eliminated from the stack and the -/// memory allocator is not used at all. The disadvantage is that if the driver callback is blocking, -/// the application thread will be blocked as well; plus the driver will be responsible for the correct -/// prioritization of the outgoing datagrams according to the DSCP value. struct udpard_tx_t { const udpard_tx_vtable_t* vtable; @@ -373,7 +335,7 @@ struct udpard_tx_t /// The maximum number of Cyphal transfer payload bytes per UDP datagram. /// The Cyphal/UDP header is added to this value to obtain the total UDP datagram payload size. See UDPARD_MTU_*. /// The value can be changed arbitrarily between enqueue operations as long as it is at least UDPARD_MTU_MIN. - size_t mtu; + size_t mtu[UDPARD_IFACE_COUNT_MAX]; /// This duration is used to derive the acknowledgment timeout for reliable transfers in tx_ack_timeout(). /// It must be a positive number of microseconds. A sensible default is provided at initialization. @@ -383,12 +345,13 @@ struct udpard_tx_t /// to the IP DSCP field value for use by the application when transmitting. By default, all entries are zero. uint_least8_t dscp_value_per_priority[UDPARD_PRIORITY_COUNT]; - /// The maximum number of UDP datagrams this instance is allowed to enqueue, irrespective of the transfer count. - /// At worst, there may be one datagram per transfer, more for multi-frame transfers. - /// The purpose of this limitation is to ensure that a blocked queue does not exhaust the memory. + /// The maximum number of UDP datagrams irrespective of the transfer count, for all ifaces pooled. + /// The purpose of this limitation is to ensure that a blocked interface queue does not exhaust the memory. + /// When the limit is reached, the library will apply heuristics to choose which transfers to drop, + /// which may incur linear worst-case complexity in the number of enqueued transfers. size_t enqueued_frames_limit; - /// The number of frames that are currently contained in the queue, initially zero. READ-ONLY! + /// The number of frames that are currently registered in the queue, initially zero. READ-ONLY! size_t enqueued_frames_count; udpard_tx_mem_resources_t memory; @@ -400,7 +363,7 @@ struct udpard_tx_t uint64_t errors_expiration; ///< A frame had to be dropped due to premature deadline expiration. /// Internal use only, do not modify! See tx_transfer_t for details. - udpard_list_t queue[UDPARD_PRIORITY_COUNT]; + udpard_list_t queue[UDPARD_IFACE_COUNT_MAX][UDPARD_PRIORITY_COUNT]; udpard_tree_t* index_staged; udpard_tree_t* index_deadline; udpard_tree_t* index_transfer; @@ -411,7 +374,7 @@ struct udpard_tx_t /// The parameters are initialized deterministically (MTU defaults to UDPARD_MTU_DEFAULT and counters are reset) /// and can be changed later by modifying the struct fields directly. No memory allocation is going to take place -/// until the pipeline is actually written to. +/// until the first transfer is successfully pushed via udpard_tx_push(). /// True on success, false if any of the arguments are invalid. bool udpard_tx_new(udpard_tx_t* const self, const uint64_t local_uid, @@ -467,7 +430,7 @@ uint32_t udpard_tx_push(udpard_tx_t* const self, const udpard_us_t deadline, const udpard_prio_t priority, const uint64_t topic_hash, // For P2P transfers, this is the destination's UID. - const udpard_udpip_ep_t remote_ep, + const udpard_udpip_ep_t remote_ep[UDPARD_IFACE_COUNT_MAX], // May be invalid for some ifaces. const uint64_t transfer_id, const udpard_bytes_t payload, void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), // NULL if best-effort. @@ -477,13 +440,15 @@ uint32_t udpard_tx_push(udpard_tx_t* const self, /// It is fine to also invoke it periodically unconditionally to drive the transmission process. /// Internally, the function will query the scheduler for the next frame to be transmitted and will attempt /// to submit it via the eject() callback provided in the vtable. +/// The iface mask indicates which interfaces are currently available for transmission; +/// eject() will only be invoked on these interfaces. /// The function may deallocate memory. The time complexity is logarithmic in the number of enqueued transfers. -void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now); +void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now, const uint_fast8_t iface_mask); /// When a datagram is ejected and the application opts to keep it, these functions must be used to manage the /// datagram buffer lifetime. The datagram will be freed once the reference count reaches zero. -void udpard_tx_refcount_inc(udpard_tx_t* const self, const udpard_bytes_t datagram); -void udpard_tx_refcount_dec(udpard_tx_t* const self, const udpard_bytes_t datagram); +void udpard_tx_refcount_inc(const udpard_bytes_t tx_payload_view); +void udpard_tx_refcount_dec(const udpard_bytes_t tx_payload_view); /// Drops all enqueued items; afterward, the instance is safe to discard. Callbacks will not be invoked. void udpard_tx_free(udpard_tx_t* const self); @@ -589,14 +554,13 @@ typedef struct udpard_rx_t uint64_t errors_frame_malformed; ///< A received frame was malformed and thus dropped. uint64_t errors_transfer_malformed; ///< A transfer could not be reassembled correctly. - /// Whenever an ack fails to transmit on a certain interface, the corresponding counter is incremented. + /// Whenever an ack fails to transmit, the counter is incremented. /// The specific error can be determined by checking the specific counters in the corresponding tx instance. - uint64_t errors_ack_tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX]; + uint64_t errors_ack_tx; - /// The transmission pipelines are needed to manage ack transmission and removal of acknowledged transfers. - /// Some of the pointers can be NULL depending on the number of redundant interfaces available. - /// If the application wants to only listen, all pointers may be NULL (no acks will be sent ever). - udpard_tx_t* tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX]; + /// The transmission pipeline is needed to manage ack transmission and removal of acknowledged transfers. + /// If the application wants to only listen, the pointer may be NULL (no acks will be sent). + udpard_tx_t* tx; /// A random-initialized transfer-ID counter for all outgoing P2P transfers. uint64_t p2p_transfer_id; @@ -749,9 +713,7 @@ struct udpard_rx_port_p2p_t /// The RX instance holds no resources and can be destroyed at any time by simply freeing all its ports first /// using udpard_rx_port_free(), then discarding the instance itself. The self pointer must not be NULL. -void udpard_rx_new(udpard_rx_t* const self, - udpard_tx_t* const tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX], - const uint64_t p2p_transfer_id_initial); +void udpard_rx_new(udpard_rx_t* const self, udpard_tx_t* const tx, const uint64_t p2p_transfer_id_initial); /// Must be invoked at least every few milliseconds (more often is fine) to purge timed-out sessions and eject /// received transfers when the reordering window expires. If this is invoked simultaneously with rx subscription From d6a64651ee8516cbde29aeeda86b2ca784887544 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sat, 27 Dec 2025 14:03:48 +0200 Subject: [PATCH 12/42] resolve the retry logic design challenge, a major one --- libudpard/udpard.c | 140 ++++++++++++++++++++++++--------------------- libudpard/udpard.h | 16 +++--- 2 files changed, 81 insertions(+), 75 deletions(-) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index fce2b2d..9675d3f 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -530,21 +530,19 @@ typedef struct } tx_transfer_key_t; /// The transmission scheduler maintains several indexes for the transfers in the pipeline. -/// All index operations are logarithmic in the number of scheduled transfers except for the pending queue, -/// where the complexity is constant. /// /// The segregated priority queue only contains transfers that are ready for transmission. -/// The staged index contains only transfers that will be ready for transmission later, ordered by readiness time. -/// Transfers that will no longer be transmitted but are retained waiting for the ack are in neither of these. -/// +/// The staged index contains transfers ordered by readiness time; +/// transfers that will no longer be transmitted but are retained waiting for the ack are in neither of these. /// The deadline index contains ALL transfers, ordered by their deadlines, used for purging expired transfers. /// The transfer index contains ALL transfers, used for lookup by (topic_hash, transfer_id). typedef struct tx_transfer_t { - udpard_tree_t index_staged; ///< Soonest to be ready on the left. Key: retry_at + udpard_tree_t index_staged; ///< Soonest to be ready on the left. Key: staged_until udpard_tree_t index_deadline; ///< Soonest to expire on the left. Key: deadline udpard_tree_t index_transfer; ///< Specific transfer lookup for ack management. Key: tx_transfer_key_t udpard_list_member_t queue[UDPARD_IFACE_COUNT_MAX]; ///< Listed when ready for transmission. + udpard_list_member_t agewise; ///< Listed when created; oldest at the tail. /// We always keep a pointer to the head, plus a cursor that scans the frames during transmission. /// Both are NULL if the payload is destroyed. @@ -554,8 +552,8 @@ typedef struct tx_transfer_t /// Mutable transmission state. All other fields, except for the index handles, are immutable. tx_frame_t* cursor[UDPARD_IFACE_COUNT_MAX]; - uint_fast8_t attempts[UDPARD_IFACE_COUNT_MAX]; ///< Does not overflow due to exponential backoff. - udpard_us_t retry_at; ///< If retry_at>=deadline, this is the last attempt; frames can be freed as they go out. + uint_fast8_t epoch; ///< Does not overflow due to exponential backoff. + udpard_us_t staged_until; ///< If staged_until>=deadline, this is the last attempt; frames can be freed as leave. /// Constant transfer properties supplied by the client. uint64_t topic_hash; @@ -586,7 +584,7 @@ static void tx_transfer_free_payload(tx_transfer_t* const tr) const tx_frame_t* frame = tr->head[i]; while (frame != NULL) { const tx_frame_t* const next = frame->next; - udpard_tx_refcount_dec(tx_frame_view(frame)); + udpard_tx_refcount_dec(tx_frame_view(frame)); // TODO FIXME frame counting! frame = next; } tr->head[i] = NULL; @@ -601,6 +599,7 @@ static void tx_transfer_free(udpard_tx_t* const tx, tx_transfer_t* const tr) for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { delist(&tx->queue[i][tr->priority], &tr->queue[i]); } + delist(&tx->agewise, &tr->agewise); if (cavl2_is_inserted(tx->index_staged, &tr->index_staged)) { cavl2_remove(&tx->index_staged, &tr->index_staged); } @@ -610,35 +609,30 @@ static void tx_transfer_free(udpard_tx_t* const tx, tx_transfer_t* const tr) } /// When the queue is exhausted, finds a transfer to sacrifice using simple heuristics and returns it. -/// The heuristics are subject to review and improvement. /// Will return NULL if there are no transfers worth sacrificing (no queue space can be reclaimed). -static tx_transfer_t* tx_sacrifice(udpard_tx_t* const tx) -{ - uint16_t max_attempts = 0; - tx_transfer_t* out = NULL; - // Scanning from the earliest deadline, meaning we prefer to sacrifice transfers that are the soonest to expire. - for (tx_transfer_t* tr = CAVL2_TO_OWNER(cavl2_min(tx->index_deadline), tx_transfer_t, index_deadline); tr != NULL; - tr = CAVL2_TO_OWNER(cavl2_next_greater(&tr->index_deadline), tx_transfer_t, index_deadline)) { - uint16_t attempts = 0; - for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - attempts += tr->attempts[i]; - } - if ((attempts > 0) && !tr->reliable) { // Prefer non-reliable transfers that have been transmitted once. - out = tr; +/// We cannot simply stop accepting new transfers when the queue is full, because it may be caused by a single +/// stalled interface holding back progress for all transfers. +/// The heuristics are subject to review and improvement. +static tx_transfer_t* tx_sacrifice(udpard_tx_t* const tx) { return LIST_TAIL(tx->agewise, tx_transfer_t, agewise); } + +static bool tx_ensure_queue_space(udpard_tx_t* const tx, const size_t total_frames_needed) +{ + if (total_frames_needed > tx->enqueued_frames_limit) { + return false; // not gonna happen + } + while (total_frames_needed > (tx->enqueued_frames_limit - tx->enqueued_frames_count)) { + tx_transfer_t* const victim = tx_sacrifice(tx); + if (victim == NULL) { break; } - if (attempts > max_attempts) { - tr = out; - max_attempts = attempts; - } - return out; + tx_transfer_free(tx, victim); } - return out; + return total_frames_needed <= (tx->enqueued_frames_limit - tx->enqueued_frames_count); } static int32_t tx_cavl_compare_staged(const void* const user, const udpard_tree_t* const node) { - return ((*(const udpard_us_t*)user) >= CAVL2_TO_OWNER(node, tx_transfer_t, index_staged)->retry_at) ? +1 : -1; + return ((*(const udpard_us_t*)user) >= CAVL2_TO_OWNER(node, tx_transfer_t, index_staged)->staged_until) ? +1 : -1; } static int32_t tx_cavl_compare_deadline(const void* const user, const udpard_tree_t* const node) { @@ -664,14 +658,10 @@ static tx_transfer_t* tx_transfer_find(udpard_tx_t* const tx, const uint64_t top static udpard_tx_feedback_t tx_make_feedback(const tx_transfer_t* const tr, const bool success) { - udpard_tx_feedback_t fb = { .topic_hash = tr->topic_hash, - .transfer_id = tr->transfer_id, - .user_transfer_reference = tr->user_transfer_reference, - .success = success }; - for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - fb.remote_ep[i] = tr->destination[i]; - fb.attempts[i] = tr->attempts[i]; - } + const udpard_tx_feedback_t fb = { .topic_hash = tr->topic_hash, + .transfer_id = tr->transfer_id, + .user_transfer_reference = tr->user_transfer_reference, + .success = success }; return fb; } @@ -747,17 +737,14 @@ static uint32_t tx_push(udpard_tx_t* const tx, const size_t payload_size = payload.size; const size_t mtu = larger(tx->mtu, UDPARD_MTU_MIN); const size_t n_frames = larger(1, (payload_size + mtu - 1U) / mtu); - // TODO: tx_sacrifice() + // TODO: tx_sacrifice() and choose duplication --- find matching allocations in the existing queues to reuse. if ((tx->enqueued_frames_count + n_frames) > tx->enqueued_frames_limit) { tx->errors_capacity++; } else { tx_transfer_t* const tr = mem_alloc(tx->memory.transfer, sizeof(tx_transfer_t)); if (tr != NULL) { mem_zero(sizeof(*tr), tr); - tr->attempts = 0; - tr->retry_at = meta.flag_ack // - ? (now + tx_ack_timeout(tx->ack_baseline_timeout, meta.priority, 0)) - : HEAT_DEATH; + tr->epoch = 0; tr->topic_hash = meta.topic_hash; tr->transfer_id = meta.transfer_id; tr->deadline = deadline; @@ -766,10 +753,22 @@ static uint32_t tx_push(udpard_tx_t* const tx, tr->destination = endpoint; tr->user_transfer_reference = user_transfer_reference; tr->feedback = feedback; + tr->staged_until = + meta.flag_ack ? (now + tx_ack_timeout(tx->ack_baseline_timeout, tr->priority, tr->epoch)) : HEAT_DEATH; tr->head = tr->cursor = tx_spool(tx->memory, mtu, meta, payload); if (tr->head != NULL) { - // Schedule the transfer for transmission. - enlist_head(&tx->queue[tr->priority], &tr->queue); + for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + if (udpard_is_valid_endpoint(tr->destination[i])) { + enlist_head(&tx->queue[i][tr->priority], &tr->queue[i]); + } + } + if (tr->deadline > tr->staged_until) { // only if retransmissions are possible + (void)cavl2_find_or_insert(&tx->index_staged, + &tr->staged_until, + tx_cavl_compare_staged, + &tr->index_staged, + cavl2_trivial_factory); + } const tx_transfer_key_t key = { .topic_hash = tr->topic_hash, .transfer_id = tr->transfer_id }; (void)cavl2_find_or_insert(&tx->index_transfer, // &key, @@ -781,6 +780,7 @@ static uint32_t tx_push(udpard_tx_t* const tx, tx_cavl_compare_deadline, &tr->index_deadline, cavl2_trivial_factory); + enlist_head(&tx->agewise, &tr->agewise); tx->enqueued_frames_count += n_frames; UDPARD_ASSERT(tx->enqueued_frames_count <= tx->enqueued_frames_limit); out = (uint32_t)n_frames; @@ -942,14 +942,30 @@ static void tx_promote_staged(udpard_tx_t* const self, const udpard_us_t now) { while (true) { // we can use next_greater instead of doing min search every time tx_transfer_t* const tr = CAVL2_TO_OWNER(cavl2_min(self->index_staged), tx_transfer_t, index_staged); - if ((tr != NULL) && (now >= tr->retry_at)) { + if ((tr != NULL) && (now >= tr->staged_until)) { UDPARD_ASSERT(tr->cursor != NULL); // cannot stage without payload, doesn't make sense - // Update the state for the next retransmission. - tr->retry_at += tx_ack_timeout(self->ack_baseline_timeout, tr->priority, tr->attempts); - UDPARD_ASSERT(tr->cursor == tr->head); - // Remove from the staged index and add to the transmission queue. - enlist_head(&self->queue[tr->priority], &tr->queue); + + // Reinsert into the staged index at the new position, when the next attempt is due. + // Do not insert if this is the last attempt -- no point doing that since it will not be transmitted again. cavl2_remove(&self->index_staged, &tr->index_staged); + tr->epoch++; + tr->staged_until += tx_ack_timeout(self->ack_baseline_timeout, tr->priority, tr->epoch); + if (tr->deadline > tr->staged_until) { + (void)cavl2_find_or_insert(&self->index_staged, + &tr->staged_until, + tx_cavl_compare_staged, + &tr->index_staged, + cavl2_trivial_factory); + } + + // Enqueue for transmission unless it's been there since the last attempt (stalled interface?) + for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + UDPARD_ASSERT(tr->cursor[i] == tr->head[i]); + if (udpard_is_valid_endpoint(tr->destination[i]) && + !is_listed(&self->queue[i][tr->priority], &tr->queue[i])) { + enlist_head(&self->queue[i][tr->priority], &tr->queue[i]); + } + } } else { break; } @@ -974,13 +990,11 @@ static void tx_eject_pending(udpard_tx_t* const self, const udpard_us_t now, con UDPARD_ASSERT(!cavl2_is_inserted(self->index_staged, &tr->index_staged)); UDPARD_ASSERT(tr->cursor != NULL); // cannot be pending without payload, doesn't make sense - // Compute the auxiliary states that will guide the ejection. - tx_frame_t* const frame = tr->cursor[ifindex]; - tx_frame_t* const frame_next = frame->next; - const bool last_attempt = tr->deadline <= tr->retry_at; - const bool last_frame = frame_next == NULL; // if not last attempt we will have to rewind to head - // Eject the frame. + const tx_frame_t* const frame = tr->cursor[ifindex]; + tx_frame_t* const frame_next = frame->next; + const bool last_attempt = tr->deadline <= tr->staged_until; + const bool last_frame = frame_next == NULL; // if not last attempt we will have to rewind to head. const udpard_tx_ejection_t ejection = { .now = now, .deadline = tr->deadline, @@ -1005,17 +1019,11 @@ static void tx_eject_pending(udpard_tx_t* const self, const udpard_us_t now, con // Finalize the transmission if this was the last frame of the transfer. if (last_frame) { tr->cursor[ifindex] = tr->head[ifindex]; - tr->attempts[ifindex]++; delist(&self->queue[ifindex][tr->priority], &tr->queue[ifindex]); // no longer pending for transmission - if (last_attempt) { - if (!tr->reliable) { // Best-effort transfers are removed immediately. - tx_transfer_free(self, tr); // We can invoke the feedback callback here if needed. - } - // If this is the last attempt of a reliable transfer, it will wait for ack or expiration. - } else { // Reinsert into the staged index for later retransmission if not acknowledged. - cavl2_find_or_insert( - &self->index_staged, &tr->retry_at, tx_cavl_compare_staged, &tr->index_staged, cavl2_trivial_factory); + if (last_attempt && !tr->reliable) { // Best-effort transfers are removed immediately, no ack to wait for. + tx_transfer_free(self, tr); // We can invoke the feedback callback here if needed. } + UDPARD_ASSERT(!last_attempt || (tr->head == NULL)); // the payload is no longer needed } } } diff --git a/libudpard/udpard.h b/libudpard/udpard.h index 3675efe..70ba11d 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -289,13 +289,11 @@ typedef struct udpard_tx_mem_resources_t /// Outcome notification for a reliable transfer previously scheduled for transmission. typedef struct udpard_tx_feedback_t { - uint64_t topic_hash; - uint64_t transfer_id; - udpard_udpip_ep_t remote_ep[UDPARD_IFACE_COUNT_MAX]; - void* user_transfer_reference; ///< This is the same pointer that was passed to udpard_tx_push(). + uint64_t topic_hash; + uint64_t transfer_id; + void* user_transfer_reference; ///< This is the same pointer that was passed to udpard_tx_push(). - uint_fast8_t attempts[UDPARD_IFACE_COUNT_MAX]; ///< 0 if timed out before first attempt. - bool success; ///< False if no ack was received from the remote end before deadline expiration. + bool success; ///< False if no ack was received from the remote end before deadline expiration or forced eviction. } udpard_tx_feedback_t; typedef struct udpard_tx_ejection_t @@ -347,8 +345,7 @@ struct udpard_tx_t /// The maximum number of UDP datagrams irrespective of the transfer count, for all ifaces pooled. /// The purpose of this limitation is to ensure that a blocked interface queue does not exhaust the memory. - /// When the limit is reached, the library will apply heuristics to choose which transfers to drop, - /// which may incur linear worst-case complexity in the number of enqueued transfers. + /// When the limit is reached, the library will apply simple heuristics to choose which transfers to drop. size_t enqueued_frames_limit; /// The number of frames that are currently registered in the queue, initially zero. READ-ONLY! @@ -363,7 +360,8 @@ struct udpard_tx_t uint64_t errors_expiration; ///< A frame had to be dropped due to premature deadline expiration. /// Internal use only, do not modify! See tx_transfer_t for details. - udpard_list_t queue[UDPARD_IFACE_COUNT_MAX][UDPARD_PRIORITY_COUNT]; + udpard_list_t queue[UDPARD_IFACE_COUNT_MAX][UDPARD_PRIORITY_COUNT]; ///< Next to transmit at the tail. + udpard_list_t agewise; ///< Oldest at the tail. udpard_tree_t* index_staged; udpard_tree_t* index_deadline; udpard_tree_t* index_transfer; From c5890d63a71988273a270cc00a711c02016c5cd5 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sat, 27 Dec 2025 14:17:50 +0200 Subject: [PATCH 13/42] count the number of allocated frames; the next task is to implement the new tx spool --- .idea/dictionaries/project.xml | 1 + libudpard/udpard.c | 22 +++++++++++----------- libudpard/udpard.h | 9 ++++++--- 3 files changed, 18 insertions(+), 14 deletions(-) diff --git a/.idea/dictionaries/project.xml b/.idea/dictionaries/project.xml index c7c54f0..a0bfd71 100644 --- a/.idea/dictionaries/project.xml +++ b/.idea/dictionaries/project.xml @@ -13,6 +13,7 @@ lmnopqrst mnop noinit + objcount optin pqrst tidwin diff --git a/libudpard/udpard.c b/libudpard/udpard.c index 9675d3f..2123156 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -496,6 +496,7 @@ typedef struct tx_frame_t { size_t refcount; udpard_mem_deleter_t deleter; + size_t* objcount; struct tx_frame_t* next; size_t size; byte_t data[]; @@ -511,14 +512,17 @@ static tx_frame_t* tx_frame_from_view(const udpard_bytes_t view) return (tx_frame_t*)unbias_ptr(view.data, offsetof(tx_frame_t, data)); } -static tx_frame_t* tx_frame_new(const udpard_mem_resource_t mem, const size_t data_size) +static tx_frame_t* tx_frame_new(udpard_tx_t* const tx, const udpard_mem_resource_t mem, const size_t data_size) { tx_frame_t* const frame = (tx_frame_t*)mem_alloc(mem, sizeof(tx_frame_t) + data_size); if (frame != NULL) { frame->refcount = 1U; frame->deleter = (udpard_mem_deleter_t){ .user = mem.user, .free = mem.free }; + frame->objcount = &tx->enqueued_frames_count; frame->next = NULL; frame->size = data_size; + // Update the count; this is decremented when the frame is freed upon refcount reaching zero. + tx->enqueued_frames_count++; } return frame; } @@ -623,7 +627,7 @@ static bool tx_ensure_queue_space(udpard_tx_t* const tx, const size_t total_fram while (total_frames_needed > (tx->enqueued_frames_limit - tx->enqueued_frames_count)) { tx_transfer_t* const victim = tx_sacrifice(tx); if (victim == NULL) { - break; + break; // We may have no transfers anymore but the NIC TX driver could still be holding some frames. } tx_transfer_free(tx, victim); } @@ -666,10 +670,7 @@ static udpard_tx_feedback_t tx_make_feedback(const tx_transfer_t* const tr, cons } /// Returns the head of the transfer chain; NULL on OOM. -static tx_frame_t* tx_spool(const udpard_mem_resource_t memory, - const size_t mtu, - const meta_t meta, - const udpard_bytes_t payload) +static tx_frame_t* tx_spool(udpard_tx_t* const tx, const size_t mtu, const meta_t meta, const udpard_bytes_t payload) { UDPARD_ASSERT(mtu > 0); UDPARD_ASSERT((payload.data != NULL) || (payload.size == 0U)); @@ -684,7 +685,7 @@ static tx_frame_t* tx_spool(const udpard_mem_resource_t memory, // Compute the size of the next frame, allocate it and link it up in the chain. const size_t progress = smaller(payload.size - offset, mtu); { - tx_frame_t* const item = tx_frame_new(memory, progress); + tx_frame_t* const item = tx_frame_new(tx, tx->memory, progress); if (NULL == head) { head = item; } else { @@ -696,7 +697,7 @@ static tx_frame_t* tx_spool(const udpard_mem_resource_t memory, if (NULL == tail) { while (head != NULL) { tx_frame_t* const next = head->next; - mem_free(memory, sizeof(tx_frame_t) + head->size, head); + mem_free(tx->memory, sizeof(tx_frame_t) + head->size, head); head = next; } break; @@ -755,7 +756,7 @@ static uint32_t tx_push(udpard_tx_t* const tx, tr->feedback = feedback; tr->staged_until = meta.flag_ack ? (now + tx_ack_timeout(tx->ack_baseline_timeout, tr->priority, tr->epoch)) : HEAT_DEATH; - tr->head = tr->cursor = tx_spool(tx->memory, mtu, meta, payload); + tr->head = tr->cursor = tx_spool(tx, mtu, meta, payload); if (tr->head != NULL) { for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { if (udpard_is_valid_endpoint(tr->destination[i])) { @@ -781,7 +782,6 @@ static uint32_t tx_push(udpard_tx_t* const tx, &tr->index_deadline, cavl2_trivial_factory); enlist_head(&tx->agewise, &tr->agewise); - tx->enqueued_frames_count += n_frames; UDPARD_ASSERT(tx->enqueued_frames_count <= tx->enqueued_frames_limit); out = (uint32_t)n_frames; } else { // The queue is large enough but we ran out of heap memory. @@ -1012,7 +1012,6 @@ static void tx_eject_pending(udpard_tx_t* const self, const udpard_us_t now, con UDPARD_ASSERT(tr->head == tr->cursor); // They go together on the last attempt. tr->head[ifindex] = frame_next; udpard_tx_refcount_dec(ejection.datagram); - self->enqueued_frames_count--; } tr->cursor[ifindex] = frame_next; @@ -1057,6 +1056,7 @@ void udpard_tx_refcount_dec(const udpard_bytes_t tx_payload_view) UDPARD_ASSERT(frame->refcount > 0); // NOLINT(*ArrayBound) frame->refcount--; if (frame->refcount == 0U) { + --*frame->objcount; frame->deleter.free(frame->deleter.user, sizeof(tx_frame_t) + tx_payload_view.size, frame); } } diff --git a/libudpard/udpard.h b/libudpard/udpard.h index 70ba11d..7c0fd61 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -282,7 +282,7 @@ typedef struct udpard_tx_mem_resources_t udpard_mem_resource_t transfer; /// The UDP datagram payload buffers are allocated per frame; each buffer is of size at most - /// (HEADER_SIZE+MTU+sizeof(void*)) bytes, so a trivial block pool is enough if MTU is known in advance. + /// HEADER_SIZE + MTU + small overhead, so a trivial block pool is enough if MTU is known in advance. udpard_mem_resource_t payload[UDPARD_IFACE_COUNT_MAX]; } udpard_tx_mem_resources_t; @@ -345,10 +345,13 @@ struct udpard_tx_t /// The maximum number of UDP datagrams irrespective of the transfer count, for all ifaces pooled. /// The purpose of this limitation is to ensure that a blocked interface queue does not exhaust the memory. - /// When the limit is reached, the library will apply simple heuristics to choose which transfers to drop. + /// When the limit is reached, the library will apply simple heuristics to choose which transfers to sacrifice. size_t enqueued_frames_limit; - /// The number of frames that are currently registered in the queue, initially zero. READ-ONLY! + /// The number of frames that are currently registered in the queue, initially zero. + /// This includes frames that are handed over to the NIC driver for transmission that are not yet released + /// via udpard_tx_refcount_dec(). + /// READ-ONLY! size_t enqueued_frames_count; udpard_tx_mem_resources_t memory; From c79db006a2addb3ad3e98be1f41b063ee0b297d1 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sat, 27 Dec 2025 16:13:36 +0200 Subject: [PATCH 14/42] push with deduplication --- libudpard/udpard.c | 212 +++++++++++++++++++++++++++++---------------- 1 file changed, 136 insertions(+), 76 deletions(-) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index 2123156..a2e5ee1 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -588,7 +588,7 @@ static void tx_transfer_free_payload(tx_transfer_t* const tr) const tx_frame_t* frame = tr->head[i]; while (frame != NULL) { const tx_frame_t* const next = frame->next; - udpard_tx_refcount_dec(tx_frame_view(frame)); // TODO FIXME frame counting! + udpard_tx_refcount_dec(tx_frame_view(frame)); frame = next; } tr->head[i] = NULL; @@ -619,6 +619,7 @@ static void tx_transfer_free(udpard_tx_t* const tx, tx_transfer_t* const tr) /// The heuristics are subject to review and improvement. static tx_transfer_t* tx_sacrifice(udpard_tx_t* const tx) { return LIST_TAIL(tx->agewise, tx_transfer_t, agewise); } +/// True on success, false if not possible to reclaim enough space. static bool tx_ensure_queue_space(udpard_tx_t* const tx, const size_t total_frames_needed) { if (total_frames_needed > tx->enqueued_frames_limit) { @@ -670,7 +671,11 @@ static udpard_tx_feedback_t tx_make_feedback(const tx_transfer_t* const tr, cons } /// Returns the head of the transfer chain; NULL on OOM. -static tx_frame_t* tx_spool(udpard_tx_t* const tx, const size_t mtu, const meta_t meta, const udpard_bytes_t payload) +static tx_frame_t* tx_spool(udpard_tx_t* const tx, + const udpard_mem_resource_t memory, + const size_t mtu, + const meta_t meta, + const udpard_bytes_t payload) { UDPARD_ASSERT(mtu > 0); UDPARD_ASSERT((payload.data != NULL) || (payload.size == 0U)); @@ -679,25 +684,21 @@ static tx_frame_t* tx_spool(udpard_tx_t* const tx, const size_t mtu, const meta_ tx_frame_t* tail = NULL; size_t frame_index = 0U; size_t offset = 0U; - // Run the O(n) copy loop, where n is the payload size. - // The client doesn't have to ensure that the payload data survives beyond this function call. do { // Compute the size of the next frame, allocate it and link it up in the chain. - const size_t progress = smaller(payload.size - offset, mtu); - { - tx_frame_t* const item = tx_frame_new(tx, tx->memory, progress); - if (NULL == head) { - head = item; - } else { - tail->next = item; - } - tail = item; + const size_t progress = smaller(payload.size - offset, mtu); + tx_frame_t* const item = tx_frame_new(tx, memory, progress); + if (NULL == head) { + head = item; + } else { + tail->next = item; } + tail = item; // On OOM, deallocate the entire chain and quit. if (NULL == tail) { while (head != NULL) { tx_frame_t* const next = head->next; - mem_free(tx->memory, sizeof(tx_frame_t) + head->size, head); + udpard_tx_refcount_dec(tx_frame_view(head)); head = next; } break; @@ -723,87 +724,146 @@ static udpard_us_t tx_ack_timeout(const udpard_us_t baseline, const udpard_prio_ return baseline * (1L << smaller((size_t)prio + attempts, 62)); // NOLINT(*-signed-bitwise) } +/// A transfer can use the same fragments between two interfaces if both have the same MTU and use the same allocator. +/// The allocator requirement is important because it is possible that distinct NICs may not be able to reach the +/// same memory region via DMA. +static bool tx_spool_shareable(const size_t mtu_a, + const udpard_mem_resource_t mem_a, + const size_t mtu_b, + const udpard_mem_resource_t mem_b) +{ + return (mtu_a == mtu_b) && mem_same(mem_a, mem_b); +} + +/// The prediction takes into account that some interfaces may share the same frame spool. +static size_t tx_predict_frame_count(const size_t mtu[UDPARD_IFACE_COUNT_MAX], + const udpard_mem_resource_t memory[UDPARD_IFACE_COUNT_MAX], + const udpard_udpip_ep_t endpoint[UDPARD_IFACE_COUNT_MAX], + const size_t payload_size) +{ + size_t n_frames_total = 0; + for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + if (udpard_is_valid_endpoint(endpoint[i])) { + bool shared = false; + for (uint_fast8_t j = 0; j < i; j++) { + shared = shared || (udpard_is_valid_endpoint(endpoint[j]) && + tx_spool_shareable(mtu[i], memory[i], mtu[j], memory[j])); + } + if (!shared) { + n_frames_total += larger(1, (payload_size + mtu[i] - 1U) / mtu[i]); + } + } + } + return n_frames_total; +} + static uint32_t tx_push(udpard_tx_t* const tx, const udpard_us_t now, const udpard_us_t deadline, const meta_t meta, - const udpard_udpip_ep_t endpoint, + const udpard_udpip_ep_t endpoint[UDPARD_IFACE_COUNT_MAX], const udpard_bytes_t payload, void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), void* const user_transfer_reference) { UDPARD_ASSERT(now <= deadline); UDPARD_ASSERT(tx != NULL); - uint32_t out = 0; // The number of frames enqueued; zero on error (error counters incremented). - const size_t payload_size = payload.size; - const size_t mtu = larger(tx->mtu, UDPARD_MTU_MIN); - const size_t n_frames = larger(1, (payload_size + mtu - 1U) / mtu); - // TODO: tx_sacrifice() and choose duplication --- find matching allocations in the existing queues to reuse. - if ((tx->enqueued_frames_count + n_frames) > tx->enqueued_frames_limit) { + + // Ensure the queue has enough space. + for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + tx->mtu[i] = larger(tx->mtu[i], UDPARD_MTU_MIN); // enforce minimum MTU + } + const size_t n_frames = tx_predict_frame_count(tx->mtu, tx->memory.payload, endpoint, meta.transfer_payload_size); + if (!tx_ensure_queue_space(tx, n_frames)) { tx->errors_capacity++; - } else { - tx_transfer_t* const tr = mem_alloc(tx->memory.transfer, sizeof(tx_transfer_t)); - if (tr != NULL) { - mem_zero(sizeof(*tr), tr); - tr->epoch = 0; - tr->topic_hash = meta.topic_hash; - tr->transfer_id = meta.transfer_id; - tr->deadline = deadline; - tr->reliable = meta.flag_ack; - tr->priority = meta.priority; - tr->destination = endpoint; - tr->user_transfer_reference = user_transfer_reference; - tr->feedback = feedback; - tr->staged_until = - meta.flag_ack ? (now + tx_ack_timeout(tx->ack_baseline_timeout, tr->priority, tr->epoch)) : HEAT_DEATH; - tr->head = tr->cursor = tx_spool(tx, mtu, meta, payload); - if (tr->head != NULL) { - for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - if (udpard_is_valid_endpoint(tr->destination[i])) { - enlist_head(&tx->queue[i][tr->priority], &tr->queue[i]); - } + return 0; + } + + // Construct the transfer object, without the frames for now. The frame spools will be constructed next. + tx_transfer_t* const tr = mem_alloc(tx->memory.transfer, sizeof(tx_transfer_t)); + if (tr == NULL) { + tx->errors_oom++; + return 0; + } + mem_zero(sizeof(*tr), tr); + tr->epoch = 0; + tr->topic_hash = meta.topic_hash; + tr->transfer_id = meta.transfer_id; + tr->deadline = deadline; + tr->reliable = meta.flag_ack; + tr->priority = meta.priority; + tr->user_transfer_reference = user_transfer_reference; + tr->feedback = feedback; + tr->staged_until = + meta.flag_ack ? (now + tx_ack_timeout(tx->ack_baseline_timeout, tr->priority, tr->epoch)) : HEAT_DEATH; + for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + tr->destination[i] = endpoint[i]; + tr->head[i] = tr->cursor[i] = NULL; + } + + // Spool the frames for each interface, with deduplication where possible to conserve space. + const size_t enqueued_frames_before = tx->enqueued_frames_count; + bool oom = false; + for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + if (udpard_is_valid_endpoint(tr->destination[i])) { + if (tr->head[i] == NULL) { + tr->head[i] = tx_spool(tx, tx->memory.payload[i], tx->mtu[i], meta, payload); + tr->cursor[i] = tr->head[i]; + if (tr->head[i] == NULL) { + oom = true; + break; } - if (tr->deadline > tr->staged_until) { // only if retransmissions are possible - (void)cavl2_find_or_insert(&tx->index_staged, - &tr->staged_until, - tx_cavl_compare_staged, - &tr->index_staged, - cavl2_trivial_factory); + // Detect which interfaces can use the same spool to conserve memory. + for (uint_fast8_t j = i + 1; j < UDPARD_IFACE_COUNT_MAX; j++) { + if (udpard_is_valid_endpoint(tr->destination[j]) && + tx_spool_shareable(tx->mtu[i], tx->memory.payload[i], tx->mtu[j], tx->memory.payload[j])) { + tr->head[j] = tr->head[i]; + tr->cursor[j] = tr->cursor[i]; + tx_frame_t* frame = tr->head[j]; + while (frame != NULL) { + frame->refcount++; + frame = frame->next; + } + } } - const tx_transfer_key_t key = { .topic_hash = tr->topic_hash, .transfer_id = tr->transfer_id }; - (void)cavl2_find_or_insert(&tx->index_transfer, // - &key, - tx_cavl_compare_transfer, - &tr->index_transfer, - cavl2_trivial_factory); - (void)cavl2_find_or_insert(&tx->index_deadline, - &tr->deadline, - tx_cavl_compare_deadline, - &tr->index_deadline, - cavl2_trivial_factory); - enlist_head(&tx->agewise, &tr->agewise); - UDPARD_ASSERT(tx->enqueued_frames_count <= tx->enqueued_frames_limit); - out = (uint32_t)n_frames; - } else { // The queue is large enough but we ran out of heap memory. - tx->errors_oom++; - mem_free(tx->memory.transfer, sizeof(tx_transfer_t), tr); } - } else { // The queue is large enough but we couldn't allocate the transfer metadata object. - tx->errors_oom++; } } - return out; + if (oom) { + tx_transfer_free_payload(tr); + mem_free(tx->memory.transfer, sizeof(tx_transfer_t), tr); + tx->errors_oom++; + return 0; + } + UDPARD_ASSERT((tx->enqueued_frames_count - enqueued_frames_before) == n_frames); + UDPARD_ASSERT(tx->enqueued_frames_count <= tx->enqueued_frames_limit); + + // Enqueue for transmission immediately. + for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + if (udpard_is_valid_endpoint(tr->destination[i])) { + enlist_head(&tx->queue[i][tr->priority], &tr->queue[i]); + } + } + // If retransmissions are possible, add to the staged index so that it is re-enqueued later unless acknowledged. + if (tr->deadline > tr->staged_until) { + (void)cavl2_find_or_insert( + &tx->index_staged, &tr->staged_until, tx_cavl_compare_staged, &tr->index_staged, cavl2_trivial_factory); + } + // Add to the deadline index for expiration management. + (void)cavl2_find_or_insert( + &tx->index_deadline, &tr->deadline, tx_cavl_compare_deadline, &tr->index_deadline, cavl2_trivial_factory); + // Add to the transfer index for incoming ack management. + const tx_transfer_key_t key = { .topic_hash = tr->topic_hash, .transfer_id = tr->transfer_id }; + (void)cavl2_find_or_insert( + &tx->index_transfer, &key, tx_cavl_compare_transfer, &tr->index_transfer, cavl2_trivial_factory); + // Add to the agewise list to allow instant sacrifice when needed; oldest at the tail. + enlist_head(&tx->agewise, &tr->agewise); + return n_frames; } /// Handle an ACK received from a remote node. -/// This is where we dequeue pending transmissions and invoke the feedback callback. -/// Acks for non-reliable transfers are ignored. -static void tx_receive_ack(udpard_rx_t* const rx, - const uint64_t topic_hash, - const uint64_t transfer_id, - const udpard_remote_t remote) +static void tx_receive_ack(udpard_rx_t* const rx, const uint64_t topic_hash, const uint64_t transfer_id) { - (void)remote; if (rx->tx != NULL) { tx_transfer_t* const tr = tx_transfer_find(rx->tx, topic_hash, transfer_id); if ((tr != NULL) && tr->reliable) { @@ -2026,7 +2086,7 @@ static void rx_p2p_on_message(udpard_rx_t* const rx, udpard_rx_port_t* const por // Process the data depending on the kind. if (kind == P2P_KIND_ACK) { - tx_receive_ack(rx, topic_hash, transfer_id, transfer.remote); + tx_receive_ack(rx, topic_hash, transfer_id); } else if (kind == P2P_KIND_RESPONSE) { const udpard_rx_transfer_p2p_t tr = { .base = transfer, .topic_hash = topic_hash }; self->vtable->on_message(rx, self, tr); From 72230bd508c4acca84dad933480b3d1a5a7684d3 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sat, 27 Dec 2025 16:22:05 +0200 Subject: [PATCH 15/42] nits --- libudpard/udpard.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index a2e5ee1..8e3fc8d 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -523,6 +523,7 @@ static tx_frame_t* tx_frame_new(udpard_tx_t* const tx, const udpard_mem_resource frame->size = data_size; // Update the count; this is decremented when the frame is freed upon refcount reaching zero. tx->enqueued_frames_count++; + UDPARD_ASSERT(tx->enqueued_frames_count <= tx->enqueued_frames_limit); } return frame; } @@ -1004,12 +1005,10 @@ static void tx_promote_staged(udpard_tx_t* const self, const udpard_us_t now) tx_transfer_t* const tr = CAVL2_TO_OWNER(cavl2_min(self->index_staged), tx_transfer_t, index_staged); if ((tr != NULL) && (now >= tr->staged_until)) { UDPARD_ASSERT(tr->cursor != NULL); // cannot stage without payload, doesn't make sense - // Reinsert into the staged index at the new position, when the next attempt is due. // Do not insert if this is the last attempt -- no point doing that since it will not be transmitted again. cavl2_remove(&self->index_staged, &tr->index_staged); - tr->epoch++; - tr->staged_until += tx_ack_timeout(self->ack_baseline_timeout, tr->priority, tr->epoch); + tr->staged_until += tx_ack_timeout(self->ack_baseline_timeout, tr->priority, ++(tr->epoch)); if (tr->deadline > tr->staged_until) { (void)cavl2_find_or_insert(&self->index_staged, &tr->staged_until, @@ -1017,7 +1016,6 @@ static void tx_promote_staged(udpard_tx_t* const self, const udpard_us_t now) &tr->index_staged, cavl2_trivial_factory); } - // Enqueue for transmission unless it's been there since the last attempt (stalled interface?) for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { UDPARD_ASSERT(tr->cursor[i] == tr->head[i]); @@ -1047,7 +1045,6 @@ static void tx_eject_pending(udpard_tx_t* const self, const udpard_us_t now, con if (tr == NULL) { break; // No pending transfers at the moment. Find something else to do. } - UDPARD_ASSERT(!cavl2_is_inserted(self->index_staged, &tr->index_staged)); UDPARD_ASSERT(tr->cursor != NULL); // cannot be pending without payload, doesn't make sense // Eject the frame. From 99f8ec9ee820ce3a580b10809f3f13466656267d Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sat, 27 Dec 2025 17:58:39 +0200 Subject: [PATCH 16/42] ack sending --- libudpard/udpard.c | 118 ++++++++++++++++++++++++++++++++++++--------- libudpard/udpard.h | 10 ++-- 2 files changed, 101 insertions(+), 27 deletions(-) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index 8e3fc8d..cf216ba 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -180,14 +180,15 @@ bool udpard_is_valid_endpoint(const udpard_udpip_ep_t ep) return (ep.port != 0) && (ep.ip != 0) && (ep.ip != UINT32_MAX); } -static bool has_valid_endpoint(const udpard_udpip_ep_t remote_ep[UDPARD_IFACE_COUNT_MAX]) +static uint32_t valid_ep_mask(const udpard_udpip_ep_t remote_ep[UDPARD_IFACE_COUNT_MAX]) { + uint32_t mask = 0U; for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { if (udpard_is_valid_endpoint(remote_ep[i])) { - return true; + mask |= (1U << i); } } - return false; + return mask; } udpard_udpip_ep_t udpard_make_subject_endpoint(const uint32_t subject_id) @@ -570,6 +571,14 @@ typedef struct tx_transfer_t void* user_transfer_reference; void (*feedback)(udpard_tx_t*, udpard_tx_feedback_t); + + /// These entities are specific to outgoing acks only. I considered extracting them into a polymorphic + /// tx_transfer_ack_t subtype with a virtual destructor, but it adds a bit more complexity than I would like + /// to tolerate for a gain of only a dozen bytes per transfer object. + /// These are undefined for non-ack transfers. + udpard_tree_t index_transfer_remote; ///< Key: tx_transfer_key_t but referencing the remotes. + uint64_t remote_topic_hash; + uint64_t remote_transfer_id; } tx_transfer_t; static bool tx_validate_mem_resources(const udpard_tx_mem_resources_t memory) @@ -610,6 +619,9 @@ static void tx_transfer_free(udpard_tx_t* const tx, tx_transfer_t* const tr) } cavl2_remove(&tx->index_deadline, &tr->index_deadline); cavl2_remove(&tx->index_transfer, &tr->index_transfer); + if (cavl2_is_inserted(tx->index_transfer_remote, &tr->index_transfer_remote)) { + cavl2_remove(&tx->index_transfer_remote, &tr->index_transfer_remote); + } mem_free(tx->memory.transfer, sizeof(tx_transfer_t), tr); } @@ -654,6 +666,16 @@ static int32_t tx_cavl_compare_transfer(const void* const user, const udpard_tre if (key->transfer_id > tr->transfer_id) { return +1; } return 0; // clang-format on } +static int32_t tx_cavl_compare_transfer_remote(const void* const user, const udpard_tree_t* const node) +{ + const tx_transfer_key_t* const key = (const tx_transfer_key_t*)user; + const tx_transfer_t* const tr = CAVL2_TO_OWNER(node, tx_transfer_t, index_transfer_remote); // clang-format off + if (key->topic_hash < tr->remote_topic_hash) { return -1; } + if (key->topic_hash > tr->remote_topic_hash) { return +1; } + if (key->transfer_id < tr->remote_transfer_id) { return -1; } + if (key->transfer_id > tr->remote_transfer_id) { return +1; } + return 0; // clang-format on +} static tx_transfer_t* tx_transfer_find(udpard_tx_t* const tx, const uint64_t topic_hash, const uint64_t transfer_id) { @@ -765,7 +787,8 @@ static uint32_t tx_push(udpard_tx_t* const tx, const udpard_udpip_ep_t endpoint[UDPARD_IFACE_COUNT_MAX], const udpard_bytes_t payload, void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), - void* const user_transfer_reference) + void* const user_transfer_reference, + tx_transfer_t** const out_transfer) { UDPARD_ASSERT(now <= deadline); UDPARD_ASSERT(tx != NULL); @@ -859,6 +882,11 @@ static uint32_t tx_push(udpard_tx_t* const tx, &tx->index_transfer, &key, tx_cavl_compare_transfer, &tr->index_transfer, cavl2_trivial_factory); // Add to the agewise list to allow instant sacrifice when needed; oldest at the tail. enlist_head(&tx->agewise, &tr->agewise); + + // Finalize. + if (out_transfer != NULL) { + *out_transfer = tr; + } return n_frames; } @@ -886,7 +914,25 @@ static void tx_send_ack(udpard_rx_t* const rx, const uint64_t transfer_id, const udpard_remote_t remote) { - if (rx->tx != NULL) { + udpard_tx_t* const tx = rx->tx; + if (tx != NULL) { + // Check if an ack for this transfer is already enqueued. + const tx_transfer_key_t key = { .topic_hash = topic_hash, .transfer_id = transfer_id }; + tx_transfer_t* const prior = + CAVL2_TO_OWNER(cavl2_find(tx->index_transfer_remote, &key, &tx_cavl_compare_transfer_remote), + tx_transfer_t, + index_transfer_remote); + const uint32_t prior_ep_mask = (prior != NULL) ? valid_ep_mask(prior->destination) : 0U; + const uint32_t new_ep_mask = valid_ep_mask(remote.endpoints); + const bool new_better = (new_ep_mask & (~prior_ep_mask)) != 0U; + if (!new_better) { + return; // Can we get a new ack? We have ack at home! + } + if (prior != NULL) { + tx_transfer_free(tx, prior); // avoid redundant acks for the same transfer + } + + // Serialize the ACK payload. byte_t header[UDPARD_P2P_HEADER_BYTES]; byte_t* ptr = header; *ptr++ = P2P_KIND_ACK; @@ -895,21 +941,38 @@ static void tx_send_ack(udpard_rx_t* const rx, ptr = serialize_u64(ptr, transfer_id); UDPARD_ASSERT((ptr - header) == UDPARD_P2P_HEADER_BYTES); (void)ptr; + + // Enqueue the transfer. const udpard_bytes_t payload = { .size = UDPARD_P2P_HEADER_BYTES, .data = header }; - // TODO: scan the transmission queue for already pending acks; abort if one is already there. - const uint64_t p2p_transfer_id = rx->p2p_transfer_id++; - const uint32_t count = udpard_tx_push(rx->tx, - now, - now + ACK_TX_DEADLINE, - priority, - remote.uid, // this is a P2P transfer - remote.endpoints, - p2p_transfer_id, - payload, - NULL, - NULL); + const meta_t meta = { + .priority = priority, + .flag_ack = false, + .transfer_payload_size = (uint32_t)payload.size, + .transfer_id = tx->p2p_transfer_id++, + .sender_uid = tx->local_uid, + .topic_hash = remote.uid, // this is a P2P transfer + }; + tx_transfer_t* tr = NULL; + const uint32_t count = tx_push(tx, // + now, + now + ACK_TX_DEADLINE, + meta, + remote.endpoints, + payload, + NULL, + NULL, + &tr); UDPARD_ASSERT(count <= 1); - if (count != 1) { // ack is always a single-frame transfer, so we get either 0 or 1 + if (count == 1) { // ack is always a single-frame transfer, so we get either 0 or 1 + UDPARD_ASSERT(tr != NULL); + tr->remote_topic_hash = topic_hash; + tr->remote_transfer_id = transfer_id; + (void)cavl2_find_or_insert(&tx->index_transfer_remote, + &key, + tx_cavl_compare_transfer_remote, + &tr->index_transfer_remote, + cavl2_trivial_factory); + } else { rx->errors_ack_tx++; } } else { @@ -919,6 +982,7 @@ static void tx_send_ack(udpard_rx_t* const rx, bool udpard_tx_new(udpard_tx_t* const self, const uint64_t local_uid, + const uint64_t p2p_transfer_id_initial, const size_t enqueued_frames_limit, const udpard_tx_mem_resources_t memory, const udpard_tx_vtable_t* const vtable) @@ -929,6 +993,7 @@ bool udpard_tx_new(udpard_tx_t* const self, mem_zero(sizeof(*self), self); self->vtable = vtable; self->local_uid = local_uid; + self->p2p_transfer_id = p2p_transfer_id_initial; self->ack_baseline_timeout = UDPARD_TX_ACK_BASELINE_TIMEOUT_DEFAULT_us; self->enqueued_frames_limit = enqueued_frames_limit; self->enqueued_frames_count = 0; @@ -961,7 +1026,7 @@ uint32_t udpard_tx_push(udpard_tx_t* const self, { uint32_t out = 0; const bool ok = (self != NULL) && (deadline >= now) && (now >= 0) && (self->local_uid != 0) && - has_valid_endpoint(remote_ep) && (priority <= UDPARD_PRIORITY_MAX) && + (valid_ep_mask(remote_ep) != 0) && (priority <= UDPARD_PRIORITY_MAX) && ((payload.data != NULL) || (payload.size == 0U)) && (tx_transfer_find(self, topic_hash, transfer_id) == NULL); if (ok) { @@ -976,7 +1041,15 @@ uint32_t udpard_tx_push(udpard_tx_t* const self, .sender_uid = self->local_uid, .topic_hash = topic_hash, }; - out = tx_push(self, now, deadline, meta, remote_ep, payload, feedback, user_transfer_reference); + out = tx_push(self, // + now, + deadline, + meta, + remote_ep, + payload, + feedback, + user_transfer_reference, + NULL); } return out; } @@ -1123,7 +1196,7 @@ void udpard_tx_free(udpard_tx_t* const self) { if (self != NULL) { while (self->index_transfer != NULL) { - tx_transfer_free(self, (tx_transfer_t*)self->index_transfer); + tx_transfer_free(self, CAVL2_TO_OWNER(self->index_transfer, tx_transfer_t, index_transfer)); } } } @@ -1985,7 +2058,7 @@ static bool rx_validate_mem_resources(const udpard_rx_mem_resources_t memory) (memory.fragment.alloc != NULL) && (memory.fragment.free != NULL); } -void udpard_rx_new(udpard_rx_t* const self, udpard_tx_t* const tx, const uint64_t p2p_transfer_id_initial) +void udpard_rx_new(udpard_rx_t* const self, udpard_tx_t* const tx) { UDPARD_ASSERT(self != NULL); mem_zero(sizeof(*self), self); @@ -1995,7 +2068,6 @@ void udpard_rx_new(udpard_rx_t* const self, udpard_tx_t* const tx, const uint64_ self->errors_frame_malformed = 0; self->errors_transfer_malformed = 0; self->tx = tx; - self->p2p_transfer_id = p2p_transfer_id_initial; self->user = NULL; } diff --git a/libudpard/udpard.h b/libudpard/udpard.h index 7c0fd61..3aee688 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -330,6 +330,9 @@ struct udpard_tx_t /// The globally unique identifier of the local node. Must not change after initialization. uint64_t local_uid; + /// A random-initialized transfer-ID counter for all outgoing P2P transfers. + uint64_t p2p_transfer_id; + /// The maximum number of Cyphal transfer payload bytes per UDP datagram. /// The Cyphal/UDP header is added to this value to obtain the total UDP datagram payload size. See UDPARD_MTU_*. /// The value can be changed arbitrarily between enqueue operations as long as it is at least UDPARD_MTU_MIN. @@ -368,6 +371,7 @@ struct udpard_tx_t udpard_tree_t* index_staged; udpard_tree_t* index_deadline; udpard_tree_t* index_transfer; + udpard_tree_t* index_transfer_remote; /// Opaque pointer for the application use only. Not accessed by the library. void* user; @@ -379,6 +383,7 @@ struct udpard_tx_t /// True on success, false if any of the arguments are invalid. bool udpard_tx_new(udpard_tx_t* const self, const uint64_t local_uid, + const uint64_t p2p_transfer_id_initial, const size_t enqueued_frames_limit, const udpard_tx_mem_resources_t memory, const udpard_tx_vtable_t* const vtable); @@ -563,9 +568,6 @@ typedef struct udpard_rx_t /// If the application wants to only listen, the pointer may be NULL (no acks will be sent). udpard_tx_t* tx; - /// A random-initialized transfer-ID counter for all outgoing P2P transfers. - uint64_t p2p_transfer_id; - void* user; ///< Opaque pointer for the application use only. Not accessed by the library. } udpard_rx_t; @@ -714,7 +716,7 @@ struct udpard_rx_port_p2p_t /// The RX instance holds no resources and can be destroyed at any time by simply freeing all its ports first /// using udpard_rx_port_free(), then discarding the instance itself. The self pointer must not be NULL. -void udpard_rx_new(udpard_rx_t* const self, udpard_tx_t* const tx, const uint64_t p2p_transfer_id_initial); +void udpard_rx_new(udpard_rx_t* const self, udpard_tx_t* const tx); /// Must be invoked at least every few milliseconds (more often is fine) to purge timed-out sessions and eject /// received transfers when the reordering window expires. If this is invoked simultaneously with rx subscription From c63aadc94ae3d436a429c376367d947322c2301e Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sat, 27 Dec 2025 21:16:05 +0200 Subject: [PATCH 17/42] bugfixes --- libudpard/udpard.c | 37 +++++++++++++++++++++++++++---------- libudpard/udpard.h | 3 +-- 2 files changed, 28 insertions(+), 12 deletions(-) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index cf216ba..251bbdf 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -684,6 +684,17 @@ static tx_transfer_t* tx_transfer_find(udpard_tx_t* const tx, const uint64_t top cavl2_find(tx->index_transfer, &key, &tx_cavl_compare_transfer), tx_transfer_t, index_transfer); } +/// True iff listed in at least one interface queue. +static bool tx_is_pending(const udpard_tx_t* const tx, const tx_transfer_t* const tr) +{ + for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + if (is_listed(&tx->queue[i][tr->priority], &tr->queue[i])) { + return true; + } + } + return false; +} + static udpard_tx_feedback_t tx_make_feedback(const tx_transfer_t* const tr, const bool success) { const udpard_tx_feedback_t fb = { .topic_hash = tr->topic_hash, @@ -710,7 +721,7 @@ static tx_frame_t* tx_spool(udpard_tx_t* const tx, do { // Compute the size of the next frame, allocate it and link it up in the chain. const size_t progress = smaller(payload.size - offset, mtu); - tx_frame_t* const item = tx_frame_new(tx, memory, progress); + tx_frame_t* const item = tx_frame_new(tx, memory, progress + HEADER_SIZE_BYTES); if (NULL == head) { head = item; } else { @@ -861,6 +872,7 @@ static uint32_t tx_push(udpard_tx_t* const tx, } UDPARD_ASSERT((tx->enqueued_frames_count - enqueued_frames_before) == n_frames); UDPARD_ASSERT(tx->enqueued_frames_count <= tx->enqueued_frames_limit); + (void)enqueued_frames_before; // Enqueue for transmission immediately. for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { @@ -887,7 +899,8 @@ static uint32_t tx_push(udpard_tx_t* const tx, if (out_transfer != NULL) { *out_transfer = tr; } - return n_frames; + UDPARD_ASSERT(n_frames <= UINT32_MAX); + return (uint32_t)n_frames; } /// Handle an ACK received from a remote node. @@ -926,7 +939,7 @@ static void tx_send_ack(udpard_rx_t* const rx, const uint32_t new_ep_mask = valid_ep_mask(remote.endpoints); const bool new_better = (new_ep_mask & (~prior_ep_mask)) != 0U; if (!new_better) { - return; // Can we get a new ack? We have ack at home! + return; // Can we get an ack? We have ack at home! } if (prior != NULL) { tx_transfer_free(tx, prior); // avoid redundant acks for the same transfer @@ -1108,8 +1121,10 @@ static void tx_eject_pending(udpard_tx_t* const self, const udpard_us_t now, con while (true) { // Find the highest-priority pending transfer. tx_transfer_t* tr = NULL; - for (size_t prio = 0; prio < UDPARD_PRIORITY_COUNT; prio++) { // dear compiler, please unroll - tx_transfer_t* const candidate = LIST_TAIL(self->queue[ifindex][prio], tx_transfer_t, queue); + for (size_t prio = 0; prio < UDPARD_PRIORITY_COUNT; prio++) { + tx_transfer_t* const candidate = // This pointer arithmetic is ugly and perhaps should be improved + unbias_ptr(self->queue[ifindex][prio].tail, + offsetof(tx_transfer_t, queue) + (sizeof(udpard_list_member_t) * ifindex)); if (candidate != NULL) { tr = candidate; break; @@ -1118,7 +1133,8 @@ static void tx_eject_pending(udpard_tx_t* const self, const udpard_us_t now, con if (tr == NULL) { break; // No pending transfers at the moment. Find something else to do. } - UDPARD_ASSERT(tr->cursor != NULL); // cannot be pending without payload, doesn't make sense + UDPARD_ASSERT(tr->cursor[ifindex] != NULL); // cannot be pending without payload, doesn't make sense + UDPARD_ASSERT(tr->priority < UDPARD_PRIORITY_COUNT); // Eject the frame. const tx_frame_t* const frame = tr->cursor[ifindex]; @@ -1128,6 +1144,7 @@ static void tx_eject_pending(udpard_tx_t* const self, const udpard_us_t now, con const udpard_tx_ejection_t ejection = { .now = now, .deadline = tr->deadline, + .iface_index = ifindex, .dscp = self->dscp_value_per_priority[tr->priority], .destination = tr->destination[ifindex], .datagram = tx_frame_view(frame), @@ -1139,7 +1156,7 @@ static void tx_eject_pending(udpard_tx_t* const self, const udpard_us_t now, con // Frame ejected successfully. Update the transfer state to get ready for the next frame. if (last_attempt) { // no need to keep frames that we will no longer use; free early to reduce pressure - UDPARD_ASSERT(tr->head == tr->cursor); // They go together on the last attempt. + UDPARD_ASSERT(tr->head[ifindex] == tr->cursor[ifindex]); tr->head[ifindex] = frame_next; udpard_tx_refcount_dec(ejection.datagram); } @@ -1149,10 +1166,10 @@ static void tx_eject_pending(udpard_tx_t* const self, const udpard_us_t now, con if (last_frame) { tr->cursor[ifindex] = tr->head[ifindex]; delist(&self->queue[ifindex][tr->priority], &tr->queue[ifindex]); // no longer pending for transmission - if (last_attempt && !tr->reliable) { // Best-effort transfers are removed immediately, no ack to wait for. - tx_transfer_free(self, tr); // We can invoke the feedback callback here if needed. + UDPARD_ASSERT(!last_attempt || (tr->head[ifindex] == NULL)); // this iface is done with the payload + if (last_attempt && !tr->reliable && !tx_is_pending(self, tr)) { // remove early once all ifaces are done + tx_transfer_free(self, tr); } - UDPARD_ASSERT(!last_attempt || (tr->head == NULL)); // the payload is no longer needed } } } diff --git a/libudpard/udpard.h b/libudpard/udpard.h index 3aee688..65a0dcd 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -304,8 +304,7 @@ typedef struct udpard_tx_ejection_t /// it is optional to use depending on the implementation of the NIC driver (most traditional drivers ignore it). udpard_us_t deadline; - uint_fast8_t iface_index; ///< The interface index on which the datagram is to be transmitted. - + uint_fast8_t iface_index; ///< The interface index on which the datagram is to be transmitted. uint_fast8_t dscp; ///< Set the DSCP field of the outgoing packet to this. udpard_udpip_ep_t destination; ///< Unicast or multicast UDP/IP endpoint. From ab578676efcc76ba9d988b50102f36410b46ed4b Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sat, 27 Dec 2025 21:22:30 +0200 Subject: [PATCH 18/42] counters --- libudpard/udpard.c | 1 + libudpard/udpard.h | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index 251bbdf..a4e7070 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -644,6 +644,7 @@ static bool tx_ensure_queue_space(udpard_tx_t* const tx, const size_t total_fram break; // We may have no transfers anymore but the NIC TX driver could still be holding some frames. } tx_transfer_free(tx, victim); + tx->errors_sacrifice++; } return total_frames_needed <= (tx->enqueued_frames_limit - tx->enqueued_frames_count); } diff --git a/libudpard/udpard.h b/libudpard/udpard.h index 65a0dcd..b36b2fe 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -360,9 +360,10 @@ struct udpard_tx_t /// Error counters incremented automatically when the corresponding error condition occurs. /// These counters are never decremented by the library but they can be reset by the application if needed. - uint64_t errors_oom; ///< A transfer could not be enqueued due to OOM. + uint64_t errors_oom; ///< A transfer could not be enqueued due to OOM, while there was queue space available. uint64_t errors_capacity; ///< A transfer could not be enqueued due to queue capacity limit. - uint64_t errors_expiration; ///< A frame had to be dropped due to premature deadline expiration. + uint64_t errors_sacrifice; ///< A transfer had to be sacrificed to make room for a new transfer. + uint64_t errors_expiration; ///< A transfer had to be dequeued due to deadline expiration. /// Internal use only, do not modify! See tx_transfer_t for details. udpard_list_t queue[UDPARD_IFACE_COUNT_MAX][UDPARD_PRIORITY_COUNT]; ///< Next to transmit at the tail. From 9e21daa22c5725c16e7313e90a1d41f05c73b98f Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sat, 27 Dec 2025 21:52:25 +0200 Subject: [PATCH 19/42] fix tests --- AGENTS.md | 5 +- README.md | 20 ++-- libudpard/udpard.c | 30 ++---- libudpard/udpard.h | 2 + tests/src/test_e2e_edge.cpp | 123 +++++++++++++++------- tests/src/test_e2e_random.cpp | 125 +++++++++++++--------- tests/src/test_intrusive_rx.c | 190 ++++++++++++++++------------------ 7 files changed, 277 insertions(+), 218 deletions(-) diff --git a/AGENTS.md b/AGENTS.md index 8d4167d..88168c7 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -1,7 +1,6 @@ -# LibUDPard instructions for agents +# LibUDPard instructions for AI agents -Please read README.md for general information about LibUDPard. -The library source files are just two: `libudpard/udpard.c` and `libudpard/udpard.h`. +Please read `README.md` for general information about LibUDPard. Keep the code and comments very brief. Be sure every significant code block is preceded with a brief comment. diff --git a/README.md b/README.md index 90a5091..86f1396 100644 --- a/README.md +++ b/README.md @@ -22,17 +22,21 @@ next-generation intelligent vehicles: manned and unmanned aircraft, spacecraft, - Zero-copy RX pipeline -- payload is moved from the NIC driver all the way to the application without copying. - Support for redundant network interfaces with seamless interface aggregation and zero fail-over delay. -- Robust message reassembler tolerant to highly distorted datagram streams (out-of-order, duplication, distinct MTU). -- Message ordering recovery for ordering-sensitive applications (e.g., state estimators, control loops). +- Robust message reassembler supporting highly distorted datagram streams: + out-of-order fragments, message ordering recovery, fragment/message deduplication, interleaving, variable MTU, ... +- Robust message ordering recovery for ordering-sensitive applications (e.g., state estimators, control loops) + with well-defined deterministic recovery in the event of lost messages. - Packet loss mitigation via: - - repetition-coding FEC (transparent to the application); - redundant interfaces (packet lost on one interface may be received on another, transparent to the application); - - positive acknowledgment with retransmission (retransmission not handled by the library). + - reliable topics (retransmit until acknowledged; callback notifications for successful/failed deliveries). +- Single-copy TX pipeline with fragment deduplication across multiple interfaces and reference counting. - Heap not required; the library can be used with fixed-size block pool allocators. - Detailed time complexity and memory requirement models for the benefit of real-time high-integrity applications. -- Runs on any 8/16/32/64-bit platform and extremely resource-constrained baremetal environments with ~100K ROM/RAM. +- Runs anywhere out of the box, including extremely resource-constrained baremetal environments with ~100K ROM/RAM. + No porting required. - MISRA C compliance (reach out to ). -- Full implementation in a single C file with less than 2k lines of straightforward code! +- Full implementation in a single C file with only ~2k lines of straightforward C99! +- Extensive test coverage. ## Usage @@ -72,7 +76,9 @@ standards-compliant C99 compiler is available. ### v3.0 -WIP --- adding support for Cyphal v1.1. +The library has been redesigned from scratch to support Cyphal v1.1, named topics, and reliable transfers. +No porting guide is provided since the changes are too significant; +please refer to the new API docs in `libudpard/udpard.h`. ### v2.0 diff --git a/libudpard/udpard.c b/libudpard/udpard.c index a4e7070..d0a578a 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -921,6 +921,7 @@ static void tx_receive_ack(udpard_rx_t* const rx, const uint64_t topic_hash, con } /// Generate an ack transfer for the specified remote transfer. +/// Do nothing if an ack for the same transfer is already enqueued with equal or better endpoint coverage. static void tx_send_ack(udpard_rx_t* const rx, const udpard_us_t now, const udpard_prio_t priority, @@ -943,7 +944,7 @@ static void tx_send_ack(udpard_rx_t* const rx, return; // Can we get an ack? We have ack at home! } if (prior != NULL) { - tx_transfer_free(tx, prior); // avoid redundant acks for the same transfer + tx_transfer_free(tx, prior); // avoid redundant acks for the same transfer -- replace with better one } // Serialize the ACK payload. @@ -958,24 +959,15 @@ static void tx_send_ack(udpard_rx_t* const rx, // Enqueue the transfer. const udpard_bytes_t payload = { .size = UDPARD_P2P_HEADER_BYTES, .data = header }; - const meta_t meta = { - .priority = priority, - .flag_ack = false, - .transfer_payload_size = (uint32_t)payload.size, - .transfer_id = tx->p2p_transfer_id++, - .sender_uid = tx->local_uid, - .topic_hash = remote.uid, // this is a P2P transfer - }; - tx_transfer_t* tr = NULL; - const uint32_t count = tx_push(tx, // - now, - now + ACK_TX_DEADLINE, - meta, - remote.endpoints, - payload, - NULL, - NULL, - &tr); + const meta_t meta = { .priority = priority, + .flag_ack = false, + .transfer_payload_size = (uint32_t)payload.size, + .transfer_id = tx->p2p_transfer_id++, + .sender_uid = tx->local_uid, + .topic_hash = remote.uid }; + tx_transfer_t* tr = NULL; + const uint32_t count = + tx_push(tx, now, now + ACK_TX_DEADLINE, meta, remote.endpoints, payload, NULL, NULL, &tr); UDPARD_ASSERT(count <= 1); if (count == 1) { // ack is always a single-frame transfer, so we get either 0 or 1 UDPARD_ASSERT(tr != NULL); diff --git a/libudpard/udpard.h b/libudpard/udpard.h index b36b2fe..cccc314 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -716,6 +716,8 @@ struct udpard_rx_port_p2p_t /// The RX instance holds no resources and can be destroyed at any time by simply freeing all its ports first /// using udpard_rx_port_free(), then discarding the instance itself. The self pointer must not be NULL. +/// The TX instance must be initialized beforehand, unless the application wants to only listen, +/// in which case it may be NULL. void udpard_rx_new(udpard_rx_t* const self, udpard_tx_t* const tx); /// Must be invoked at least every few milliseconds (more often is fine) to purge timed-out sessions and eject diff --git a/tests/src/test_e2e_edge.cpp b/tests/src/test_e2e_edge.cpp index 5f5cb2f..beffddc 100644 --- a/tests/src/test_e2e_edge.cpp +++ b/tests/src/test_e2e_edge.cpp @@ -15,30 +15,58 @@ namespace { void on_message(udpard_rx_t* rx, udpard_rx_port_t* port, udpard_rx_transfer_t transfer); void on_collision(udpard_rx_t* rx, udpard_rx_port_t* port, udpard_remote_t remote); -constexpr udpard_rx_port_vtable_t callbacks{ &on_message, &on_collision }; +constexpr udpard_rx_port_vtable_t callbacks{ .on_message = &on_message, .on_collision = &on_collision }; + +struct CapturedFrame +{ + udpard_bytes_mut_t datagram; + uint_fast8_t iface_index; +}; + +void tx_refcount_free(void* const user, const size_t size, void* const payload) +{ + (void)user; + udpard_tx_refcount_dec(udpard_bytes_t{ .size = size, .data = payload }); +} + +bool capture_tx_frame(udpard_tx_t* const tx, const udpard_tx_ejection_t ejection) +{ + auto* frames = static_cast*>(tx->user); + if (frames == nullptr) { + return false; + } + udpard_tx_refcount_inc(ejection.datagram); + void* const data = const_cast(ejection.datagram.data); // NOLINT(cppcoreguidelines-pro-type-const-cast) + frames->push_back(CapturedFrame{ .datagram = { .size = ejection.datagram.size, .data = data }, + .iface_index = ejection.iface_index }); + return true; +} + +constexpr udpard_tx_vtable_t tx_vtable{ .eject = &capture_tx_frame }; struct Context { std::vector ids; size_t collisions = 0; uint64_t expected_uid = 0; - udpard_udpip_ep_t source = {}; + udpard_udpip_ep_t source{}; }; struct Fixture { - instrumented_allocator_t tx_alloc_frag{}; - instrumented_allocator_t tx_alloc_payload{}; - instrumented_allocator_t rx_alloc_frag{}; - instrumented_allocator_t rx_alloc_session{}; - udpard_tx_t tx{}; - udpard_rx_t rx{}; - udpard_rx_port_t port{}; - udpard_mem_deleter_t tx_payload_deleter{}; - Context ctx{}; - udpard_udpip_ep_t dest{}; - udpard_udpip_ep_t source{}; - uint64_t topic_hash{ 0x90AB12CD34EF5678ULL }; + instrumented_allocator_t tx_alloc_transfer{}; + instrumented_allocator_t tx_alloc_payload{}; + instrumented_allocator_t rx_alloc_frag{}; + instrumented_allocator_t rx_alloc_session{}; + udpard_tx_t tx{}; + udpard_rx_t rx{}; + udpard_rx_port_t port{}; + udpard_mem_deleter_t tx_payload_deleter{}; + std::vector frames; + Context ctx{}; + udpard_udpip_ep_t dest{}; + udpard_udpip_ep_t source{}; + uint64_t topic_hash{ 0x90AB12CD34EF5678ULL }; Fixture(const Fixture&) = delete; Fixture& operator=(const Fixture&) = delete; @@ -47,21 +75,24 @@ struct Fixture explicit Fixture(const udpard_us_t reordering_window) { - instrumented_allocator_new(&tx_alloc_frag); + instrumented_allocator_new(&tx_alloc_transfer); instrumented_allocator_new(&tx_alloc_payload); instrumented_allocator_new(&rx_alloc_frag); instrumented_allocator_new(&rx_alloc_session); - const udpard_tx_mem_resources_t tx_mem{ .fragment = instrumented_allocator_make_resource(&tx_alloc_frag), - .payload = instrumented_allocator_make_resource(&tx_alloc_payload) }; + udpard_tx_mem_resources_t tx_mem{}; + tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer); + for (auto& res : tx_mem.payload) { + res = instrumented_allocator_make_resource(&tx_alloc_payload); + } const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; - tx_payload_deleter = instrumented_allocator_make_deleter(&tx_alloc_payload); + tx_payload_deleter = udpard_mem_deleter_t{ .user = nullptr, .free = &tx_refcount_free }; source = { .ip = 0x0A000001U, .port = 7501U }; dest = udpard_make_subject_endpoint(222U); - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x0A0B0C0D0E0F1011ULL, 16, tx_mem)); - std::array rx_tx{}; - udpard_rx_new(&rx, rx_tx.data(), 0); + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x0A0B0C0D0E0F1011ULL, 42U, 16, tx_mem, &tx_vtable)); + tx.user = &frames; + udpard_rx_new(&rx, nullptr); ctx.expected_uid = tx.local_uid; ctx.source = source; rx.user = &ctx; @@ -71,36 +102,48 @@ struct Fixture ~Fixture() { udpard_rx_port_free(&rx, &port); + udpard_tx_free(&tx); TEST_ASSERT_EQUAL_size_t(0, rx_alloc_frag.allocated_fragments); TEST_ASSERT_EQUAL_size_t(0, rx_alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, tx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, tx_alloc_transfer.allocated_fragments); TEST_ASSERT_EQUAL_size_t(0, tx_alloc_payload.allocated_fragments); instrumented_allocator_reset(&rx_alloc_frag); instrumented_allocator_reset(&rx_alloc_session); - instrumented_allocator_reset(&tx_alloc_frag); + instrumented_allocator_reset(&tx_alloc_transfer); instrumented_allocator_reset(&tx_alloc_payload); } void push_single(const udpard_us_t ts, const uint64_t transfer_id) { + frames.clear(); std::array payload_buf{}; for (size_t i = 0; i < payload_buf.size(); i++) { payload_buf[i] = static_cast(transfer_id >> (i * 8U)); } const udpard_bytes_t payload{ .size = payload_buf.size(), .data = payload_buf.data() }; - const udpard_us_t deadline = ts + 1000000; - const uint_fast8_t iface_index = 0; - TEST_ASSERT_GREATER_THAN_UINT32( - 0U, - udpard_tx_push(&tx, ts, deadline, udpard_prio_slow, topic_hash, dest, transfer_id, payload, false, nullptr)); - udpard_tx_item_t* const item = udpard_tx_peek(&tx, ts); - TEST_ASSERT_NOT_NULL(item); - udpard_tx_pop(&tx, item); - TEST_ASSERT_TRUE( - udpard_rx_port_push(&rx, &port, ts, source, item->datagram_payload, tx_payload_deleter, iface_index)); - item->datagram_payload.data = nullptr; - item->datagram_payload.size = 0; - udpard_tx_free(tx.memory, item); + const udpard_us_t deadline = ts + 1000000; + for (auto& mtu_value : tx.mtu) { + mtu_value = UDPARD_MTU_DEFAULT; + } + std::array dest_per_iface{}; + dest_per_iface.fill(udpard_udpip_ep_t{}); + dest_per_iface[0] = dest; + TEST_ASSERT_GREATER_THAN_UINT32(0U, + udpard_tx_push(&tx, + ts, + deadline, + udpard_prio_slow, + topic_hash, + dest_per_iface.data(), + transfer_id, + payload, + nullptr, + nullptr)); + udpard_tx_poll(&tx, ts, UDPARD_IFACE_MASK_ALL); + TEST_ASSERT_GREATER_THAN_UINT32(0U, static_cast(frames.size())); + for (const auto& [datagram, iface_index] : frames) { + TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, &port, ts, source, datagram, tx_payload_deleter, iface_index)); + } } }; @@ -127,7 +170,7 @@ void test_udpard_rx_unordered_duplicates() Fixture fix{ UDPARD_RX_REORDERING_WINDOW_UNORDERED }; udpard_us_t now = 0; - const std::array ids{ 100, 20000, 10100, 5000, 20000, 100 }; + constexpr std::array ids{ 100, 20000, 10100, 5000, 20000, 100 }; for (const auto id : ids) { fix.push_single(now, id); udpard_rx_poll(&fix.rx, now); @@ -135,7 +178,7 @@ void test_udpard_rx_unordered_duplicates() } udpard_rx_poll(&fix.rx, now + 100); - const std::array expected{ 100, 20000, 10100, 5000 }; + constexpr std::array expected{ 100, 20000, 10100, 5000 }; TEST_ASSERT_EQUAL_size_t(expected.size(), fix.ctx.ids.size()); for (size_t i = 0; i < expected.size(); i++) { TEST_ASSERT_EQUAL_UINT64(expected[i], fix.ctx.ids[i]); @@ -176,7 +219,7 @@ void test_udpard_rx_ordered_out_of_order() // Allow the window to expire so the remaining interned transfers eject. udpard_rx_poll(&fix.rx, now + 70); - const std::array expected{ 100, 200, 300, 10100, 10200 }; + constexpr std::array expected{ 100, 200, 300, 10100, 10200 }; TEST_ASSERT_EQUAL_size_t(expected.size(), fix.ctx.ids.size()); for (size_t i = 0; i < expected.size(); i++) { TEST_ASSERT_EQUAL_UINT64(expected[i], fix.ctx.ids[i]); @@ -211,7 +254,7 @@ void test_udpard_rx_ordered_head_advanced_late() fix.push_single(++now, 310); udpard_rx_poll(&fix.rx, now); - const std::array expected{ 100, 200, 300, 420, 450 }; + constexpr std::array expected{ 100, 200, 300, 420, 450 }; TEST_ASSERT_EQUAL_size_t(expected.size(), fix.ctx.ids.size()); for (size_t i = 0; i < expected.size(); i++) { TEST_ASSERT_EQUAL_UINT64(expected[i], fix.ctx.ids[i]); diff --git a/tests/src/test_e2e_random.cpp b/tests/src/test_e2e_random.cpp index 401979a..4e94016 100644 --- a/tests/src/test_e2e_random.cpp +++ b/tests/src/test_e2e_random.cpp @@ -42,11 +42,11 @@ struct ExpectedPayload struct Context { std::unordered_map expected; - size_t received = 0; - size_t collisions = 0; - size_t truncated = 0; - uint64_t remote_uid = 0; - std::array remote_endpoints = {}; + size_t received = 0; + size_t collisions = 0; + size_t truncated = 0; + uint64_t remote_uid = 0; + std::array remote_endpoints{}; }; struct Arrival @@ -55,6 +55,12 @@ struct Arrival uint_fast8_t iface_index; }; +struct CapturedFrame +{ + udpard_bytes_mut_t datagram; + uint_fast8_t iface_index; +}; + size_t random_range(const size_t min, const size_t max) { const size_t span = max - min + 1U; @@ -76,6 +82,27 @@ void shuffle_frames(std::vector& frames) } } +void tx_refcount_free(void* const user, const size_t size, void* const payload) +{ + (void)user; + udpard_tx_refcount_dec(udpard_bytes_t{ .size = size, .data = payload }); +} + +bool capture_tx_frame(udpard_tx_t* const tx, const udpard_tx_ejection_t ejection) +{ + auto* frames = static_cast*>(tx->user); + if (frames == nullptr) { + return false; + } + udpard_tx_refcount_inc(ejection.datagram); + void* const data = const_cast(ejection.datagram.data); // NOLINT(cppcoreguidelines-pro-type-const-cast) + frames->push_back(CapturedFrame{ .datagram = { .size = ejection.datagram.size, .data = data }, + .iface_index = ejection.iface_index }); + return true; +} + +constexpr udpard_tx_vtable_t tx_vtable{ .eject = &capture_tx_frame }; + void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) { auto* const ctx = static_cast(rx->user); @@ -98,7 +125,7 @@ void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpar // Verify remote and the return path discovery. TEST_ASSERT_EQUAL_UINT64(ctx->remote_uid, transfer.remote.uid); - for (size_t i = 0; i < UDPARD_NETWORK_INTERFACE_COUNT_MAX; i++) { + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { if ((transfer.remote.endpoints[i].ip != 0U) || (transfer.remote.endpoints[i].port != 0U)) { TEST_ASSERT_EQUAL_UINT32(ctx->remote_endpoints[i].ip, transfer.remote.endpoints[i].ip); TEST_ASSERT_EQUAL_UINT16(ctx->remote_endpoints[i].port, transfer.remote.endpoints[i].port); @@ -121,7 +148,7 @@ void on_collision(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udp (void)remote; ctx->collisions++; } -constexpr udpard_rx_port_vtable_t callbacks{ &on_message, &on_collision }; +constexpr udpard_rx_port_vtable_t callbacks{ .on_message = &on_message, .on_collision = &on_collision }; /// Randomized end-to-end TX/RX covering fragmentation, reordering, and extent-driven truncation. void test_udpard_tx_rx_end_to_end() @@ -129,15 +156,17 @@ void test_udpard_tx_rx_end_to_end() seed_prng(); // TX allocator setup and pipeline initialization. - instrumented_allocator_t tx_alloc_frag{}; - instrumented_allocator_new(&tx_alloc_frag); + instrumented_allocator_t tx_alloc_transfer{}; + instrumented_allocator_new(&tx_alloc_transfer); instrumented_allocator_t tx_alloc_payload{}; instrumented_allocator_new(&tx_alloc_payload); - const udpard_mem_deleter_t tx_payload_deleter = instrumented_allocator_make_deleter(&tx_alloc_payload); - const udpard_tx_mem_resources_t tx_mem{ .fragment = instrumented_allocator_make_resource(&tx_alloc_frag), - .payload = instrumented_allocator_make_resource(&tx_alloc_payload) }; - udpard_tx_t tx; - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x0A0B0C0D0E0F1011ULL, 256, tx_mem)); + udpard_tx_mem_resources_t tx_mem{}; + tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer); + for (auto& res : tx_mem.payload) { + res = instrumented_allocator_make_resource(&tx_alloc_payload); + } + udpard_tx_t tx{}; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x0A0B0C0D0E0F1011ULL, 123U, 256, tx_mem, &tx_vtable)); // RX allocator setup and shared RX instance with callbacks. instrumented_allocator_t rx_alloc_frag{}; @@ -147,8 +176,7 @@ void test_udpard_tx_rx_end_to_end() const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; udpard_rx_t rx; - std::array rx_tx{}; - udpard_rx_new(&rx, rx_tx.data(), 0); + udpard_rx_new(&rx, nullptr); // Test parameters. constexpr std::array topic_hashes{ 0x123456789ABCDEF0ULL, @@ -157,7 +185,6 @@ void test_udpard_tx_rx_end_to_end() constexpr std::array subject_ids{ 10U, 20U, 30U }; constexpr std::array reorder_windows{ 2000, UDPARD_RX_REORDERING_WINDOW_UNORDERED, 5000 }; constexpr std::array extents{ 1000, 5000, SIZE_MAX }; - std::array iface_indices{ 0U, 1U, 2U }; // Configure ports with varied extents and reordering windows to cover truncation and different RX modes. std::array ports{}; @@ -174,6 +201,9 @@ void test_udpard_tx_rx_end_to_end() .port = static_cast(7400U + i) }; } rx.user = &ctx; + constexpr udpard_mem_deleter_t tx_payload_deleter{ .user = nullptr, .free = &tx_refcount_free }; + std::vector frames; + tx.user = &frames; // Main test loop: generate transfers, push into TX, drain and shuffle frames, push into RX. std::array transfer_ids{ static_cast(rand()), @@ -182,6 +212,7 @@ void test_udpard_tx_rx_end_to_end() udpard_us_t now = 0; for (size_t transfer_index = 0; transfer_index < 1000; transfer_index++) { now += static_cast(random_range(1000, 5000)); + frames.clear(); // Pick a port, build a random payload, and remember what to expect on that topic. const size_t port_index = random_range(0, ports.size() - 1U); @@ -200,43 +231,39 @@ void test_udpard_tx_rx_end_to_end() TEST_ASSERT_TRUE(inserted); // Generate MTUs per redundant interface. - std::array mtu_values{}; + std::array mtu_values{}; for (auto& x : mtu_values) { x = random_range(UDPARD_MTU_MIN, 3000U); } - - // Enqueue one transfer per interface with the per-interface MTU applied. - const udpard_us_t deadline = now + 1000000; - for (size_t iface = 0; iface < 3; iface++) { - tx.mtu = mtu_values[iface]; - TEST_ASSERT_GREATER_THAN_UINT32(0U, - udpard_tx_push(&tx, - now, - deadline, - priority, - topic_hashes[port_index], - dest, - transfer_id, - payload_view, - false, - &iface_indices[iface])); + for (size_t iface = 0; iface < UDPARD_IFACE_COUNT_MAX; iface++) { + tx.mtu[iface] = mtu_values[iface]; } + std::array dest_per_iface{}; + dest_per_iface.fill(dest); - // Drain TX queue into local frame list so we can shuffle before injecting into RX ports. - std::vector frames; - frames.reserve(tx.queue_size); - while (udpard_tx_item_t* const item = udpard_tx_peek(&tx, now)) { - udpard_tx_pop(&tx, item); - frames.push_back({ .datagram = item->datagram_payload, - .iface_index = *static_cast(item->user_transfer_reference) }); - item->datagram_payload.data = nullptr; - item->datagram_payload.size = 0; - udpard_tx_free(tx.memory, item); - } + // Enqueue one transfer spanning all interfaces. + const udpard_us_t deadline = now + 1000000; + TEST_ASSERT_GREATER_THAN_UINT32(0U, + udpard_tx_push(&tx, + now, + deadline, + priority, + topic_hashes[port_index], + dest_per_iface.data(), + transfer_id, + payload_view, + nullptr, + nullptr)); + udpard_tx_poll(&tx, now, UDPARD_IFACE_MASK_ALL); // Shuffle and push frames into the RX pipeline, simulating out-of-order redundant arrival. - shuffle_frames(frames); + std::vector arrivals; + arrivals.reserve(frames.size()); for (const auto& [datagram, iface_index] : frames) { + arrivals.push_back(Arrival{ .datagram = datagram, .iface_index = iface_index }); + } + shuffle_frames(arrivals); + for (const auto& [datagram, iface_index] : arrivals) { TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, &ports[port_index], now, @@ -249,7 +276,6 @@ void test_udpard_tx_rx_end_to_end() // Let the RX pipeline purge timeouts and deliver ready transfers. udpard_rx_poll(&rx, now); - TEST_ASSERT_EQUAL_size_t(0, tx.queue_size); } // Final poll/validation and cleanup. @@ -261,13 +287,14 @@ void test_udpard_tx_rx_end_to_end() for (auto& port : ports) { udpard_rx_port_free(&rx, &port); } + udpard_tx_free(&tx); TEST_ASSERT_EQUAL_size_t(0, rx_alloc_frag.allocated_fragments); TEST_ASSERT_EQUAL_size_t(0, rx_alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, tx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, tx_alloc_transfer.allocated_fragments); TEST_ASSERT_EQUAL_size_t(0, tx_alloc_payload.allocated_fragments); instrumented_allocator_reset(&rx_alloc_frag); instrumented_allocator_reset(&rx_alloc_session); - instrumented_allocator_reset(&tx_alloc_frag); + instrumented_allocator_reset(&tx_alloc_transfer); instrumented_allocator_reset(&tx_alloc_payload); } diff --git a/tests/src/test_intrusive_rx.c b/tests/src/test_intrusive_rx.c index bb67fab..0ade7cc 100644 --- a/tests/src/test_intrusive_rx.c +++ b/tests/src/test_intrusive_rx.c @@ -1594,85 +1594,72 @@ typedef struct uint64_t acked_transfer_id; } ack_tx_info_t; -// Per-interface TX pipelines used by RX for acks. typedef struct { - instrumented_allocator_t alloc_frag[UDPARD_NETWORK_INTERFACE_COUNT_MAX]; - instrumented_allocator_t alloc_payload[UDPARD_NETWORK_INTERFACE_COUNT_MAX]; - udpard_tx_t tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX]; - udpard_tx_t* ptrs[UDPARD_NETWORK_INTERFACE_COUNT_MAX]; + instrumented_allocator_t alloc_transfer; + instrumented_allocator_t alloc_payload; + udpard_tx_t tx; + ack_tx_info_t captured[16]; + size_t captured_count; } tx_fixture_t; -static void tx_fixture_init(tx_fixture_t* const self, const uint64_t uid, const size_t capacity) +static bool tx_capture_ack(udpard_tx_t* const tx, const udpard_tx_ejection_t ejection) { - for (size_t i = 0; i < UDPARD_NETWORK_INTERFACE_COUNT_MAX; i++) { - instrumented_allocator_new(&self->alloc_frag[i]); - instrumented_allocator_new(&self->alloc_payload[i]); - const udpard_tx_mem_resources_t mem = { - .fragment = instrumented_allocator_make_resource(&self->alloc_frag[i]), - .payload = instrumented_allocator_make_resource(&self->alloc_payload[i]), - }; - TEST_ASSERT(udpard_tx_new(&self->tx[i], uid, capacity, mem)); - self->ptrs[i] = &self->tx[i]; + tx_fixture_t* const self = (tx_fixture_t*)tx->user; + if ((self == NULL) || (self->captured_count >= (sizeof(self->captured) / sizeof(self->captured[0])))) { + return false; + } + udpard_tx_refcount_inc(ejection.datagram); + meta_t meta = { 0 }; + uint32_t frame_index = 0; + uint32_t frame_offset = 0; + uint32_t prefix_crc = 0; + udpard_bytes_t payload = { 0 }; + const bool ok = + header_deserialize((udpard_bytes_mut_t){ .size = ejection.datagram.size, .data = (void*)ejection.datagram.data }, + &meta, + &frame_index, + &frame_offset, + &prefix_crc, + &payload); + if (ok && (frame_index == 0U) && (frame_offset == 0U) && (payload.size == UDPARD_P2P_HEADER_BYTES)) { + const byte_t* const pl = (const byte_t*)payload.data; + if (pl[0] == P2P_KIND_ACK) { + ack_tx_info_t* const info = &self->captured[self->captured_count++]; + info->priority = meta.priority; + info->transfer_id = meta.transfer_id; + info->topic_hash = meta.topic_hash; + info->destination = ejection.destination; + (void)deserialize_u64(pl + 8U, &info->acked_topic_hash); + (void)deserialize_u64(pl + 16U, &info->acked_transfer_id); + } } + udpard_tx_refcount_dec(ejection.datagram); + return true; } -static void tx_fixture_free(tx_fixture_t* const self) +static void tx_fixture_init(tx_fixture_t* const self, const uint64_t uid, const size_t capacity) { - for (size_t i = 0; i < UDPARD_NETWORK_INTERFACE_COUNT_MAX; i++) { - TEST_ASSERT_EQUAL(0, self->tx[i].queue_size); - TEST_ASSERT_EQUAL(0, self->alloc_frag[i].allocated_fragments); - TEST_ASSERT_EQUAL(0, self->alloc_payload[i].allocated_fragments); - instrumented_allocator_reset(&self->alloc_frag[i]); - instrumented_allocator_reset(&self->alloc_payload[i]); + instrumented_allocator_new(&self->alloc_transfer); + instrumented_allocator_new(&self->alloc_payload); + self->captured_count = 0; + udpard_tx_mem_resources_t mem = { 0 }; + mem.transfer = instrumented_allocator_make_resource(&self->alloc_transfer); + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + mem.payload[i] = instrumented_allocator_make_resource(&self->alloc_payload); } + static const udpard_tx_vtable_t vtb = { .eject = &tx_capture_ack }; + TEST_ASSERT(udpard_tx_new(&self->tx, uid, 1U, capacity, mem, &vtb)); + self->tx.user = self; } -// Drains ack frames while returning the last one. -static size_t drain_ack_tx(udpard_tx_t* const tx[], const udpard_us_t now, ack_tx_info_t* const last_out) +static void tx_fixture_free(tx_fixture_t* const self) { - size_t count = 0; - for (size_t i = 0; i < UDPARD_NETWORK_INTERFACE_COUNT_MAX; i++) { - udpard_tx_t* pipeline = tx[i]; - if (pipeline == NULL) { - continue; - } - for (udpard_tx_item_t* item = udpard_tx_peek(pipeline, now); item != NULL; - item = udpard_tx_peek(pipeline, now)) { - meta_t meta = { 0 }; - uint32_t frame_index = 0; - uint32_t frame_offset = 0; - uint32_t prefix_crc = 0; - udpard_bytes_t payload = { 0 }; - ack_tx_info_t info = { 0 }; - const bool ok = header_deserialize( - (udpard_bytes_mut_t){ .size = item->datagram_payload.size, .data = item->datagram_payload.data }, - &meta, - &frame_index, - &frame_offset, - &prefix_crc, - &payload); - TEST_ASSERT_TRUE(ok); - TEST_ASSERT_EQUAL_UINT32(0, frame_index); - TEST_ASSERT_EQUAL_UINT32(0, frame_offset); - TEST_ASSERT_EQUAL_size_t(UDPARD_P2P_HEADER_BYTES, payload.size); - const byte_t* const pl = (const byte_t*)payload.data; - TEST_ASSERT_EQUAL_UINT8(P2P_KIND_ACK, pl[0]); - info.priority = meta.priority; - info.transfer_id = meta.transfer_id; - info.topic_hash = meta.topic_hash; - info.destination = item->destination; - (void)deserialize_u64(pl + 8U, &info.acked_topic_hash); - (void)deserialize_u64(pl + 16U, &info.acked_transfer_id); - if (last_out != NULL) { - *last_out = info; - } - udpard_tx_pop(pipeline, item); - udpard_tx_free(pipeline->memory, item); - count++; - } - } - return count; + udpard_tx_free(&self->tx); + TEST_ASSERT_EQUAL(0, self->alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL(0, self->alloc_payload.allocated_fragments); + instrumented_allocator_reset(&self->alloc_transfer); + instrumented_allocator_reset(&self->alloc_payload); } typedef struct @@ -1757,7 +1744,7 @@ static void test_rx_ack_enqueued(void) tx_fixture_init(&tx_fix, 0xBADC0FFEE0DDF00DULL, 8); udpard_rx_t rx; - udpard_rx_new(&rx, tx_fix.ptrs, 10); + udpard_rx_new(&rx, &tx_fix.tx); callback_result_t cb_result = { 0 }; rx.user = &cb_result; @@ -1791,8 +1778,12 @@ static void test_rx_ack_enqueued(void) now += 100; rx_session_update(ses, &rx, now, ep0, make_frame_ptr(meta, mem_payload, "hello", 0, 5), del_payload, 0); TEST_ASSERT_EQUAL(1, cb_result.message.count); - cb_result.ack.count += drain_ack_tx(tx_fix.ptrs, now, &cb_result.ack.last); - TEST_ASSERT_EQUAL(1, cb_result.ack.count); + udpard_tx_poll(&tx_fix.tx, now, (uint_fast8_t)(1U << 0U)); + cb_result.ack.count = tx_fix.captured_count; + if (tx_fix.captured_count > 0) { + cb_result.ack.last = tx_fix.captured[tx_fix.captured_count - 1U]; + } + TEST_ASSERT(cb_result.ack.count >= 1); TEST_ASSERT_EQUAL_UINT64(topic_hash, cb_result.ack.last.acked_topic_hash); TEST_ASSERT_EQUAL_UINT64(meta.transfer_id, cb_result.ack.last.acked_transfer_id); TEST_ASSERT_EQUAL_UINT32(ep0.ip, cb_result.ack.last.destination.ip); @@ -1806,8 +1797,12 @@ static void test_rx_ack_enqueued(void) const udpard_udpip_ep_t ep1 = { .ip = 0x0A000002, .port = 0x5678 }; now += 100; rx_session_update(ses, &rx, now, ep1, make_frame_ptr(meta, mem_payload, "hello", 0, 5), del_payload, 1); - cb_result.ack.count += drain_ack_tx(tx_fix.ptrs, now, &cb_result.ack.last); - TEST_ASSERT_EQUAL(3, cb_result.ack.count); // acks on interfaces 0 and 1 + udpard_tx_poll(&tx_fix.tx, now, (uint_fast8_t)(1U << 1U)); + cb_result.ack.count = tx_fix.captured_count; + if (tx_fix.captured_count > 0) { + cb_result.ack.last = tx_fix.captured[tx_fix.captured_count - 1U]; + } + TEST_ASSERT(cb_result.ack.count >= 2); // acks on interfaces 0 and 1 TEST_ASSERT_EQUAL_UINT64(meta.transfer_id, cb_result.ack.last.acked_transfer_id); udpard_rx_port_free(&rx, &port); @@ -1838,9 +1833,8 @@ static void test_rx_session_ordered(void) const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; - udpard_rx_t rx; - udpard_tx_t* rx_tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX] = { NULL, NULL, NULL }; - udpard_rx_new(&rx, rx_tx, 0); + udpard_rx_t rx; + udpard_rx_new(&rx, NULL); callback_result_t cb_result = { 0 }; rx.user = &cb_result; @@ -1979,9 +1973,8 @@ static void test_rx_session_unordered(void) const udpard_mem_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; - udpard_rx_t rx; - udpard_tx_t* rx_tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX] = { NULL, NULL, NULL }; - udpard_rx_new(&rx, rx_tx, 0); + udpard_rx_t rx; + udpard_rx_new(&rx, NULL); callback_result_t cb_result = { 0 }; rx.user = &cb_result; @@ -2126,7 +2119,7 @@ static void test_rx_session_unordered_reject_old(void) tx_fixture_t tx_fix = { 0 }; tx_fixture_init(&tx_fix, 0xF00DCAFEF00DCAFEULL, 4); udpard_rx_t rx; - udpard_rx_new(&rx, tx_fix.ptrs, 2); + udpard_rx_new(&rx, &tx_fix.tx); callback_result_t cb_result = { 0 }; rx.user = &cb_result; @@ -2195,7 +2188,11 @@ static void test_rx_session_unordered_reject_old(void) del_payload, 0); TEST_ASSERT_EQUAL(2, cb_result.message.count); - cb_result.ack.count += drain_ack_tx(tx_fix.ptrs, now, &cb_result.ack.last); + udpard_tx_poll(&tx_fix.tx, now, UDPARD_IFACE_MASK_ALL); + cb_result.ack.count = tx_fix.captured_count; + if (tx_fix.captured_count > 0) { + cb_result.ack.last = tx_fix.captured[tx_fix.captured_count - 1U]; + } TEST_ASSERT_GREATER_OR_EQUAL_UINT64(1, cb_result.ack.count); TEST_ASSERT_EQUAL_UINT64(10, cb_result.ack.last.acked_transfer_id); TEST_ASSERT_EQUAL_UINT64(port.topic_hash, cb_result.ack.last.acked_topic_hash); @@ -2225,9 +2222,8 @@ static void test_rx_session_unordered_duplicates(void) const udpard_mem_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; - udpard_rx_t rx; - udpard_tx_t* rx_tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX] = { NULL, NULL, NULL }; - udpard_rx_new(&rx, rx_tx, 0); + udpard_rx_t rx; + udpard_rx_new(&rx, NULL); callback_result_t cb_result = { 0 }; rx.user = &cb_result; @@ -2302,9 +2298,8 @@ static void test_rx_session_ordered_reject_stale_after_jump(void) const udpard_mem_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; - udpard_rx_t rx; - udpard_tx_t* rx_tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX] = { NULL, NULL, NULL }; - udpard_rx_new(&rx, rx_tx, 0); + udpard_rx_t rx; + udpard_rx_new(&rx, NULL); callback_result_t cb_result = { 0 }; rx.user = &cb_result; @@ -2410,9 +2405,8 @@ static void test_rx_session_ordered_zero_reordering_window(void) const udpard_mem_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; - udpard_rx_t rx; - udpard_tx_t* rx_tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX] = { NULL, NULL, NULL }; - udpard_rx_new(&rx, rx_tx, 0); + udpard_rx_t rx; + udpard_rx_new(&rx, NULL); callback_result_t cb_result = { 0 }; rx.user = &cb_result; @@ -2505,9 +2499,8 @@ static void test_rx_port(void) const udpard_mem_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; - udpard_rx_t rx; - udpard_tx_t* rx_tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX] = { NULL, NULL, NULL }; - udpard_rx_new(&rx, rx_tx, 0); + udpard_rx_t rx; + udpard_rx_new(&rx, NULL); callback_result_t cb_result = { 0 }; rx.user = &cb_result; @@ -2582,9 +2575,8 @@ static void test_rx_port_timeouts(void) const udpard_mem_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; - udpard_rx_t rx; - udpard_tx_t* rx_tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX] = { NULL, NULL, NULL }; - udpard_rx_new(&rx, rx_tx, 0); + udpard_rx_t rx; + udpard_rx_new(&rx, NULL); callback_result_t cb_result = { 0 }; rx.user = &cb_result; @@ -2646,9 +2638,8 @@ static void test_rx_port_oom(void) const udpard_mem_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; - udpard_rx_t rx; - udpard_tx_t* rx_tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX] = { NULL, NULL, NULL }; - udpard_rx_new(&rx, rx_tx, 0); + udpard_rx_t rx; + udpard_rx_new(&rx, NULL); callback_result_t cb_result = { 0 }; rx.user = &cb_result; @@ -2705,9 +2696,8 @@ static void test_rx_port_free_loop(void) const udpard_mem_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session }; - udpard_rx_t rx; - udpard_tx_t* rx_tx[UDPARD_NETWORK_INTERFACE_COUNT_MAX] = { NULL, NULL, NULL }; - udpard_rx_new(&rx, rx_tx, 0); + udpard_rx_t rx; + udpard_rx_new(&rx, NULL); callback_result_t cb_result = { 0 }; rx.user = &cb_result; From 4c13edbe0cbf5a8580450b6f617d41a6835d1a03 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sat, 27 Dec 2025 22:04:58 +0200 Subject: [PATCH 20/42] udpard_tx_push_p2p --- libudpard/udpard.c | 55 ++++++++++++++++++++++++++++++---------------- libudpard/udpard.h | 19 +++++++++++----- 2 files changed, 49 insertions(+), 25 deletions(-) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index d0a578a..436e59f 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -890,9 +890,11 @@ static uint32_t tx_push(udpard_tx_t* const tx, (void)cavl2_find_or_insert( &tx->index_deadline, &tr->deadline, tx_cavl_compare_deadline, &tr->index_deadline, cavl2_trivial_factory); // Add to the transfer index for incoming ack management. - const tx_transfer_key_t key = { .topic_hash = tr->topic_hash, .transfer_id = tr->transfer_id }; - (void)cavl2_find_or_insert( + const tx_transfer_key_t key = { .topic_hash = tr->topic_hash, .transfer_id = tr->transfer_id }; + const udpard_tree_t* const tree_transfer = cavl2_find_or_insert( &tx->index_transfer, &key, tx_cavl_compare_transfer, &tr->index_transfer, cavl2_trivial_factory); + UDPARD_ASSERT(tree_transfer == &tr->index_transfer); // ensure no duplicates; checked at the API level + (void)tree_transfer; // Add to the agewise list to allow instant sacrifice when needed; oldest at the tail. enlist_head(&tx->agewise, &tr->agewise); @@ -1039,23 +1041,38 @@ uint32_t udpard_tx_push(udpard_tx_t* const self, // Before attempting to enqueue a new transfer, we need to update the transmission scheduler. // It may release some items from the tx queue, and it may also promote some staged transfers to the queue. udpard_tx_poll(self, now, UDPARD_IFACE_MASK_ALL); - const meta_t meta = { - .priority = priority, - .flag_ack = feedback != NULL, - .transfer_payload_size = (uint32_t)payload.size, - .transfer_id = transfer_id, - .sender_uid = self->local_uid, - .topic_hash = topic_hash, - }; - out = tx_push(self, // - now, - deadline, - meta, - remote_ep, - payload, - feedback, - user_transfer_reference, - NULL); + const meta_t meta = { .priority = priority, + .flag_ack = feedback != NULL, + .transfer_payload_size = (uint32_t)payload.size, + .transfer_id = transfer_id, + .sender_uid = self->local_uid, + .topic_hash = topic_hash }; + out = tx_push(self, now, deadline, meta, remote_ep, payload, feedback, user_transfer_reference, NULL); + } + return out; +} + +uint32_t udpard_tx_push_p2p(udpard_tx_t* const self, + const udpard_us_t now, + const udpard_us_t deadline, + const udpard_prio_t priority, + const udpard_remote_t remote, + const udpard_bytes_t payload, + void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), + void* const user_transfer_reference) +{ + uint32_t out = 0; + if (self != NULL) { + out = udpard_tx_push(self, + now, + deadline, + priority, + remote.uid, + remote.endpoints, + self->p2p_transfer_id++, + payload, + feedback, + user_transfer_reference); } return out; } diff --git a/libudpard/udpard.h b/libudpard/udpard.h index cccc314..2602f2d 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -329,7 +329,7 @@ struct udpard_tx_t /// The globally unique identifier of the local node. Must not change after initialization. uint64_t local_uid; - /// A random-initialized transfer-ID counter for all outgoing P2P transfers. + /// A random-initialized transfer-ID counter for all outgoing P2P transfers. Must not be changed by the application. uint64_t p2p_transfer_id; /// The maximum number of Cyphal transfer payload bytes per UDP datagram. @@ -392,10 +392,6 @@ bool udpard_tx_new(udpard_tx_t* const self, /// transmission queue at the appropriate position. The transfer payload will be copied into the transmission queue /// so that the lifetime of the datagrams is not related to the lifetime of the input payload buffer. /// -/// The topic hash is not defined for P2P transfers since there are no topics involved; in P2P, this parameter -/// is used to pass the destination node's UID instead. Setting it incorrectly will cause the destination node -/// to reject the transfer as misaddressed. -/// /// The transfer_id parameter is used to populate the transfer_id field of the generated Cyphal/UDP frames. /// The caller shall increment the transfer-ID counter after each successful invocation of this function /// per redundant interface; the same transfer published over redundant interfaces shall have the same transfer-ID. @@ -435,13 +431,24 @@ uint32_t udpard_tx_push(udpard_tx_t* const self, const udpard_us_t now, const udpard_us_t deadline, const udpard_prio_t priority, - const uint64_t topic_hash, // For P2P transfers, this is the destination's UID. + const uint64_t topic_hash, const udpard_udpip_ep_t remote_ep[UDPARD_IFACE_COUNT_MAX], // May be invalid for some ifaces. const uint64_t transfer_id, const udpard_bytes_t payload, void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), // NULL if best-effort. void* const user_transfer_reference); +/// Specialization for P2P transfers. The semantics are identical to udpard_tx_push(). +/// The transfer-ID will be provided by the library based on the udpard_tx_t::p2p_transfer_id counter. +uint32_t udpard_tx_push_p2p(udpard_tx_t* const self, + const udpard_us_t now, + const udpard_us_t deadline, + const udpard_prio_t priority, + const udpard_remote_t remote, // Endpoints may be invalid for some ifaces. + const udpard_bytes_t payload, + void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), // NULL if best-effort. + void* const user_transfer_reference); + /// This should be invoked whenever the socket/NIC of this queue becomes ready to accept new datagrams for transmission. /// It is fine to also invoke it periodically unconditionally to drive the transmission process. /// Internally, the function will query the scheduler for the next frame to be transmitted and will attempt From ce43a18ac49effa059e2d94b4359c8fa7c7eb774 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sat, 27 Dec 2025 22:16:21 +0200 Subject: [PATCH 21/42] add tests --- AGENTS.md | 1 + README.md | 2 +- tests/src/test_e2e_edge.cpp | 101 +++++++++++++++++++++++++++++++++++- 3 files changed, 101 insertions(+), 3 deletions(-) diff --git a/AGENTS.md b/AGENTS.md index 88168c7..02d8989 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -5,3 +5,4 @@ Please read `README.md` for general information about LibUDPard. Keep the code and comments very brief. Be sure every significant code block is preceded with a brief comment. When building the code, don't hesitate to use multiple jobs to use all CPU cores. +To speed things up, it is best to configure CMake with `NO_STATIC_ANALYSIS=1`. diff --git a/README.md b/README.md index 86f1396..93b079c 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ next-generation intelligent vehicles: manned and unmanned aircraft, spacecraft, - Detailed time complexity and memory requirement models for the benefit of real-time high-integrity applications. - Runs anywhere out of the box, including extremely resource-constrained baremetal environments with ~100K ROM/RAM. No porting required. -- MISRA C compliance (reach out to ). +- Partial MISRA C compliance (reach out to ). - Full implementation in a single C file with only ~2k lines of straightforward C99! - Extensive test coverage. diff --git a/tests/src/test_e2e_edge.cpp b/tests/src/test_e2e_edge.cpp index beffddc..1fed924 100644 --- a/tests/src/test_e2e_edge.cpp +++ b/tests/src/test_e2e_edge.cpp @@ -16,6 +16,8 @@ namespace { void on_message(udpard_rx_t* rx, udpard_rx_port_t* port, udpard_rx_transfer_t transfer); void on_collision(udpard_rx_t* rx, udpard_rx_port_t* port, udpard_remote_t remote); constexpr udpard_rx_port_vtable_t callbacks{ .on_message = &on_message, .on_collision = &on_collision }; +void on_message_p2p(udpard_rx_t* rx, udpard_rx_port_p2p_t* port, udpard_rx_transfer_p2p_t transfer); +constexpr udpard_rx_port_p2p_vtable_t p2p_callbacks{ &on_message_p2p }; struct CapturedFrame { @@ -47,8 +49,9 @@ constexpr udpard_tx_vtable_t tx_vtable{ .eject = &capture_tx_frame }; struct Context { std::vector ids; - size_t collisions = 0; - uint64_t expected_uid = 0; + size_t collisions = 0; + uint64_t expected_uid = 0; + uint64_t expected_topic = 0; udpard_udpip_ep_t source{}; }; @@ -164,6 +167,19 @@ void on_collision(udpard_rx_t* const rx, udpard_rx_port_t* const /*port*/, const ctx->collisions++; } +void on_message_p2p(udpard_rx_t* const rx, udpard_rx_port_p2p_t* const port, const udpard_rx_transfer_p2p_t transfer) +{ + auto* const ctx = static_cast(rx->user); + ctx->ids.push_back(transfer.base.transfer_id); + if (ctx->expected_topic != 0) { + TEST_ASSERT_EQUAL_UINT64(ctx->expected_topic, transfer.topic_hash); + } + TEST_ASSERT_EQUAL_UINT64(ctx->expected_uid, transfer.base.remote.uid); + TEST_ASSERT_EQUAL_UINT32(ctx->source.ip, transfer.base.remote.endpoints[0].ip); + TEST_ASSERT_EQUAL_UINT16(ctx->source.port, transfer.base.remote.endpoints[0].port); + udpard_fragment_free_all(transfer.base.payload, port->base.memory.fragment); +} + /// UNORDERED mode should drop duplicates while keeping arrival order. void test_udpard_rx_unordered_duplicates() { @@ -262,6 +278,86 @@ void test_udpard_rx_ordered_head_advanced_late() TEST_ASSERT_EQUAL_size_t(0, fix.ctx.collisions); } +/// P2P helper should emit frames with auto transfer-ID and proper addressing. +void test_udpard_tx_push_p2p() +{ + instrumented_allocator_t tx_alloc_transfer{}; + instrumented_allocator_t tx_alloc_payload{}; + instrumented_allocator_t rx_alloc_frag{}; + instrumented_allocator_t rx_alloc_session{}; + instrumented_allocator_new(&tx_alloc_transfer); + instrumented_allocator_new(&tx_alloc_payload); + instrumented_allocator_new(&rx_alloc_frag); + instrumented_allocator_new(&rx_alloc_session); + udpard_tx_mem_resources_t tx_mem{}; + tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer); + for (auto& res : tx_mem.payload) { + res = instrumented_allocator_make_resource(&tx_alloc_payload); + } + udpard_tx_t tx{}; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x1122334455667788ULL, 5U, 8, tx_mem, &tx_vtable)); + std::vector frames; + tx.user = &frames; + + const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), + .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; + udpard_rx_t rx{}; + udpard_rx_port_p2p_t port{}; + Context ctx{}; + const udpard_udpip_ep_t source{ .ip = 0x0A0000AAU, .port = 7600U }; + const udpard_udpip_ep_t dest{ .ip = 0x0A000010U, .port = 7400U }; + const uint64_t local_uid = 0xCAFEBABECAFED00DULL; + const uint64_t topic_hash = 0xAABBCCDDEEFF1122ULL; + ctx.expected_uid = tx.local_uid; + ctx.expected_topic = topic_hash; + ctx.source = source; + rx.user = &ctx; + TEST_ASSERT_TRUE(udpard_rx_port_new_p2p(&port, local_uid, 1024, rx_mem, &p2p_callbacks)); + + udpard_remote_t remote{}; + remote.uid = local_uid; + remote.endpoints[0U] = dest; + + std::array payload_buf{}; + constexpr uint8_t p2p_kind_response = 0U; + payload_buf[0] = p2p_kind_response; + for (size_t i = 0; i < sizeof(topic_hash); i++) { + payload_buf[8U + i] = static_cast((topic_hash >> (i * 8U)) & 0xFFU); + } + const uint64_t response_transfer_id = 55; + for (size_t i = 0; i < sizeof(response_transfer_id); i++) { + payload_buf[16U + i] = static_cast((response_transfer_id >> (i * 8U)) & 0xFFU); + } + const udpard_bytes_t payload{ .size = payload_buf.size(), .data = payload_buf.data() }; + const udpard_us_t now = 0; + const uint64_t first_id = tx.p2p_transfer_id; + TEST_ASSERT_GREATER_THAN_UINT32( + 0U, udpard_tx_push_p2p(&tx, now, now + 1000000, udpard_prio_nominal, remote, payload, nullptr, nullptr)); + udpard_tx_poll(&tx, now, UDPARD_IFACE_MASK_ALL); + TEST_ASSERT_FALSE(frames.empty()); + + const udpard_mem_deleter_t tx_payload_deleter{ .user = nullptr, .free = &tx_refcount_free }; + for (const auto& f : frames) { + TEST_ASSERT_TRUE(udpard_rx_port_push( + &rx, reinterpret_cast(&port), now, source, f.datagram, tx_payload_deleter, f.iface_index)); + } + udpard_rx_poll(&rx, now); + TEST_ASSERT_EQUAL_size_t(1, ctx.ids.size()); + TEST_ASSERT_EQUAL_UINT64(first_id, ctx.ids[0]); + TEST_ASSERT_EQUAL_size_t(0, ctx.collisions); + + udpard_rx_port_free(&rx, reinterpret_cast(&port)); + udpard_tx_free(&tx); + TEST_ASSERT_EQUAL(0, tx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL(0, tx_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, rx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL(0, rx_alloc_session.allocated_fragments); + instrumented_allocator_reset(&tx_alloc_transfer); + instrumented_allocator_reset(&tx_alloc_payload); + instrumented_allocator_reset(&rx_alloc_frag); + instrumented_allocator_reset(&rx_alloc_session); +} + } // namespace extern "C" void setUp() {} @@ -274,5 +370,6 @@ int main() RUN_TEST(test_udpard_rx_unordered_duplicates); RUN_TEST(test_udpard_rx_ordered_out_of_order); RUN_TEST(test_udpard_rx_ordered_head_advanced_late); + RUN_TEST(test_udpard_tx_push_p2p); return UNITY_END(); } From d032a718bada825be79591b242898f0461fb5b44 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sat, 27 Dec 2025 23:03:29 +0200 Subject: [PATCH 22/42] nits --- libudpard/udpard.c | 114 +++++++++++++++++++++++---------------------- libudpard/udpard.h | 6 ++- 2 files changed, 63 insertions(+), 57 deletions(-) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index 436e59f..78fe58a 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -183,7 +183,7 @@ bool udpard_is_valid_endpoint(const udpard_udpip_ep_t ep) static uint32_t valid_ep_mask(const udpard_udpip_ep_t remote_ep[UDPARD_IFACE_COUNT_MAX]) { uint32_t mask = 0U; - for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { if (udpard_is_valid_endpoint(remote_ep[i])) { mask |= (1U << i); } @@ -201,7 +201,7 @@ void udpard_fragment_free_all(udpard_fragment_t* const frag, const udpard_mem_re { if (frag != NULL) { // Descend the tree - for (uint_fast8_t i = 0; i < 2; i++) { + for (size_t i = 0; i < 2; i++) { if (frag->index_offset.lr[i] != NULL) { frag->index_offset.lr[i]->up = NULL; // Prevent backtrack ascension from this branch udpard_fragment_free_all((udpard_fragment_t*)frag->index_offset.lr[i], fragment_mem_resource); @@ -348,7 +348,6 @@ static uint32_t crc_full(const size_t n_bytes, const void* const data) // --------------------------------------------- LIST CONTAINER --------------------------------------------- -/// True iff the member is in the list. static bool is_listed(const udpard_list_t* const list, const udpard_list_member_t* const member) { return (member->next != NULL) || (member->prev != NULL) || (list->head == member); @@ -391,8 +390,8 @@ static void enlist_head(udpard_list_t* const list, udpard_list_member_t* const m assert((list->head != NULL) && (list->tail != NULL)); } -#define LIST_MEMBER(ptr, owner_type, owner_field) ((owner_type*)unbias_ptr((ptr), offsetof(owner_type, owner_field))) -static void* unbias_ptr(const void* const ptr, const size_t offset) +#define LIST_MEMBER(ptr, owner_type, owner_field) ((owner_type*)ptr_unbias((ptr), offsetof(owner_type, owner_field))) +static void* ptr_unbias(const void* const ptr, const size_t offset) { return (ptr == NULL) ? NULL : (void*)((char*)ptr - offset); } @@ -405,7 +404,7 @@ static void* unbias_ptr(const void* const ptr, const size_t offset) #define HEADER_SIZE_BYTES 48U #define HEADER_VERSION 2U #define HEADER_FLAG_ACK 0x01U -#define HEADER_FRAME_INDEX_MAX 0xFFFFFFU /// 4 GiB with 256-byte MTU +#define HEADER_FRAME_INDEX_MAX 0xFFFFFFU /// 4 GiB with 256-byte MTU; 21.6 GiB with 1384-byte MTU typedef struct { @@ -510,7 +509,7 @@ static udpard_bytes_t tx_frame_view(const tx_frame_t* const frame) static tx_frame_t* tx_frame_from_view(const udpard_bytes_t view) { - return (tx_frame_t*)unbias_ptr(view.data, offsetof(tx_frame_t, data)); + return (tx_frame_t*)ptr_unbias(view.data, offsetof(tx_frame_t, data)); } static tx_frame_t* tx_frame_new(udpard_tx_t* const tx, const udpard_mem_resource_t mem, const size_t data_size) @@ -536,9 +535,8 @@ typedef struct } tx_transfer_key_t; /// The transmission scheduler maintains several indexes for the transfers in the pipeline. -/// /// The segregated priority queue only contains transfers that are ready for transmission. -/// The staged index contains transfers ordered by readiness time; +/// The staged index contains transfers ordered by readiness for retransmission; /// transfers that will no longer be transmitted but are retained waiting for the ack are in neither of these. /// The deadline index contains ALL transfers, ordered by their deadlines, used for purging expired transfers. /// The transfer index contains ALL transfers, used for lookup by (topic_hash, transfer_id). @@ -558,8 +556,8 @@ typedef struct tx_transfer_t /// Mutable transmission state. All other fields, except for the index handles, are immutable. tx_frame_t* cursor[UDPARD_IFACE_COUNT_MAX]; - uint_fast8_t epoch; ///< Does not overflow due to exponential backoff. - udpard_us_t staged_until; ///< If staged_until>=deadline, this is the last attempt; frames can be freed as leave. + uint_fast8_t epoch; ///< Does not overflow due to exponential backoff; e.g. 1us with epoch=48 => 9 years. + udpard_us_t staged_until; ///< If staged_until>=deadline, this is the last attempt; frames can be freed on the go. /// Constant transfer properties supplied by the client. uint64_t topic_hash; @@ -575,7 +573,7 @@ typedef struct tx_transfer_t /// These entities are specific to outgoing acks only. I considered extracting them into a polymorphic /// tx_transfer_ack_t subtype with a virtual destructor, but it adds a bit more complexity than I would like /// to tolerate for a gain of only a dozen bytes per transfer object. - /// These are undefined for non-ack transfers. + /// These are unused for non-ack transfers. udpard_tree_t index_transfer_remote; ///< Key: tx_transfer_key_t but referencing the remotes. uint64_t remote_topic_hash; uint64_t remote_transfer_id; @@ -583,7 +581,7 @@ typedef struct tx_transfer_t static bool tx_validate_mem_resources(const udpard_tx_mem_resources_t memory) { - for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { if ((memory.payload[i].alloc == NULL) || (memory.payload[i].free == NULL)) { return false; } @@ -594,7 +592,7 @@ static bool tx_validate_mem_resources(const udpard_tx_mem_resources_t memory) static void tx_transfer_free_payload(tx_transfer_t* const tr) { UDPARD_ASSERT(tr != NULL); - for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { const tx_frame_t* frame = tr->head[i]; while (frame != NULL) { const tx_frame_t* const next = frame->next; @@ -610,7 +608,7 @@ static void tx_transfer_free(udpard_tx_t* const tx, tx_transfer_t* const tr) { UDPARD_ASSERT(tr != NULL); tx_transfer_free_payload(tr); - for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { delist(&tx->queue[i][tr->priority], &tr->queue[i]); } delist(&tx->agewise, &tr->agewise); @@ -688,7 +686,7 @@ static tx_transfer_t* tx_transfer_find(udpard_tx_t* const tx, const uint64_t top /// True iff listed in at least one interface queue. static bool tx_is_pending(const udpard_tx_t* const tx, const tx_transfer_t* const tr) { - for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { if (is_listed(&tx->queue[i][tr->priority], &tr->queue[i])) { return true; } @@ -777,10 +775,10 @@ static size_t tx_predict_frame_count(const size_t mtu[UDPARD_IFAC const size_t payload_size) { size_t n_frames_total = 0; - for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { if (udpard_is_valid_endpoint(endpoint[i])) { bool shared = false; - for (uint_fast8_t j = 0; j < i; j++) { + for (size_t j = 0; j < i; j++) { shared = shared || (udpard_is_valid_endpoint(endpoint[j]) && tx_spool_shareable(mtu[i], memory[i], mtu[j], memory[j])); } @@ -789,6 +787,7 @@ static size_t tx_predict_frame_count(const size_t mtu[UDPARD_IFAC } } } + UDPARD_ASSERT(n_frames_total > 0); // The caller ensures that at least one endpoint is valid. return n_frames_total; } @@ -804,9 +803,11 @@ static uint32_t tx_push(udpard_tx_t* const tx, { UDPARD_ASSERT(now <= deadline); UDPARD_ASSERT(tx != NULL); + UDPARD_ASSERT(valid_ep_mask(endpoint) != 0); + UDPARD_ASSERT((payload.data != NULL) || (payload.size == 0U)); // Ensure the queue has enough space. - for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { tx->mtu[i] = larger(tx->mtu[i], UDPARD_MTU_MIN); // enforce minimum MTU } const size_t n_frames = tx_predict_frame_count(tx->mtu, tx->memory.payload, endpoint, meta.transfer_payload_size); @@ -815,7 +816,7 @@ static uint32_t tx_push(udpard_tx_t* const tx, return 0; } - // Construct the transfer object, without the frames for now. The frame spools will be constructed next. + // Construct the empty transfer object, without the frames for now. The frame spools will be constructed next. tx_transfer_t* const tr = mem_alloc(tx->memory.transfer, sizeof(tx_transfer_t)); if (tr == NULL) { tx->errors_oom++; @@ -832,15 +833,15 @@ static uint32_t tx_push(udpard_tx_t* const tx, tr->feedback = feedback; tr->staged_until = meta.flag_ack ? (now + tx_ack_timeout(tx->ack_baseline_timeout, tr->priority, tr->epoch)) : HEAT_DEATH; - for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { tr->destination[i] = endpoint[i]; tr->head[i] = tr->cursor[i] = NULL; } - // Spool the frames for each interface, with deduplication where possible to conserve space. + // Spool the frames for each interface, with deduplication where possible to conserve memory and queue space. const size_t enqueued_frames_before = tx->enqueued_frames_count; bool oom = false; - for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { if (udpard_is_valid_endpoint(tr->destination[i])) { if (tr->head[i] == NULL) { tr->head[i] = tx_spool(tx, tx->memory.payload[i], tx->mtu[i], meta, payload); @@ -850,7 +851,7 @@ static uint32_t tx_push(udpard_tx_t* const tx, break; } // Detect which interfaces can use the same spool to conserve memory. - for (uint_fast8_t j = i + 1; j < UDPARD_IFACE_COUNT_MAX; j++) { + for (size_t j = i + 1; j < UDPARD_IFACE_COUNT_MAX; j++) { if (udpard_is_valid_endpoint(tr->destination[j]) && tx_spool_shareable(tx->mtu[i], tx->memory.payload[i], tx->mtu[j], tx->memory.payload[j])) { tr->head[j] = tr->head[i]; @@ -876,7 +877,7 @@ static uint32_t tx_push(udpard_tx_t* const tx, (void)enqueued_frames_before; // Enqueue for transmission immediately. - for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { if (udpard_is_valid_endpoint(tr->destination[i])) { enlist_head(&tx->queue[i][tr->priority], &tr->queue[i]); } @@ -895,7 +896,7 @@ static uint32_t tx_push(udpard_tx_t* const tx, &tx->index_transfer, &key, tx_cavl_compare_transfer, &tr->index_transfer, cavl2_trivial_factory); UDPARD_ASSERT(tree_transfer == &tr->index_transfer); // ensure no duplicates; checked at the API level (void)tree_transfer; - // Add to the agewise list to allow instant sacrifice when needed; oldest at the tail. + // Add to the agewise list for sacrifice management on queue exhaustion. enlist_head(&tx->agewise, &tr->agewise); // Finalize. @@ -1010,9 +1011,9 @@ bool udpard_tx_new(udpard_tx_t* const self, self->index_deadline = NULL; self->index_transfer = NULL; self->user = NULL; - for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { self->mtu[i] = UDPARD_MTU_DEFAULT; - for (uint_fast8_t p = 0; p < UDPARD_PRIORITY_COUNT; p++) { + for (size_t p = 0; p < UDPARD_PRIORITY_COUNT; p++) { self->queue[i][p].head = NULL; self->queue[i][p].tail = NULL; } @@ -1077,7 +1078,7 @@ uint32_t udpard_tx_push_p2p(udpard_tx_t* const self, return out; } -static void tx_purge_expired(udpard_tx_t* const self, const udpard_us_t now) +static void tx_purge_expired_transfers(udpard_tx_t* const self, const udpard_us_t now) { while (true) { // we can use next_greater instead of doing min search every time tx_transfer_t* const tr = CAVL2_TO_OWNER(cavl2_min(self->index_deadline), tx_transfer_t, index_deadline); @@ -1095,7 +1096,7 @@ static void tx_purge_expired(udpard_tx_t* const self, const udpard_us_t now) } } -static void tx_promote_staged(udpard_tx_t* const self, const udpard_us_t now) +static void tx_promote_staged_transfers(udpard_tx_t* const self, const udpard_us_t now) { while (true) { // we can use next_greater instead of doing min search every time tx_transfer_t* const tr = CAVL2_TO_OWNER(cavl2_min(self->index_staged), tx_transfer_t, index_staged); @@ -1113,7 +1114,7 @@ static void tx_promote_staged(udpard_tx_t* const self, const udpard_us_t now) cavl2_trivial_factory); } // Enqueue for transmission unless it's been there since the last attempt (stalled interface?) - for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { UDPARD_ASSERT(tr->cursor[i] == tr->head[i]); if (udpard_is_valid_endpoint(tr->destination[i]) && !is_listed(&self->queue[i][tr->priority], &tr->queue[i])) { @@ -1126,14 +1127,14 @@ static void tx_promote_staged(udpard_tx_t* const self, const udpard_us_t now) } } -static void tx_eject_pending(udpard_tx_t* const self, const udpard_us_t now, const uint_fast8_t ifindex) +static void tx_eject_pending_frames(udpard_tx_t* const self, const udpard_us_t now, const uint_fast8_t ifindex) { while (true) { // Find the highest-priority pending transfer. tx_transfer_t* tr = NULL; for (size_t prio = 0; prio < UDPARD_PRIORITY_COUNT; prio++) { tx_transfer_t* const candidate = // This pointer arithmetic is ugly and perhaps should be improved - unbias_ptr(self->queue[ifindex][prio].tail, + ptr_unbias(self->queue[ifindex][prio].tail, offsetof(tx_transfer_t, queue) + (sizeof(udpard_list_member_t) * ifindex)); if (candidate != NULL) { tr = candidate; @@ -1184,14 +1185,14 @@ static void tx_eject_pending(udpard_tx_t* const self, const udpard_us_t now, con } } -void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now, const uint_fast8_t iface_mask) +void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now, const uint32_t iface_mask) { - if ((self != NULL) && (now >= 0)) { // This is the main scheduler state machine update tick. - tx_purge_expired(self, now); // This may free up some memory and some queue slots. - tx_promote_staged(self, now); // This may add some new transfers to the queue. + if ((self != NULL) && (now >= 0)) { // This is the main scheduler state machine update tick. + tx_purge_expired_transfers(self, now); // This may free up some memory and some queue slots. + tx_promote_staged_transfers(self, now); // This may add some new transfers to the queue. for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { if ((iface_mask & (1U << i)) != 0U) { - tx_eject_pending(self, now, i); + tx_eject_pending_frames(self, now, i); } } } @@ -1237,7 +1238,7 @@ void udpard_tx_free(udpard_tx_t* const self) // Ports are created by the application per subject to subscribe to. There are various parameters defined per port, // such as the extent (max payload size to accept) and the reassembly mode (ORDERED, UNORDERED, STATELESS). // -// Each port automatically creates a dedicated session per remote node that publishes on that subject +// Each port automatically dynamically creates a dedicated session per remote node that publishes on that subject // (unless the STATELESS mode is used, which is simple and limited). Sessions are automatically cleaned up and // removed when the remote node ceases to publish for a certain (large) timeout period. // @@ -1249,29 +1250,31 @@ void udpard_tx_free(udpard_tx_t* const self) // and defragmentation; since all interfaces are pooled together, the reassembler is completely insensitive to // permanent or transient failure of any of the redundant interfaces; as long as at least one of them is able to // deliver frames, the link will function; further, transient packet loss in one of the interfaces does not affect -// the overall reliability. +// the overall reliability. The message reception machine always operates at the throughput and latency of the +// best-performing interface at any given time with seamless failover. // -// Each session holds an efficient bitmap of recently received/seen transfers, which is used for ack retransmission +// Each session keeps track of recently received/seen transfers, which is used for ack retransmission // if the remote end attempts to retransmit a transfer that was already fully received, and is also used for duplicate // rejection. In the ORDERED mode, late transfers (those arriving out of order past the reordering window closure) -// are never acked, but they may still be received and acked by some other nodes in the network. +// are never acked, but they may still be received and acked by some other nodes in the network that were able to +// accept them. // // Acks are transmitted immediately upon successful reception of a transfer. If the remote end retransmits the transfer // (e.g., if the first ack was lost or due to a spurious duplication), repeat acks are only retransmitted -// for the first frame of the transfer because: -// -// - We don't want to flood the network with duplicate ACKs for every fragment of a multi-frame transfer. -// They are already duplicated for each redundant interface. -// -// - The application may need to look at the head of the transfer to handle acks, which is in the first frame. +// for the first frame of the transfer because we don't want to flood the network with duplicate ACKs for every // // The redundant interfaces may have distinct MTUs, so the fragment offsets and sizes may vary significantly. -// The reassembler decides if a newly arrived fragment is needed based on gap detection in the fragment tree. +// The reassembler decides if a newly arrived fragment is needed based on gap/overlap detection in the fragment tree. // An accepted fragment may overlap with neighboring fragments; however, the reassembler guarantees that no fragment is // fully contained within another fragment; this also implies that there are no fragments sharing the same offset, // and that fragments ordered by offset are also ordered by their ends. -// The reassembler prefers to keep fewer large fragments over many small fragments, to reduce the overhead of +// The reassembler prefers to keep fewer large fragments over many small fragments to reduce the overhead of // managing the fragment tree and the amount of auxiliary memory required for it. +// +// The code here does a lot of linear lookups. This is intentional and is not expected to bring any performance issues +// because all loops are tightly bounded with a compile-time known maximum number of iterations that is very small +// in practice (e.g., number of slots per session, number of priority levels, number of interfaces). For small +// number of iterations this is much faster than more sophisticated lookup structures. /// All but the transfer metadata: fields that change from frame to frame within the same transfer. typedef struct @@ -1468,8 +1471,7 @@ static rx_fragment_tree_update_result_t rx_fragment_tree_update(udpard_tree_t** } /// 1. Eliminates payload overlaps. They may appear if redundant interfaces with different MTU settings are used. -/// 2. Verifies the CRC of the reassembled payload. -/// 3. Links all fragments into a linked list for convenient application consumption. +/// 2. Verifies the end-to-end CRC of the full reassembled payload. /// Returns true iff the transfer is valid and safe to deliver to the application. /// Observe that this function alters the tree ordering keys, but it does not alter the tree topology, /// because each fragment's offset is changed within the bounds that preserve the ordering. @@ -1761,10 +1763,10 @@ static void rx_session_eject(rx_session_t* const self, udpard_rx_t* const rx, rx } /// In the ORDERED mode, checks which slots can be ejected or interned in the reordering window. -/// This is only useful for the ORDERED mode. +/// This is only useful for the ORDERED mode. This mode is much more complex and CPU-heavy than the UNORDERED mode. /// Should be invoked whenever a slot MAY or MUST be ejected (i.e., on completion or when an empty slot is required). /// If the force flag is set, at least one DONE slot will be ejected even if its reordering window is still open; -/// this is used to forcibly free up at least one slot when all slots are busy and a new transfer arrives. +/// this is used to forcibly free up at least one slot when no slot is idle and a new transfer arrives. static void rx_session_ordered_scan_slots(rx_session_t* const self, udpard_rx_t* const rx, const udpard_us_t ts, @@ -1921,6 +1923,7 @@ static void rx_session_update(rx_session_t* const self, } /// The ORDERED mode implementation. May delay incoming transfers to maintain strict transfer-ID ordering. +/// The ORDERED mode is much more complex and CPU-heavy. static void rx_session_update_ordered(rx_session_t* const self, udpard_rx_t* const rx, const udpard_us_t ts, @@ -1966,6 +1969,7 @@ static void rx_session_update_ordered(rx_session_t* const self, } /// The UNORDERED mode implementation. Ejects every transfer immediately upon completion without delay. +/// The reordering timer is not used. static void rx_session_update_unordered(rx_session_t* const self, udpard_rx_t* const rx, const udpard_us_t ts, @@ -2100,7 +2104,7 @@ void udpard_rx_new(udpard_rx_t* const self, udpard_tx_t* const tx) void udpard_rx_poll(udpard_rx_t* const self, const udpard_us_t now) { - // Retire timed out sessions. We retire at most one per poll to avoid burstiness because session retirement + // Retire timed out sessions. We retire at most one per poll to avoid burstiness -- session retirement // may potentially free up a lot of memory at once. { rx_session_t* const ses = LIST_TAIL(self->list_session_by_animation, rx_session_t, list_by_animation); diff --git a/libudpard/udpard.h b/libudpard/udpard.h index 2602f2d..1e4eeed 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -456,7 +456,7 @@ uint32_t udpard_tx_push_p2p(udpard_tx_t* const self, /// The iface mask indicates which interfaces are currently available for transmission; /// eject() will only be invoked on these interfaces. /// The function may deallocate memory. The time complexity is logarithmic in the number of enqueued transfers. -void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now, const uint_fast8_t iface_mask); +void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now, const uint32_t iface_mask); /// When a datagram is ejected and the application opts to keep it, these functions must be used to manage the /// datagram buffer lifetime. The datagram will be freed once the reference count reaches zero. @@ -776,7 +776,9 @@ bool udpard_rx_port_new_p2p(udpard_rx_port_p2p_t* const self, const udpard_rx_port_p2p_vtable_t* const vtable); /// Returns all memory allocated for the sessions, slots, fragments, etc of the given port. -/// Does not free the port itself and does not alter the RX instance aside from unlinking the port from it. +/// This is usable with udpard_rx_port_p2p_t as well via the base member. +/// Does not free the port itself since it is allocated by the application rather than the library, +/// and does not alter the RX instance aside from unlinking the port from it. /// It is safe to invoke this at any time, but the port instance shall not be used again unless re-initialized. /// The function has no effect if any of the arguments are NULL. void udpard_rx_port_free(udpard_rx_t* const rx, udpard_rx_port_t* const port); From 7dcbe379841e97816c7810426fa8c0e891211a5e Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sun, 28 Dec 2025 00:24:01 +0200 Subject: [PATCH 23/42] update the docs --- libudpard/udpard.c | 47 ++++++------ libudpard/udpard.h | 173 +++++++++++++++++++++++++-------------------- 2 files changed, 125 insertions(+), 95 deletions(-) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index 78fe58a..ec185e4 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -197,25 +197,25 @@ udpard_udpip_ep_t udpard_make_subject_endpoint(const uint32_t subject_id) } // NOLINTNEXTLINE(misc-no-recursion) -void udpard_fragment_free_all(udpard_fragment_t* const frag, const udpard_mem_resource_t fragment_mem_resource) +void udpard_fragment_free_all(udpard_fragment_t* const frag, const udpard_mem_resource_t mem_fragment) { if (frag != NULL) { // Descend the tree for (size_t i = 0; i < 2; i++) { if (frag->index_offset.lr[i] != NULL) { frag->index_offset.lr[i]->up = NULL; // Prevent backtrack ascension from this branch - udpard_fragment_free_all((udpard_fragment_t*)frag->index_offset.lr[i], fragment_mem_resource); + udpard_fragment_free_all((udpard_fragment_t*)frag->index_offset.lr[i], mem_fragment); frag->index_offset.lr[i] = NULL; // Avoid dangly pointers even if we're headed for imminent destruction } } // Delete this fragment udpard_fragment_t* const parent = (udpard_fragment_t*)frag->index_offset.up; mem_free_payload(frag->payload_deleter, frag->origin); - mem_free(fragment_mem_resource, sizeof(udpard_fragment_t), frag); + mem_free(mem_fragment, sizeof(udpard_fragment_t), frag); // Ascend the tree. if (parent != NULL) { parent->index_offset.lr[parent->index_offset.lr[1] == (udpard_tree_t*)frag] = NULL; - udpard_fragment_free_all(parent, fragment_mem_resource); // tail call hopefully + udpard_fragment_free_all(parent, mem_fragment); // tail call } } } @@ -757,15 +757,18 @@ static udpard_us_t tx_ack_timeout(const udpard_us_t baseline, const udpard_prio_ return baseline * (1L << smaller((size_t)prio + attempts, 62)); // NOLINT(*-signed-bitwise) } -/// A transfer can use the same fragments between two interfaces if both have the same MTU and use the same allocator. +/// A transfer can use the same fragments between two interfaces if +/// (both have the same MTU OR the transfer fits in both MTU) AND both use the same allocator. +/// Either they will share the same spool, or there is only a single frame so the MTU difference does not matter. /// The allocator requirement is important because it is possible that distinct NICs may not be able to reach the /// same memory region via DMA. static bool tx_spool_shareable(const size_t mtu_a, const udpard_mem_resource_t mem_a, const size_t mtu_b, - const udpard_mem_resource_t mem_b) + const udpard_mem_resource_t mem_b, + const size_t payload_size) { - return (mtu_a == mtu_b) && mem_same(mem_a, mem_b); + return ((mtu_a == mtu_b) || (payload_size <= smaller(mtu_a, mtu_b))) && mem_same(mem_a, mem_b); } /// The prediction takes into account that some interfaces may share the same frame spool. @@ -774,13 +777,14 @@ static size_t tx_predict_frame_count(const size_t mtu[UDPARD_IFAC const udpard_udpip_ep_t endpoint[UDPARD_IFACE_COUNT_MAX], const size_t payload_size) { + UDPARD_ASSERT(valid_ep_mask(endpoint) != 0); // The caller ensures that at least one endpoint is valid. size_t n_frames_total = 0; for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { if (udpard_is_valid_endpoint(endpoint[i])) { bool shared = false; for (size_t j = 0; j < i; j++) { shared = shared || (udpard_is_valid_endpoint(endpoint[j]) && - tx_spool_shareable(mtu[i], memory[i], mtu[j], memory[j])); + tx_spool_shareable(mtu[i], memory[i], mtu[j], memory[j], payload_size)); } if (!shared) { n_frames_total += larger(1, (payload_size + mtu[i] - 1U) / mtu[i]); @@ -853,7 +857,11 @@ static uint32_t tx_push(udpard_tx_t* const tx, // Detect which interfaces can use the same spool to conserve memory. for (size_t j = i + 1; j < UDPARD_IFACE_COUNT_MAX; j++) { if (udpard_is_valid_endpoint(tr->destination[j]) && - tx_spool_shareable(tx->mtu[i], tx->memory.payload[i], tx->mtu[j], tx->memory.payload[j])) { + tx_spool_shareable(tx->mtu[i], + tx->memory.payload[i], + tx->mtu[j], + tx->memory.payload[j], + meta.transfer_payload_size)) { tr->head[j] = tr->head[i]; tr->cursor[j] = tr->cursor[i]; tx_frame_t* frame = tr->head[j]; @@ -2014,7 +2022,7 @@ static void rx_port_accept_stateful(udpard_rx_t* const rx, const udpard_udpip_ep_t source_ep, rx_frame_t* const frame, const udpard_mem_deleter_t payload_deleter, - const uint_fast8_t redundant_iface_index) + const uint_fast8_t iface_index) { rx_session_factory_args_t fac_args = { .owner = port, .sessions_by_animation = &rx->list_session_by_animation, @@ -2027,7 +2035,7 @@ static void rx_port_accept_stateful(udpard_rx_t* const rx, &fac_args, &cavl_factory_rx_session_by_remote_uid); if (ses != NULL) { - rx_session_update(ses, rx, timestamp, source_ep, frame, payload_deleter, redundant_iface_index); + rx_session_update(ses, rx, timestamp, source_ep, frame, payload_deleter, iface_index); } else { mem_free_payload(payload_deleter, frame->base.origin); ++rx->errors_oom; @@ -2041,7 +2049,7 @@ static void rx_port_accept_stateless(udpard_rx_t* const rx, const udpard_udpip_ep_t source_ep, rx_frame_t* const frame, const udpard_mem_deleter_t payload_deleter, - const uint_fast8_t redundant_iface_index) + const uint_fast8_t iface_index) { const size_t required_size = smaller(port->extent, frame->meta.transfer_payload_size); const bool full_transfer = (frame->base.offset == 0) && (frame->base.payload.size >= required_size); @@ -2050,8 +2058,8 @@ static void rx_port_accept_stateless(udpard_rx_t* const rx, // Maybe we could do something about it in the future to avoid this allocation. udpard_fragment_t* const frag = rx_fragment_new(port->memory.fragment, payload_deleter, frame->base); if (frag != NULL) { - udpard_remote_t remote = { .uid = frame->meta.sender_uid }; - remote.endpoints[redundant_iface_index] = source_ep; + udpard_remote_t remote = { .uid = frame->meta.sender_uid }; + remote.endpoints[iface_index] = source_ep; // The CRC is validated by the frame parser for the first frame of any transfer. It is certainly correct. UDPARD_ASSERT(frame->base.crc == crc_full(frame->base.payload.size, frame->base.payload.data)); const udpard_rx_transfer_t transfer = { @@ -2242,11 +2250,11 @@ bool udpard_rx_port_push(udpard_rx_t* const rx, const udpard_udpip_ep_t source_ep, const udpard_bytes_mut_t datagram_payload, const udpard_mem_deleter_t payload_deleter, - const uint_fast8_t redundant_iface_index) + const uint_fast8_t iface_index) { const bool ok = (rx != NULL) && (port != NULL) && (timestamp >= 0) && udpard_is_valid_endpoint(source_ep) && (datagram_payload.data != NULL) && (payload_deleter.free != NULL) && - (redundant_iface_index < UDPARD_IFACE_COUNT_MAX); + (iface_index < UDPARD_IFACE_COUNT_MAX); if (ok) { rx_frame_t frame = { 0 }; uint32_t frame_index = 0; @@ -2258,12 +2266,11 @@ bool udpard_rx_port_push(udpard_rx_t* const rx, frame.base.origin = datagram_payload; // Take ownership of the payload. if (frame_valid) { if (frame.meta.topic_hash == port->topic_hash) { - port->vtable_private->accept( - rx, port, timestamp, source_ep, &frame, payload_deleter, redundant_iface_index); + port->vtable_private->accept(rx, port, timestamp, source_ep, &frame, payload_deleter, iface_index); } else { // Collisions are discovered early so that we don't attempt to allocate sessions for them. mem_free_payload(payload_deleter, frame.base.origin); - udpard_remote_t remote = { .uid = frame.meta.sender_uid }; - remote.endpoints[redundant_iface_index] = source_ep; + udpard_remote_t remote = { .uid = frame.meta.sender_uid }; + remote.endpoints[iface_index] = source_ep; port->vtable->on_collision(rx, port, remote); } } else { diff --git a/libudpard/udpard.h b/libudpard/udpard.h index 1e4eeed..9e9bf33 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -5,35 +5,36 @@ /// `____/ .___/`___/_/ /_/`____/`__, / .___/_/ /_/`__,_/_/ /// /_/ /____/_/ /// -/// LibUDPard is a compact implementation of the Cyphal/UDP protocol for high-integrity real-time embedded systems. -/// It is designed for use in robust deterministic embedded systems equipped with at least 64K ROM and RAM. +/// LibUDPard is a compact implementation of the Cyphal/UDP transport for high-integrity real-time embedded systems. +/// It is designed for use in robust deterministic embedded systems equipped with at least ~100K ROM and RAM, +/// as well as in general-purpose software. +/// /// The codebase is compliant with a large subset of MISRA C and is fully covered by unit and end-to-end tests. /// The library is designed to be compatible with any conventional target platform, from 8 to 64 bit, little- and -/// big-endian, RTOS-based or baremetal, as long as there is a standards-compliant ISO C99+ compiler available. +/// big-endian, RTOS-based or baremetal, as long as there is a standards-compliant ISO C99 or C11 compiler available. /// -/// The library is intended to be integrated into the end application by simply copying its source files into the +/// The library is intended to be integrated into the end application by simply copying udpard.c/.h into the /// source tree of the project; it does not require any special compilation options and should work out of the box. /// There are build-time configuration parameters defined near the top of udpard.c, but they are optional to use. /// -/// To use the library, the application needs to provide a UDP/IPv4 stack supporting IGMP and ARP. +/// To use the library, the application needs to provide a minimal UDP/IPv4 stack supporting IGMP v2 and passive ARP. /// POSIX-based systems may use the standard Berkeley sockets API, while more constrained embedded systems may choose -/// to rely either on a third-party solution like LwIP or a custom UDP/IP stack. +/// to rely either on a third-party solution like LwIP or a custom minimal UDP/IP stack. /// /// The library can be used either with a regular heap (preferably constant-time) or with a collection of fixed-size /// block pool allocators (may be preferable in safety-certified systems). /// If block pool allocators are used, the following block sizes should be served: /// - MTU-sized blocks for the TX and RX pipelines (typically at most 1.5 KB unless jumbo frames are used). -/// - sizeof(tx_transfer_t) blocks for the TX pipeline. -/// - sizeof(tx_frame_t) blocks for the TX pipeline. -/// - sizeof(rx_session_t) blocks for the RX pipeline. -/// - sizeof(udpard_fragment_t) blocks for the RX pipeline. +/// The TX pipeline adds a small overhead of sizeof(tx_frame_t). +/// - sizeof(tx_transfer_t) blocks for the TX pipeline to store outgoing transfer metadata. +/// - sizeof(rx_session_t) blocks for the RX pipeline to store incoming transfer session metadata. +/// - sizeof(udpard_fragment_t) blocks for the RX pipeline to store received data fragments. /// -/// Suitable allocators may be found here: +/// Suitable memory allocators may be found here: /// - Constant-time ultrafast deterministic heap: https://github.com/pavel-kirienko/o1heap /// - Single-header fixed-size block pool: https://gist.github.com/pavel-kirienko/daf89e0481e6eac0f1fa8a7614667f59 /// /// -------------------------------------------------------------------------------------------------------------------- -/// /// This software is distributed under the terms of the MIT License. /// Copyright (C) OpenCyphal Development Team /// Copyright Amazon.com Inc. or its affiliates. @@ -65,9 +66,10 @@ extern "C" /// RFC 791 states that hosts must be prepared to accept datagrams of up to 576 octets and it is expected that this /// library will receive non IP-fragmented datagrams thus the minimum MTU should be larger than 576. -/// This is also the maximum size of a single-frame transfer. /// That being said, the MTU here is set to a larger value that is derived as: /// 1500B Ethernet MTU (RFC 894) - 60B IPv4 max header - 8B UDP Header - 48B Cyphal header +/// This is also the default maximum size of a single-frame transfer. +/// The application can change this value at runtime as needed. #define UDPARD_MTU_DEFAULT 1384U /// MTU less than this should not be used. This value may be increased in a future version of the library. @@ -78,7 +80,7 @@ extern "C" #define UDPARD_IFACE_MASK_ALL ((1U << UDPARD_IFACE_COUNT_MAX) - 1U) -/// All P2P transfers have a fixed prefix, handled by the library transparently for the application, +/// All P2P transfers have a fixed prefix in the payload, handled by the library transparently for the application, /// defined as follows in DSDL notation: /// /// uint8 KIND_RESPONSE = 0 # The topic hash and transfer-ID specify which message this is a response to. @@ -96,6 +98,7 @@ extern "C" typedef int64_t udpard_us_t; /// See udpard_tx_t::ack_baseline_timeout. +/// This default value might be a good starting point for many applications running over a local network. #define UDPARD_TX_ACK_BASELINE_TIMEOUT_DEFAULT_us 16000LL /// The subject-ID only affects the formation of the multicast UDP/IP endpoint address. @@ -159,7 +162,8 @@ typedef struct udpard_udpip_ep_t /// The RX pipeline will attempt to discover the sender's UDP/IP endpoint per redundant interface /// based on the source address of the received UDP datagrams. If the sender's endpoint could not be discovered /// for a certain interface (e.g., if the sender is not connected to that interface), the corresponding entry in -/// the endpoints array will be zeroed. +/// the endpoints array will be zeroed and udpard_is_valid_endpoint() will return false for that entry. +/// /// Cyphal/UDP thus allows nodes to change their network interface addresses dynamically. /// The library does not make any assumptions about the specific values and their uniqueness; /// as such, multiple remote nodes can even share the same endpoint. @@ -172,18 +176,16 @@ typedef struct udpard_remote_t /// Returns true if the given UDP/IP endpoint appears to be valid. Zero port or IP are considered invalid. bool udpard_is_valid_endpoint(const udpard_udpip_ep_t ep); -/// Returns the destination multicast UDP/IP endpoint for the given subject ID. +/// Returns the destination multicast UDP/IP endpoint for the given subject-ID. /// The application should use this function when setting up subscription sockets or sending transfers. -/// If the subject-ID exceeds the allowed range, the excessive bits are masked out. -/// For P2P ports use the unicast node address instead. +/// If the subject-ID exceeds UDPARD_IPv4_SUBJECT_ID_MAX, the excessive bits are masked out. +/// For P2P use the unicast node address directly instead, as provided by the RX pipeline per received transfer. udpard_udpip_ep_t udpard_make_subject_endpoint(const uint32_t subject_id); /// The semantics are similar to malloc/free. -/// Consider using O1Heap: https://github.com/pavel-kirienko/o1heap. Alternatively, some applications may prefer to -/// use a set of fixed-size block pool allocators (see the high-level overview for details); for example: -/// https://github.com/OpenCyphal-Garage/demos/blob/87741d8242bcb27b39e22115559a4b91e92ffe06/libudpard_demo/src/memory_block.h +/// Consider using O1Heap: https://github.com/pavel-kirienko/o1heap. /// The API documentation is written on the assumption that the memory management functions are O(1). -/// The value of the user reference is taken from the corresponding field of the memory resource structure. +/// The user pointer is taken from the corresponding field of the memory resource structure. typedef void* (*udpard_mem_alloc_t)(void* const user, const size_t size); typedef void (*udpard_mem_free_t)(void* const user, const size_t size, void* const pointer); @@ -194,9 +196,6 @@ typedef struct udpard_mem_deleter_t udpard_mem_free_t free; } udpard_mem_deleter_t; -/// A memory resource encapsulates the dynamic memory allocation and deallocation facilities. -/// Note that the library allocates a large amount of small fixed-size objects for bookkeeping purposes; -/// allocators for them can be implemented using fixed-size block pools to eliminate extrinsic memory fragmentation. typedef struct udpard_mem_resource_t { void* user; @@ -237,7 +236,7 @@ typedef struct udpard_fragment_t /// All fragments in the tree will be freed and invalidated. /// The passed fragment can be any fragment inside the tree (not necessarily the root). /// If the fragment argument is NULL, the function has no effect. The complexity is linear in the number of fragments. -void udpard_fragment_free_all(udpard_fragment_t* const frag, const udpard_mem_resource_t fragment_mem_resource); +void udpard_fragment_free_all(udpard_fragment_t* const frag, const udpard_mem_resource_t mem_fragment); /// Given any fragment in a transfer, returns the fragment that contains the given payload offset. /// Returns NULL if the offset points beyond the stored payload, or if frag is NULL. @@ -252,6 +251,7 @@ udpard_fragment_t* udpard_fragment_seek(const udpard_fragment_t* frag, const siz /// The complexity is amortized-constant. udpard_fragment_t* udpard_fragment_next(const udpard_fragment_t* frag); +/// A convenience function built on top of udpard_fragment_seek() and udpard_fragment_next(). /// Copies `size` bytes of payload stored in a fragment tree starting from `offset` into `destination`. /// The cursor pointer is an iterator updated to the last fragment touched, enabling very efficient sequential /// access without repeated searches; it is never set to NULL. @@ -267,22 +267,30 @@ size_t udpard_fragment_gather(const udpard_fragment_t** cursor, // ================================================= TX PIPELINE ================================================= // ===================================================================================================================== +/// Graphically, the transmission pipeline is arranged as shown below. +/// There is a single pipeline instance that serves all topics, P2P, and all network interfaces. +/// +/// +---> REDUNDANT INTERFACE A +/// | +/// TRANSFERS ---> udpard_tx_t ---+---> REDUNDANT INTERFACE B +/// | +/// +---> ... +/// typedef struct udpard_tx_t udpard_tx_t; -/// A TX queue uses these memory resources for allocating the enqueued items (UDP datagrams). -/// There are exactly two allocations per enqueued item: -/// - the first for bookkeeping purposes (udpard_tx_item_t) -/// - second for payload storage (the frame data) -/// In a simple application, there would be just one memory resource shared by all parts of the library. -/// If the application knows its MTU, it can use block allocation to avoid extrinsic fragmentation. typedef struct udpard_tx_mem_resources_t { - /// The queue bookkeeping structures are allocated per outgoing transfer. - /// Each instance is sizeof(tx_transfer_t), so a trivial zero-fragmentation block allocator is enough. + /// The queue bookkeeping structures are allocated per outgoing transfer, i.e., one per udpard_tx_push(). + /// Each allocation is sizeof(tx_transfer_t). udpard_mem_resource_t transfer; - /// The UDP datagram payload buffers are allocated per frame; each buffer is of size at most - /// HEADER_SIZE + MTU + small overhead, so a trivial block pool is enough if MTU is known in advance. + /// The UDP datagram payload buffers are allocated per frame, each at most HEADER_SIZE+MTU+sizeof(tx_frame_t). + /// These may be distinct per interface to allow each interface to draw buffers from a specific memory region + /// or a specific DMA-compatible memory pool. + /// + /// IMPORTANT: DISTINCT MEMORY RESOURCES INCREASE TX MEMORY USAGE AND DATA COPYING. + /// If possible, it is recommended to use the same memory resource for all interfaces, because the library will be + /// able to avoid frame duplication and instead reuse each frame across all interfaces when the MTUs are identical. udpard_mem_resource_t payload[UDPARD_IFACE_COUNT_MAX]; } udpard_tx_mem_resources_t; @@ -296,8 +304,13 @@ typedef struct udpard_tx_feedback_t bool success; ///< False if no ack was received from the remote end before deadline expiration or forced eviction. } udpard_tx_feedback_t; +/// Request to transmit a UDP datagram over the specified interface to the given destination endpoint. +/// Which interface indexes are available is determined by the user when pushing a transfer: the endpoints for +/// unavailable interfaces should be zeroed, then no ejection will be requested for those interfaces. +/// If Berkeley sockets or similar API is used, the application should use a dedicated socket per redundant interface. typedef struct udpard_tx_ejection_t { + /// The current time carried over from the API function that initiated the ejection. udpard_us_t now; /// Specifies when the frame should be considered expired and dropped if not yet transmitted by then; @@ -305,23 +318,27 @@ typedef struct udpard_tx_ejection_t udpard_us_t deadline; uint_fast8_t iface_index; ///< The interface index on which the datagram is to be transmitted. - uint_fast8_t dscp; ///< Set the DSCP field of the outgoing packet to this. - udpard_udpip_ep_t destination; ///< Unicast or multicast UDP/IP endpoint. + uint_fast8_t dscp; ///< Set the DSCP field of the outgoing UDP packet to this. + udpard_udpip_ep_t destination; ///< Unicast (for P2P transfers) or multicast UDP/IP endpoint. - /// If the datagram pointer is retained by the application, udpard_tx_refcount_inc() must be invoked on it. - /// When no longer needed (e.g, upon transmission), udpard_tx_refcount_dec() must be invoked. + /// If the datagram pointer is retained by the application, udpard_tx_refcount_inc() must be invoked on it + /// to prevent it from being garbage collected. When no longer needed (e.g, upon transmission), + /// udpard_tx_refcount_dec() must be invoked to release the reference. udpard_bytes_t datagram; /// This is the same pointer that was passed to udpard_tx_push(). void* user_transfer_reference; } udpard_tx_ejection_t; +/// Virtual function table for the TX pipeline, to be provided by the application. typedef struct udpard_tx_vtable_t { - /// Invoked from udpard_tx_poll() to push outgoing UDP datagrams into the socket/NIC driver. + /// Invoked from udpard_tx_poll() et al to push outgoing UDP datagrams into the socket/NIC driver. bool (*eject)(udpard_tx_t*, udpard_tx_ejection_t); } udpard_tx_vtable_t; +/// The application must create a single instance of this struct to manage the TX pipeline. +/// A single instance manages all redundant interfaces. struct udpard_tx_t { const udpard_tx_vtable_t* vtable; @@ -332,9 +349,13 @@ struct udpard_tx_t /// A random-initialized transfer-ID counter for all outgoing P2P transfers. Must not be changed by the application. uint64_t p2p_transfer_id; - /// The maximum number of Cyphal transfer payload bytes per UDP datagram. - /// The Cyphal/UDP header is added to this value to obtain the total UDP datagram payload size. See UDPARD_MTU_*. + /// The maximum number of Cyphal transfer payload bytes per UDP datagram. See UDPARD_MTU_*. + /// The Cyphal/UDP header is added to this value to obtain the total UDP datagram payload size. /// The value can be changed arbitrarily between enqueue operations as long as it is at least UDPARD_MTU_MIN. + /// + /// IMPORTANT: DISTINCT MTU VALUES INCREASE TX MEMORY USAGE AND DATA COPYING. + /// If possible, it is recommended to use the same MTU for all interfaces, because the library will be + /// able to avoid frame duplication and instead reuse each frame across all interfaces. size_t mtu[UDPARD_IFACE_COUNT_MAX]; /// This duration is used to derive the acknowledgment timeout for reliable transfers in tx_ack_timeout(). @@ -377,9 +398,20 @@ struct udpard_tx_t void* user; }; -/// The parameters are initialized deterministically (MTU defaults to UDPARD_MTU_DEFAULT and counters are reset) +/// The parameters are default-initialized (MTU defaults to UDPARD_MTU_DEFAULT and counters are reset) /// and can be changed later by modifying the struct fields directly. No memory allocation is going to take place /// until the first transfer is successfully pushed via udpard_tx_push(). +/// +/// The local UID should be a globally unique EUI-64 identifier assigned to the local node. It may be a random +/// EUI-64, which is especially useful for short-lived software nodes. +/// +/// The p2p_transfer_id_initial value must be chosen randomly such that it is likely to be distinct per application +/// startup. See the transfer-ID counter requirements in udpard_tx_push() for details. +/// +/// The enqueued_frames_limit should be large enough to accommodate the expected burstiness of the application traffic. +/// If the limit is reached, the library will apply heuristics to sacrifice some older transfers to make room +/// for the new one. This behavior allows the library to make progress even when some interfaces are stalled. +/// /// True on success, false if any of the arguments are invalid. bool udpard_tx_new(udpard_tx_t* const self, const uint64_t local_uid, @@ -388,13 +420,11 @@ bool udpard_tx_new(udpard_tx_t* const self, const udpard_tx_mem_resources_t memory, const udpard_tx_vtable_t* const vtable); -/// This function serializes a transfer into a sequence of UDP datagrams and inserts them into the prioritized -/// transmission queue at the appropriate position. The transfer payload will be copied into the transmission queue -/// so that the lifetime of the datagrams is not related to the lifetime of the input payload buffer. +/// Submit a transfer for transmission. The payload data will be copied into the transmission queue, so it can be +/// invalidated immediately after this function returns. When redundant interfaces are used, the library will attempt to +/// minimize the number of copies by reusing frames across interfaces with identical MTU values and memory resources. /// -/// The transfer_id parameter is used to populate the transfer_id field of the generated Cyphal/UDP frames. -/// The caller shall increment the transfer-ID counter after each successful invocation of this function -/// per redundant interface; the same transfer published over redundant interfaces shall have the same transfer-ID. +/// The caller shall increment the transfer-ID counter after each successful invocation of this function per topic. /// There shall be a separate transfer-ID counter per topic. The initial value shall be chosen randomly /// such that it is likely to be distinct per application startup (embedded systems can use noinit memory sections, /// hash uninitialized SRAM, use timers or ADC noise, etc). @@ -402,31 +432,24 @@ bool udpard_tx_new(udpard_tx_t* const self, /// The user_transfer_reference is an opaque pointer that will be stored for each enqueued item of this transfer. /// The library itself does not use or check this value in any way, so it can be NULL if not needed. /// -/// The function returns the number of UDP datagrams enqueued, which is always a positive number, in case of success. +/// The function returns the number of payload fragments created, which is always a positive number, in case of success. /// In case of failure, the function returns zero. Runtime failures increment the corresponding error counters, -/// while invocations with invalid arguments just return zero without modifying the queue state. In all cases, -/// either all frames of the transfer are enqueued successfully or none are. -/// -/// An attempt to push a transfer with a (topic hash, transfer-ID) pair that is already enqueued will fail. -/// -/// The callback is invoked from udpard_tx_poll() to report the result of reliable transfer transmission attempts. -/// This is ALWAYS invoked EXACTLY ONCE per reliable transfer pushed via udpard_tx_push() successfully. -/// Set the callback to NULL for best-effort (non-acknowledged) transfers. +/// while invocations with invalid arguments just return zero without modifying the queue state. /// -/// Reliable transfers will keep retransmitting until either an acknowledgment is received from the remote, -/// or the deadline expires. The number of retransmissions cannot be limited directly. Each subsequent -/// retransmission timeout is doubled compared to the previous one (exponential backoff). +/// An attempt to push a transfer with a (topic hash, transfer-ID) pair that is already enqueued will fail, +/// as that violates the transfer-ID uniqueness requirement stated above. /// -/// The memory allocation requirement is two allocations per datagram: -/// a single-frame transfer takes two allocations; a multi-frame transfer of N frames takes N*2 allocations. -/// In each pair of allocations: -/// - the first allocation is for `udpard_tx_item_t`; the size is `sizeof(udpard_tx_item_t)`; -/// the TX queue `memory.fragment` memory resource is used for this allocation (and later for deallocation); -/// - the second allocation is for the payload (the datagram data) - the size is normally MTU but could be less for -/// the last frame of the transfer; the TX queue `memory.payload` resource is used for this allocation. +/// The feedback callback is set to NULL for best-effort (non-acknowledged) transfers. Otherwise, the transfer is +/// treated as reliable, requesting a delivery acknowledgement from at least one remote node (subscriber), +/// with repeated retransmissions until an acknowledgement is received or the deadline has expired. +/// The feedback callback is ALWAYS invoked EXACTLY ONCE per reliable transfer pushed via udpard_tx_push() successfully, +/// indicating either success (acknowledgment received before deadline) or failure (deadline expired without ack). +/// The retransmission delay is increased exponentially with each retransmission attempt; please refer to +/// udpard_tx_t::ack_baseline_timeout for details. /// +/// On success, the function allocates a single transfer state instance and a number of payload fragments. /// The time complexity is O(p + log e), where p is the transfer payload size, and e is the number of -/// transfers (not frames) already enqueued in the transmission queue. +/// transfers already enqueued in the transmission queue. uint32_t udpard_tx_push(udpard_tx_t* const self, const udpard_us_t now, const udpard_us_t deadline, @@ -453,8 +476,7 @@ uint32_t udpard_tx_push_p2p(udpard_tx_t* const self, /// It is fine to also invoke it periodically unconditionally to drive the transmission process. /// Internally, the function will query the scheduler for the next frame to be transmitted and will attempt /// to submit it via the eject() callback provided in the vtable. -/// The iface mask indicates which interfaces are currently available for transmission; -/// eject() will only be invoked on these interfaces. +/// The iface mask indicates which interfaces are currently ready to accept new datagrams. /// The function may deallocate memory. The time complexity is logarithmic in the number of enqueued transfers. void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now, const uint32_t iface_mask); @@ -473,7 +495,6 @@ void udpard_tx_free(udpard_tx_t* const self); /// The reception (RX) pipeline is used to subscribe to subjects and to receive P2P transfers. /// The reception pipeline is highly robust and is able to accept datagrams with arbitrary MTU distinct per interface, /// delivered out-of-order (OOO) with duplication and arbitrary interleaving between transfers. -/// Robust OOO reassembly is particularly interesting when simple repetition coding FEC is used. /// All redundant interfaces are pooled together into a single fragment stream per RX port, /// thus providing seamless failover and great resilience against packet loss on any of the interfaces. /// The RX pipeline operates at the speed/latency of the best-performing interface at any given time. @@ -558,6 +579,7 @@ void udpard_tx_free(udpard_tx_t* const self); #define UDPARD_RX_REORDERING_WINDOW_UNORDERED ((udpard_us_t)(-1)) #define UDPARD_RX_REORDERING_WINDOW_STATELESS ((udpard_us_t)(-2)) +/// The application will have a single RX instance to manage all subscriptions and P2P ports. typedef struct udpard_rx_t { udpard_list_t list_session_by_animation; ///< Oldest at the tail. @@ -573,6 +595,7 @@ typedef struct udpard_rx_t /// The transmission pipeline is needed to manage ack transmission and removal of acknowledged transfers. /// If the application wants to only listen, the pointer may be NULL (no acks will be sent). + /// When initializing the library, the TX instance needs to be created first. udpard_tx_t* tx; void* user; ///< Opaque pointer for the application use only. Not accessed by the library. @@ -738,7 +761,7 @@ void udpard_rx_poll(udpard_rx_t* const self, const udpard_us_t now); /// 2. Per redundant network interface: /// - Create a new RX socket bound to the IP multicast group address and UDP port number returned by /// udpard_make_subject_endpoint() for the desired subject-ID. -/// For P2P transfer ports use ordinary sockets. +/// For P2P transfer ports use ordinary unicast sockets. /// 3. Read data from the sockets continuously and forward each datagram to udpard_rx_port_push(), /// along with the index of the redundant interface the datagram was received on. /// @@ -809,7 +832,7 @@ bool udpard_rx_port_push(udpard_rx_t* const rx, const udpard_udpip_ep_t source_ep, const udpard_bytes_mut_t datagram_payload, const udpard_mem_deleter_t payload_deleter, - const uint_fast8_t redundant_iface_index); + const uint_fast8_t iface_index); #ifdef __cplusplus } From af65a663eae9e671645c8eb71317a36da78b4111 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sun, 28 Dec 2025 13:32:37 +0200 Subject: [PATCH 24/42] full line coverage --- AGENTS.md | 2 +- libudpard/udpard.c | 6 +- tests/src/test_intrusive_rx.c | 197 +++++++++++++++++++ tests/src/test_intrusive_tx.c | 345 ++++++++++++++++++++++++++++++++++ 4 files changed, 547 insertions(+), 3 deletions(-) diff --git a/AGENTS.md b/AGENTS.md index 02d8989..c12f071 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -1,6 +1,6 @@ # LibUDPard instructions for AI agents -Please read `README.md` for general information about LibUDPard. +Please read `README.md` for general information about LibUDPard, and `CONTRIBUTING.md` for development-related notes. Keep the code and comments very brief. Be sure every significant code block is preceded with a brief comment. diff --git a/libudpard/udpard.c b/libudpard/udpard.c index ec185e4..d2285ff 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -954,9 +954,11 @@ static void tx_send_ack(udpard_rx_t* const rx, if (!new_better) { return; // Can we get an ack? We have ack at home! } - if (prior != NULL) { - tx_transfer_free(tx, prior); // avoid redundant acks for the same transfer -- replace with better one + if (prior != NULL) { // avoid redundant acks for the same transfer -- replace with better one + tx_transfer_free(tx, prior); // this will free up a queue slot and some memory } + // Even if the new, better ack fails to enqueue for some reason, it's no big deal -- we will send the next one. + // The only reason it might fail is an OOM but we just freed a slot so it should be fine. // Serialize the ACK payload. byte_t header[UDPARD_P2P_HEADER_BYTES]; diff --git a/tests/src/test_intrusive_rx.c b/tests/src/test_intrusive_rx.c index 0ade7cc..5ea54c5 100644 --- a/tests/src/test_intrusive_rx.c +++ b/tests/src/test_intrusive_rx.c @@ -2774,6 +2774,202 @@ static void test_rx_port_free_loop(void) instrumented_allocator_reset(&alloc_payload); } +static size_t g_collision_count = 0; // NOLINT(*-avoid-non-const-global-variables) + +static void stub_on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) +{ + (void)rx; + udpard_fragment_free_all(transfer.payload, port->memory.fragment); +} + +static void stub_on_collision(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_remote_t remote) +{ + (void)rx; + (void)port; + (void)remote; + g_collision_count++; +} + +static void stub_on_message_p2p(udpard_rx_t* const rx, + udpard_rx_port_p2p_t* const port, + const udpard_rx_transfer_p2p_t transfer) +{ + (void)rx; + udpard_fragment_free_all(transfer.base.payload, port->base.memory.fragment); +} + +static udpard_udpip_ep_t make_ep(const uint32_t ip) { return (udpard_udpip_ep_t){ .ip = ip, .port = 1U }; } + +static void test_rx_additional_coverage(void) +{ + instrumented_allocator_t alloc_frag = { 0 }; + instrumented_allocator_t alloc_ses = { 0 }; + instrumented_allocator_new(&alloc_frag); + instrumented_allocator_new(&alloc_ses); + const udpard_rx_mem_resources_t mem = { .session = instrumented_allocator_make_resource(&alloc_ses), + .fragment = instrumented_allocator_make_resource(&alloc_frag) }; + + // Session helpers and free paths. + udpard_rx_port_t port = { .memory = mem, + .vtable = &(udpard_rx_port_vtable_t){ .on_message = stub_on_message, + .on_collision = stub_on_collision }, + .reordering_window = 10, + .topic_hash = 1 }; + rx_session_t* ses = mem.session.alloc(mem.session.user, sizeof(rx_session_t)); + TEST_ASSERT_NOT_NULL(ses); + mem_zero(sizeof(*ses), ses); + ses->port = &port; + ses->remote.uid = 77; + ses->slots[0].state = rx_slot_done; + ses->slots[0].transfer_id = 5; + TEST_ASSERT_TRUE(rx_session_is_transfer_interned(ses, 5)); + udpard_us_t dl_key = 5; + (void)cavl_compare_rx_session_by_reordering_deadline(&dl_key, &ses->index_reordering_window); + udpard_list_t anim_list = { 0 }; + udpard_tree_t* by_reorder = NULL; + cavl2_find_or_insert(&port.index_session_by_remote_uid, + &ses->remote.uid, + cavl_compare_rx_session_by_remote_uid, + &ses->index_remote_uid, + cavl2_trivial_factory); + ses->reordering_window_deadline = 3; + cavl2_find_or_insert(&by_reorder, + &ses->reordering_window_deadline, + cavl_compare_rx_session_by_reordering_deadline, + &ses->index_reordering_window, + cavl2_trivial_factory); + enlist_head(&anim_list, &ses->list_by_animation); + rx_session_free(ses, &anim_list, &by_reorder); + + // Ordered scan cleans late busy slots. + rx_session_t ses_busy; + mem_zero(sizeof(ses_busy), &ses_busy); + ses_busy.port = &port; + ses_busy.history[0] = 10; + ses_busy.slots[0].state = rx_slot_busy; + ses_busy.slots[0].transfer_id = 10; + ses_busy.slots[0].ts_min = 0; + ses_busy.slots[0].ts_max = 0; + udpard_rx_t rx = { 0 }; + rx_session_ordered_scan_slots(&ses_busy, &rx, 10, false); + + // Slot acquisition covers stale busy, busy eviction, and done eviction. + rx_session_t ses_slots; + mem_zero(sizeof(ses_slots), &ses_slots); + ses_slots.port = &port; + ses_slots.history_current = 0; + for (size_t i = 0; i < RX_TRANSFER_HISTORY_COUNT; i++) { + ses_slots.history[i] = 1; + } + ses_slots.slots[0].state = rx_slot_busy; + ses_slots.slots[0].ts_max = 0; + ses_slots.slots[0].transfer_id = 1; + rx_slot_t* slot = rx_session_get_slot(&ses_slots, &rx, SESSION_LIFETIME + 1, 99); + TEST_ASSERT_NOT_NULL(slot); + for (size_t i = 0; i < RX_SLOT_COUNT; i++) { + ses_slots.slots[i].state = (i == 0) ? rx_slot_busy : rx_slot_done; + ses_slots.slots[i].ts_max = 10 + (udpard_us_t)i; + } + slot = rx_session_get_slot(&ses_slots, &rx, 50, 2); + TEST_ASSERT_NOT_NULL(slot); + for (size_t i = 0; i < RX_SLOT_COUNT; i++) { + ses_slots.slots[i].state = rx_slot_done; + ses_slots.slots[i].transfer_id = i + 1U; + ses_slots.slots[i].ts_min = (udpard_us_t)i; + ses_slots.slots[i].ts_max = (udpard_us_t)i; + } + port.vtable = &(udpard_rx_port_vtable_t){ .on_message = stub_on_message, .on_collision = stub_on_collision }; + slot = rx_session_get_slot(&ses_slots, &rx, 60, 3); + TEST_ASSERT_NOT_NULL(slot); + + // Stateless accept success, OOM, malformed. + g_collision_count = 0; + port.vtable = &(udpard_rx_port_vtable_t){ .on_message = stub_on_message, .on_collision = stub_on_collision }; + port.extent = 8; + port.reordering_window = UDPARD_RX_REORDERING_WINDOW_STATELESS; + rx_frame_t frame; + byte_t payload[4] = { 1, 2, 3, 4 }; + mem_zero(sizeof(frame), &frame); + void* payload_buf = mem.fragment.alloc(mem.fragment.user, sizeof(payload)); + memcpy(payload_buf, payload, sizeof(payload)); + frame.base.payload = (udpard_bytes_t){ .data = payload_buf, .size = sizeof(payload) }; + frame.base.origin = (udpard_bytes_mut_t){ .data = payload_buf, .size = sizeof(payload) }; + frame.base.crc = crc_full(frame.base.payload.size, frame.base.payload.data); + frame.meta.transfer_payload_size = (uint32_t)frame.base.payload.size; + frame.meta.sender_uid = 9; + frame.meta.transfer_id = 11; + rx_port_accept_stateless(&rx, &port, 0, make_ep(1), &frame, instrumented_allocator_make_deleter(&alloc_frag), 0); + alloc_frag.limit_fragments = 0; + frame.base.payload.data = payload; + frame.base.payload.size = sizeof(payload); + frame.base.origin = (udpard_bytes_mut_t){ 0 }; + frame.base.crc = crc_full(frame.base.payload.size, frame.base.payload.data); + rx_port_accept_stateless(&rx, &port, 0, make_ep(1), &frame, instrumented_allocator_make_deleter(&alloc_frag), 0); + frame.base.payload.size = 0; + frame.meta.transfer_payload_size = 8; + rx_port_accept_stateless(&rx, &port, 0, make_ep(1), &frame, instrumented_allocator_make_deleter(&alloc_frag), 0); + udpard_rx_port_t port_stateless_new = { 0 }; + TEST_ASSERT_TRUE( + udpard_rx_port_new(&port_stateless_new, 22, 8, UDPARD_RX_REORDERING_WINDOW_STATELESS, mem, port.vtable)); + TEST_ASSERT_NOT_NULL(port_stateless_new.vtable_private); + udpard_rx_port_free(&rx, &port_stateless_new); + instrumented_allocator_reset(&alloc_frag); + + // P2P ack dispatch. + udpard_rx_port_p2p_t port_p2p = { .vtable = &(udpard_rx_port_p2p_vtable_t){ .on_message = stub_on_message_p2p }, + .base = { .memory = mem } }; + byte_t p2p_header[UDPARD_P2P_HEADER_BYTES] = { P2P_KIND_ACK }; + udpard_fragment_t frag = { .view = { .data = p2p_header, .size = UDPARD_P2P_HEADER_BYTES }, + .origin = { .data = NULL, .size = 0 } }; + udpard_rx_transfer_t transfer = { .payload = &frag, + .payload_size_stored = UDPARD_P2P_HEADER_BYTES, + .payload_size_wire = UDPARD_P2P_HEADER_BYTES }; + rx_p2p_on_message(&rx, (udpard_rx_port_t*)&port_p2p, transfer); + udpard_fragment_t* frag_short = mem.fragment.alloc(mem.fragment.user, sizeof(udpard_fragment_t)); + TEST_ASSERT_NOT_NULL(frag_short); + mem_zero(sizeof(*frag_short), frag_short); + byte_t small_buf[UDPARD_P2P_HEADER_BYTES - 1] = { 0 }; + frag_short->view = (udpard_bytes_t){ .data = small_buf, .size = sizeof(small_buf) }; + frag_short->origin = (udpard_bytes_mut_t){ .data = mem.fragment.alloc(mem.fragment.user, sizeof(small_buf)), + .size = sizeof(small_buf) }; + frag_short->payload_deleter = instrumented_allocator_make_deleter(&alloc_frag); + memcpy(frag_short->origin.data, small_buf, sizeof(small_buf)); + transfer.payload = frag_short; + rx.errors_transfer_malformed = 0; + rx_p2p_on_message(&rx, (udpard_rx_port_t*)&port_p2p, transfer); + TEST_ASSERT_GREATER_THAN_UINT64(0, rx.errors_transfer_malformed); + rx_p2p_on_collision(&rx, (udpard_rx_port_t*)&port_p2p, (udpard_remote_t){ 0 }); + + // P2P constructor failure. + TEST_ASSERT_FALSE(udpard_rx_port_new_p2p(&port_p2p, 1U, 8U, mem, &(udpard_rx_port_p2p_vtable_t){ 0 })); + + // Port push collision and malformed header. + udpard_rx_port_t port_normal = { 0 }; + TEST_ASSERT_TRUE(udpard_rx_port_new(&port_normal, 1, 8, 10, mem, port.vtable)); + udpard_bytes_mut_t bad_payload = { .data = mem.fragment.alloc(mem.fragment.user, 4), .size = 4 }; + TEST_ASSERT(udpard_rx_port_push( + &rx, &port_normal, 0, make_ep(2), bad_payload, instrumented_allocator_make_deleter(&alloc_frag), 0)); + byte_t good_dgram[HEADER_SIZE_BYTES + 1] = { 0 }; + meta_t meta = { .priority = udpard_prio_nominal, + .flag_ack = false, + .transfer_payload_size = 1, + .transfer_id = 1, + .sender_uid = 2, + .topic_hash = 99 }; + good_dgram[HEADER_SIZE_BYTES] = 0xAA; + header_serialize(good_dgram, meta, 0, 0, crc_full(1, &good_dgram[HEADER_SIZE_BYTES])); + udpard_bytes_mut_t good_payload = { .data = mem.fragment.alloc(mem.fragment.user, sizeof(good_dgram)), + .size = sizeof(good_dgram) }; + memcpy(good_payload.data, good_dgram, sizeof(good_dgram)); + TEST_ASSERT(udpard_rx_port_push( + &rx, &port_normal, 0, make_ep(3), good_payload, instrumented_allocator_make_deleter(&alloc_frag), 1)); + TEST_ASSERT_GREATER_THAN_UINT64(0, g_collision_count); + udpard_rx_port_free(&rx, &port_normal); + udpard_rx_port_free(&rx, (udpard_rx_port_t*)&port_p2p); + instrumented_allocator_reset(&alloc_frag); + instrumented_allocator_reset(&alloc_ses); +} + void setUp(void) {} void tearDown(void) {} @@ -2802,6 +2998,7 @@ int main(void) RUN_TEST(test_rx_port_timeouts); RUN_TEST(test_rx_port_oom); RUN_TEST(test_rx_port_free_loop); + RUN_TEST(test_rx_additional_coverage); return UNITY_END(); } diff --git a/tests/src/test_intrusive_tx.c b/tests/src/test_intrusive_tx.c index a4b48d2..2b10fd2 100644 --- a/tests/src/test_intrusive_tx.c +++ b/tests/src/test_intrusive_tx.c @@ -7,6 +7,51 @@ #include "helpers.h" #include +typedef struct +{ + size_t count; + bool allow; +} eject_state_t; + +typedef struct +{ + size_t count; + udpard_tx_feedback_t last; +} feedback_state_t; + +static void noop_free(void* const user, const size_t size, void* const pointer) +{ + (void)user; + (void)size; + (void)pointer; +} + +// Ejects with a configurable outcome. +static bool eject_with_flag(udpard_tx_t* const tx, const udpard_tx_ejection_t ejection) +{ + (void)ejection; + eject_state_t* const st = (eject_state_t*)tx->user; + if (st != NULL) { + st->count++; + return st->allow; + } + return true; +} + +// Records feedback into the provided state via user_transfer_reference. +static void record_feedback(udpard_tx_t* const tx, const udpard_tx_feedback_t fb) +{ + (void)tx; + feedback_state_t* const st = (feedback_state_t*)fb.user_transfer_reference; + if (st != NULL) { + st->count++; + st->last = fb; + } +} + +// Minimal endpoint helper. +static udpard_udpip_ep_t make_ep(const uint32_t ip) { return (udpard_udpip_ep_t){ .ip = ip, .port = 1U }; } + static void test_tx_serialize_header(void) { typedef struct @@ -47,6 +92,302 @@ static void test_tx_serialize_header(void) } } +static void test_tx_validation_and_free(void) +{ + // Invalid memory config fails fast. + udpard_tx_mem_resources_t bad = { 0 }; + TEST_ASSERT_FALSE(tx_validate_mem_resources(bad)); + + instrumented_allocator_t alloc_transfer = { 0 }; + instrumented_allocator_t alloc_payload = { 0 }; + instrumented_allocator_new(&alloc_transfer); + instrumented_allocator_new(&alloc_payload); + udpard_tx_mem_resources_t mem = { .transfer = instrumented_allocator_make_resource(&alloc_transfer) }; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + mem.payload[i] = instrumented_allocator_make_resource(&alloc_payload); + } + + // Populate indexes then free to hit all removal paths. + udpard_tx_t tx = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 1U, 1U, 4U, mem, &(udpard_tx_vtable_t){ .eject = eject_with_flag })); + tx_transfer_t* const tr = mem_alloc(mem.transfer, sizeof(tx_transfer_t)); + mem_zero(sizeof(*tr), tr); + tr->priority = udpard_prio_fast; + tr->deadline = 10; + tr->staged_until = 1; + tr->remote_topic_hash = 99; + tr->remote_transfer_id = 100; + tx_transfer_key_t key = { .topic_hash = 5, .transfer_id = 7 }; + (void)cavl2_find_or_insert( + &tx.index_staged, &tr->staged_until, tx_cavl_compare_staged, &tr->index_staged, cavl2_trivial_factory); + (void)cavl2_find_or_insert( + &tx.index_deadline, &tr->deadline, tx_cavl_compare_deadline, &tr->index_deadline, cavl2_trivial_factory); + (void)cavl2_find_or_insert( + &tx.index_transfer, &key, tx_cavl_compare_transfer, &tr->index_transfer, cavl2_trivial_factory); + (void)cavl2_find_or_insert(&tx.index_transfer_remote, + &key, + tx_cavl_compare_transfer_remote, + &tr->index_transfer_remote, + cavl2_trivial_factory); + enlist_head(&tx.agewise, &tr->agewise); + const udpard_tx_feedback_t fb = tx_make_feedback(tr, true); + TEST_ASSERT_TRUE(fb.success); + tx_transfer_free(&tx, tr); + TEST_ASSERT_NULL(tx.index_staged); + TEST_ASSERT_NULL(tx.index_transfer_remote); + instrumented_allocator_reset(&alloc_transfer); + instrumented_allocator_reset(&alloc_payload); +} + +static void test_tx_comparators_and_feedback(void) +{ + tx_transfer_t tr; + mem_zero(sizeof(tr), &tr); + tr.staged_until = 5; + tr.deadline = 7; + tr.topic_hash = 10; + tr.transfer_id = 20; + tr.remote_topic_hash = 3; + tr.remote_transfer_id = 4; + + // Staged/deadline comparisons both ways. + udpard_us_t us = 6; + TEST_ASSERT_EQUAL(1, tx_cavl_compare_staged(&us, &tr.index_staged)); + us = 4; + TEST_ASSERT_EQUAL(-1, tx_cavl_compare_staged(&us, &tr.index_staged)); + us = 8; + TEST_ASSERT_EQUAL(1, tx_cavl_compare_deadline(&us, &tr.index_deadline)); + us = 6; + TEST_ASSERT_EQUAL(-1, tx_cavl_compare_deadline(&us, &tr.index_deadline)); + + // Transfer comparator covers all branches. + tx_transfer_key_t key = { .topic_hash = 5, .transfer_id = 1 }; + TEST_ASSERT_EQUAL(-1, tx_cavl_compare_transfer(&key, &tr.index_transfer)); + key.topic_hash = 15; + TEST_ASSERT_EQUAL(1, tx_cavl_compare_transfer(&key, &tr.index_transfer)); + key.topic_hash = tr.topic_hash; + key.transfer_id = 15; + TEST_ASSERT_EQUAL(-1, tx_cavl_compare_transfer(&key, &tr.index_transfer)); + key.transfer_id = 25; + TEST_ASSERT_EQUAL(1, tx_cavl_compare_transfer(&key, &tr.index_transfer)); + key.transfer_id = tr.transfer_id; + TEST_ASSERT_EQUAL(0, tx_cavl_compare_transfer(&key, &tr.index_transfer)); + + // Remote comparator mirrors the above. + tx_transfer_key_t rkey = { .topic_hash = 2, .transfer_id = 1 }; + TEST_ASSERT_EQUAL(-1, tx_cavl_compare_transfer_remote(&rkey, &tr.index_transfer_remote)); + rkey.topic_hash = 5; + TEST_ASSERT_EQUAL(1, tx_cavl_compare_transfer_remote(&rkey, &tr.index_transfer_remote)); + rkey.topic_hash = tr.remote_topic_hash; + rkey.transfer_id = 2; + TEST_ASSERT_EQUAL(-1, tx_cavl_compare_transfer_remote(&rkey, &tr.index_transfer_remote)); + rkey.transfer_id = 6; + TEST_ASSERT_EQUAL(1, tx_cavl_compare_transfer_remote(&rkey, &tr.index_transfer_remote)); + rkey.transfer_id = tr.remote_transfer_id; + TEST_ASSERT_EQUAL(0, tx_cavl_compare_transfer_remote(&rkey, &tr.index_transfer_remote)); +} + +static void test_tx_spool_and_queue_errors(void) +{ + // OOM in spool after first frame. + instrumented_allocator_t alloc_payload = { 0 }; + instrumented_allocator_new(&alloc_payload); + alloc_payload.limit_fragments = 1; + udpard_tx_t tx = { .enqueued_frames_limit = 1, .enqueued_frames_count = 0 }; + tx.memory.payload[0] = instrumented_allocator_make_resource(&alloc_payload); + byte_t buffer[64] = { 0 }; + udpard_bytes_t payload = { .size = sizeof(buffer), .data = buffer }; + const meta_t meta = { .priority = udpard_prio_fast, + .flag_ack = false, + .transfer_payload_size = (uint32_t)payload.size, + .transfer_id = 1, + .sender_uid = 1, + .topic_hash = 1 }; + TEST_ASSERT_NULL(tx_spool(&tx, tx.memory.payload[0], 32, meta, payload)); + TEST_ASSERT_EQUAL_size_t(0, tx.enqueued_frames_count); + TEST_ASSERT_EQUAL_UINT64(80, tx_ack_timeout(5, udpard_prio_high, 1)); + instrumented_allocator_reset(&alloc_payload); + + // Capacity exhaustion. + instrumented_allocator_new(&alloc_payload); + udpard_tx_mem_resources_t mem = { .transfer = instrumented_allocator_make_resource(&alloc_payload) }; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + mem.payload[i] = instrumented_allocator_make_resource(&alloc_payload); + } + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 2U, 2U, 1U, mem, &(udpard_tx_vtable_t){ .eject = eject_with_flag })); + udpard_udpip_ep_t ep[UDPARD_IFACE_COUNT_MAX] = { make_ep(1), { 0 } }; + byte_t big_buf[2000] = { 0 }; + const udpard_bytes_t big_payload = { .size = sizeof(big_buf), .data = big_buf }; + TEST_ASSERT_EQUAL_UINT32(0, udpard_tx_push(&tx, 0, 1000, udpard_prio_fast, 11, ep, 1, big_payload, NULL, NULL)); + TEST_ASSERT_EQUAL_size_t(1, tx.errors_capacity); + + // Immediate rejection when the request exceeds limits. + udpard_tx_t tx_limit = { .enqueued_frames_limit = 1, .enqueued_frames_count = 0 }; + tx_limit.memory.transfer.free = noop_free; + tx_limit.memory.transfer.alloc = dummy_alloc; + TEST_ASSERT_FALSE(tx_ensure_queue_space(&tx_limit, 3)); + + // Sacrifice clears space when the queue is full. + udpard_tx_t tx_sac = { .enqueued_frames_limit = 1, .enqueued_frames_count = 1, .errors_sacrifice = 0 }; + tx_sac.memory.transfer.free = noop_free; + tx_sac.memory.transfer.alloc = dummy_alloc; + tx_transfer_t victim; + mem_zero(sizeof(victim), &victim); + victim.priority = udpard_prio_fast; + enlist_head(&tx_sac.agewise, &victim.agewise); + TEST_ASSERT_FALSE(tx_ensure_queue_space(&tx_sac, 1)); + TEST_ASSERT_EQUAL_size_t(1, tx_sac.errors_sacrifice); + + // Transfer allocation OOM. + alloc_payload.limit_fragments = 0; + tx.errors_capacity = 0; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 3U, 3U, 2U, mem, &(udpard_tx_vtable_t){ .eject = eject_with_flag })); + TEST_ASSERT_EQUAL_UINT32( + 0, udpard_tx_push(&tx, 0, 1000, udpard_prio_fast, 12, ep, 2, (udpard_bytes_t){ 0 }, NULL, NULL)); + TEST_ASSERT_EQUAL_size_t(1, tx.errors_oom); + + // Spool OOM inside tx_push. + alloc_payload.limit_fragments = 1; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 4U, 4U, 4U, mem, &(udpard_tx_vtable_t){ .eject = eject_with_flag })); + TEST_ASSERT_EQUAL_UINT32(0, udpard_tx_push(&tx, 0, 1000, udpard_prio_fast, 13, ep, 3, big_payload, NULL, NULL)); + TEST_ASSERT_EQUAL_size_t(1, tx.errors_oom); + + // Reliable transfer gets staged. + alloc_payload.limit_fragments = SIZE_MAX; + feedback_state_t fstate = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 5U, 5U, 4U, mem, &(udpard_tx_vtable_t){ .eject = eject_with_flag })); + tx.ack_baseline_timeout = 1; + TEST_ASSERT_GREATER_THAN_UINT32( + 0, + udpard_tx_push(&tx, 0, 100000, udpard_prio_nominal, 14, ep, 4, (udpard_bytes_t){ 0 }, record_feedback, &fstate)); + TEST_ASSERT_NOT_NULL(tx.index_staged); + udpard_tx_free(&tx); + instrumented_allocator_reset(&alloc_payload); +} + +static void test_tx_ack_and_scheduler(void) +{ + instrumented_allocator_t alloc = { 0 }; + instrumented_allocator_new(&alloc); + udpard_tx_mem_resources_t mem = { .transfer = instrumented_allocator_make_resource(&alloc) }; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + mem.payload[i] = instrumented_allocator_make_resource(&alloc); + } + + // Ack reception triggers feedback. + feedback_state_t fstate = { 0 }; + udpard_tx_t tx1 = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new(&tx1, 10U, 1U, 8U, mem, &(udpard_tx_vtable_t){ .eject = eject_with_flag })); + udpard_udpip_ep_t ep[UDPARD_IFACE_COUNT_MAX] = { make_ep(2), { 0 } }; + TEST_ASSERT_EQUAL_UINT32( + 1, udpard_tx_push(&tx1, 0, 1000, udpard_prio_fast, 21, ep, 42, (udpard_bytes_t){ 0 }, record_feedback, &fstate)); + udpard_rx_t rx = { .tx = &tx1 }; + tx_receive_ack(&rx, 21, 42); + TEST_ASSERT_EQUAL_size_t(1, fstate.count); + udpard_tx_free(&tx1); + + // Ack suppressed when coverage not improved. + udpard_tx_t tx2 = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new(&tx2, 11U, 2U, 4U, mem, &(udpard_tx_vtable_t){ .eject = eject_with_flag })); + tx_transfer_t prior; + mem_zero(sizeof(prior), &prior); + prior.destination[0] = make_ep(3); + prior.remote_topic_hash = 7; + prior.remote_transfer_id = 8; + cavl2_find_or_insert(&tx2.index_transfer_remote, + &(tx_transfer_key_t){ .topic_hash = 7, .transfer_id = 8 }, + tx_cavl_compare_transfer_remote, + &prior.index_transfer_remote, + cavl2_trivial_factory); + rx.errors_ack_tx = 0; + rx.tx = &tx2; + tx_send_ack(&rx, 0, udpard_prio_fast, 7, 8, (udpard_remote_t){ .uid = 9, .endpoints = { make_ep(3) } }); + TEST_ASSERT_EQUAL_UINT64(0, rx.errors_ack_tx); + udpard_tx_free(&tx2); + + // Ack replaced with broader coverage. + udpard_tx_t tx3 = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new(&tx3, 12U, 3U, 4U, mem, &(udpard_tx_vtable_t){ .eject = eject_with_flag })); + rx.tx = &tx3; + tx_send_ack(&rx, 0, udpard_prio_fast, 9, 9, (udpard_remote_t){ .uid = 11, .endpoints = { make_ep(4) } }); + tx_send_ack( + &rx, 0, udpard_prio_fast, 9, 9, (udpard_remote_t){ .uid = 11, .endpoints = { make_ep(4), make_ep(5) } }); + udpard_tx_free(&tx3); + + // Ack push failure with TX present. + udpard_tx_mem_resources_t fail_mem = { .transfer = { .user = NULL, .alloc = dummy_alloc, .free = noop_free } }; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + fail_mem.payload[i] = fail_mem.transfer; + } + udpard_tx_t tx6 = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new(&tx6, 15U, 6U, 1U, fail_mem, &(udpard_tx_vtable_t){ .eject = eject_with_flag })); + rx.errors_ack_tx = 0; + rx.tx = &tx6; + tx_send_ack(&rx, 0, udpard_prio_fast, 2, 2, (udpard_remote_t){ .uid = 1, .endpoints = { make_ep(6) } }); + TEST_ASSERT_GREATER_THAN_UINT64(0, rx.errors_ack_tx); + udpard_tx_free(&tx6); + + // Ack push failure increments error. + udpard_rx_t rx_fail = { .tx = NULL }; + tx_send_ack(&rx_fail, 0, udpard_prio_fast, 1, 1, (udpard_remote_t){ 0 }); + TEST_ASSERT_GREATER_THAN_UINT64(0, rx_fail.errors_ack_tx); + + // Expired transfer purge with feedback. + udpard_tx_t tx4 = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new(&tx4, 13U, 4U, 4U, mem, &(udpard_tx_vtable_t){ .eject = eject_with_flag })); + tx4.errors_expiration = 0; + tx_transfer_t* exp = mem_alloc(mem.transfer, sizeof(tx_transfer_t)); + mem_zero(sizeof(*exp), exp); + exp->deadline = 1; + exp->priority = udpard_prio_slow; + exp->topic_hash = 55; + exp->transfer_id = 66; + exp->user_transfer_reference = &fstate; + exp->feedback = record_feedback; + (void)cavl2_find_or_insert( + &tx4.index_deadline, &exp->deadline, tx_cavl_compare_deadline, &exp->index_deadline, cavl2_trivial_factory); + (void)cavl2_find_or_insert(&tx4.index_transfer, + &(tx_transfer_key_t){ .topic_hash = 55, .transfer_id = 66 }, + tx_cavl_compare_transfer, + &exp->index_transfer, + cavl2_trivial_factory); + tx_purge_expired_transfers(&tx4, 2); + TEST_ASSERT_GREATER_THAN_UINT64(0, tx4.errors_expiration); + udpard_tx_free(&tx4); + + // Staged promotion re-enqueues transfer. + udpard_tx_t tx5 = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new(&tx5, 14U, 5U, 4U, mem, &(udpard_tx_vtable_t){ .eject = eject_with_flag })); + tx_transfer_t staged; + mem_zero(sizeof(staged), &staged); + staged.staged_until = 0; + staged.deadline = 100; + staged.priority = udpard_prio_fast; + staged.destination[0] = make_ep(7); + tx_frame_t dummy_frame = { 0 }; + staged.head[0] = staged.cursor[0] = &dummy_frame; + cavl2_find_or_insert( + &tx5.index_staged, &staged.staged_until, tx_cavl_compare_staged, &staged.index_staged, cavl2_trivial_factory); + tx5.ack_baseline_timeout = 1; + tx_promote_staged_transfers(&tx5, 1); + TEST_ASSERT_NOT_NULL(tx5.queue[0][staged.priority].head); + + // Ejection stops when NIC refuses. + staged.cursor[0] = staged.head[0]; + staged.queue[0].next = NULL; + staged.queue[0].prev = NULL; + tx5.queue[0][staged.priority].head = &staged.queue[0]; + tx5.queue[0][staged.priority].tail = &staged.queue[0]; + eject_state_t eject_flag = { .count = 0, .allow = false }; + tx5.vtable = &(udpard_tx_vtable_t){ .eject = eject_with_flag }; + tx5.user = &eject_flag; + tx_eject_pending_frames(&tx5, 5, 0); + TEST_ASSERT_EQUAL_size_t(1, eject_flag.count); + udpard_tx_free(&tx5); + + instrumented_allocator_reset(&alloc); +} + void setUp(void) {} void tearDown(void) {} @@ -55,5 +396,9 @@ int main(void) { UNITY_BEGIN(); RUN_TEST(test_tx_serialize_header); + RUN_TEST(test_tx_validation_and_free); + RUN_TEST(test_tx_comparators_and_feedback); + RUN_TEST(test_tx_spool_and_queue_errors); + RUN_TEST(test_tx_ack_and_scheduler); return UNITY_END(); } From f73110cf05480a951237a68282b168d97ffe182a Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sun, 28 Dec 2025 14:00:50 +0200 Subject: [PATCH 25/42] ALWAYS invoke the feedback --- libudpard/udpard.c | 62 +++++++++++++++++------------------ tests/src/test_intrusive_tx.c | 4 +-- 2 files changed, 32 insertions(+), 34 deletions(-) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index d2285ff..6cae219 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -604,10 +604,18 @@ static void tx_transfer_free_payload(tx_transfer_t* const tr) } } -static void tx_transfer_free(udpard_tx_t* const tx, tx_transfer_t* const tr) +static void tx_transfer_retire(udpard_tx_t* const tx, tx_transfer_t* const tr, const bool success) { - UDPARD_ASSERT(tr != NULL); - tx_transfer_free_payload(tr); + // Construct the feedback object first before the transfer is destroyed. + const udpard_tx_feedback_t fb = { .topic_hash = tr->topic_hash, + .transfer_id = tr->transfer_id, + .user_transfer_reference = tr->user_transfer_reference, + .success = success }; + UDPARD_ASSERT(tr->reliable == (tr->feedback != NULL)); + // save the feedback pointer + void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t) = tr->feedback; + + // Remove from all indexes and lists. for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { delist(&tx->queue[i][tr->priority], &tr->queue[i]); } @@ -620,7 +628,15 @@ static void tx_transfer_free(udpard_tx_t* const tx, tx_transfer_t* const tr) if (cavl2_is_inserted(tx->index_transfer_remote, &tr->index_transfer_remote)) { cavl2_remove(&tx->index_transfer_remote, &tr->index_transfer_remote); } + + // Free the memory. The payload memory may already be empty depending on where we were invoked from. + tx_transfer_free_payload(tr); mem_free(tx->memory.transfer, sizeof(tx_transfer_t), tr); + + // Finally, when the internal state is updated and consistent, invoke the feedback callback if any. + if (feedback != NULL) { + feedback(tx, fb); + } } /// When the queue is exhausted, finds a transfer to sacrifice using simple heuristics and returns it. @@ -637,11 +653,11 @@ static bool tx_ensure_queue_space(udpard_tx_t* const tx, const size_t total_fram return false; // not gonna happen } while (total_frames_needed > (tx->enqueued_frames_limit - tx->enqueued_frames_count)) { - tx_transfer_t* const victim = tx_sacrifice(tx); - if (victim == NULL) { + tx_transfer_t* const tr = tx_sacrifice(tx); + if (tr == NULL) { break; // We may have no transfers anymore but the NIC TX driver could still be holding some frames. } - tx_transfer_free(tx, victim); + tx_transfer_retire(tx, tr, false); tx->errors_sacrifice++; } return total_frames_needed <= (tx->enqueued_frames_limit - tx->enqueued_frames_count); @@ -694,15 +710,6 @@ static bool tx_is_pending(const udpard_tx_t* const tx, const tx_transfer_t* cons return false; } -static udpard_tx_feedback_t tx_make_feedback(const tx_transfer_t* const tr, const bool success) -{ - const udpard_tx_feedback_t fb = { .topic_hash = tr->topic_hash, - .transfer_id = tr->transfer_id, - .user_transfer_reference = tr->user_transfer_reference, - .success = success }; - return fb; -} - /// Returns the head of the transfer chain; NULL on OOM. static tx_frame_t* tx_spool(udpard_tx_t* const tx, const udpard_mem_resource_t memory, @@ -921,12 +928,7 @@ static void tx_receive_ack(udpard_rx_t* const rx, const uint64_t topic_hash, con if (rx->tx != NULL) { tx_transfer_t* const tr = tx_transfer_find(rx->tx, topic_hash, transfer_id); if ((tr != NULL) && tr->reliable) { - if (tr->feedback != NULL) { - const udpard_tx_feedback_t fb = tx_make_feedback(tr, true); - tx_transfer_free_payload(tr); // do this early to release memory before callback - tr->feedback(rx->tx, fb); - } - tx_transfer_free(rx->tx, tr); + tx_transfer_retire(rx->tx, tr, true); } } } @@ -954,8 +956,9 @@ static void tx_send_ack(udpard_rx_t* const rx, if (!new_better) { return; // Can we get an ack? We have ack at home! } - if (prior != NULL) { // avoid redundant acks for the same transfer -- replace with better one - tx_transfer_free(tx, prior); // this will free up a queue slot and some memory + if (prior != NULL) { // avoid redundant acks for the same transfer -- replace with better one + UDPARD_ASSERT(prior->feedback == NULL); + tx_transfer_retire(tx, prior, false); // this will free up a queue slot and some memory } // Even if the new, better ack fails to enqueue for some reason, it's no big deal -- we will send the next one. // The only reason it might fail is an OOM but we just freed a slot so it should be fine. @@ -1093,12 +1096,7 @@ static void tx_purge_expired_transfers(udpard_tx_t* const self, const udpard_us_ while (true) { // we can use next_greater instead of doing min search every time tx_transfer_t* const tr = CAVL2_TO_OWNER(cavl2_min(self->index_deadline), tx_transfer_t, index_deadline); if ((tr != NULL) && (now > tr->deadline)) { - if (tr->feedback != NULL) { - const udpard_tx_feedback_t fb = tx_make_feedback(tr, false); - tx_transfer_free_payload(tr); // do this early to release memory before callback - tr->feedback(self, fb); - } - tx_transfer_free(self, tr); + tx_transfer_retire(self, tr, false); self->errors_expiration++; } else { break; @@ -1189,7 +1187,8 @@ static void tx_eject_pending_frames(udpard_tx_t* const self, const udpard_us_t n delist(&self->queue[ifindex][tr->priority], &tr->queue[ifindex]); // no longer pending for transmission UDPARD_ASSERT(!last_attempt || (tr->head[ifindex] == NULL)); // this iface is done with the payload if (last_attempt && !tr->reliable && !tx_is_pending(self, tr)) { // remove early once all ifaces are done - tx_transfer_free(self, tr); + UDPARD_ASSERT(tr->feedback == NULL); // non-reliable transfers have no feedback callback + tx_transfer_retire(self, tr, true); } } } @@ -1234,7 +1233,8 @@ void udpard_tx_free(udpard_tx_t* const self) { if (self != NULL) { while (self->index_transfer != NULL) { - tx_transfer_free(self, CAVL2_TO_OWNER(self->index_transfer, tx_transfer_t, index_transfer)); + tx_transfer_t* tr = CAVL2_TO_OWNER(self->index_transfer, tx_transfer_t, index_transfer); + tx_transfer_retire(self, tr, false); } } } diff --git a/tests/src/test_intrusive_tx.c b/tests/src/test_intrusive_tx.c index 2b10fd2..38bd5ff 100644 --- a/tests/src/test_intrusive_tx.c +++ b/tests/src/test_intrusive_tx.c @@ -130,9 +130,7 @@ static void test_tx_validation_and_free(void) &tr->index_transfer_remote, cavl2_trivial_factory); enlist_head(&tx.agewise, &tr->agewise); - const udpard_tx_feedback_t fb = tx_make_feedback(tr, true); - TEST_ASSERT_TRUE(fb.success); - tx_transfer_free(&tx, tr); + tx_transfer_retire(&tx, tr, true); TEST_ASSERT_NULL(tx.index_staged); TEST_ASSERT_NULL(tx.index_transfer_remote); instrumented_allocator_reset(&alloc_transfer); From eafaaece6311cef90393b0feb669496271a68d77 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sun, 28 Dec 2025 14:19:45 +0200 Subject: [PATCH 26/42] tests --- tests/.clang-tidy | 3 + tests/src/test_e2e_edge.cpp | 126 ++++++++++++++++++++++++++++++++++++ 2 files changed, 129 insertions(+) diff --git a/tests/.clang-tidy b/tests/.clang-tidy index 0c49ca1..971c4bd 100644 --- a/tests/.clang-tidy +++ b/tests/.clang-tidy @@ -52,6 +52,9 @@ Checks: >- -*DeprecatedOrUnsafeBufferHandling, -*-prefer-static-over-anonymous-namespace, -*-pro-bounds-avoid-unchecked-container-access, + -*-array*decay, + -*-avoid-c-arrays, + -*-named-parameter, WarningsAsErrors: '*' HeaderFilterRegex: '.*\.hpp' FormatStyle: file diff --git a/tests/src/test_e2e_edge.cpp b/tests/src/test_e2e_edge.cpp index 1fed924..459e368 100644 --- a/tests/src/test_e2e_edge.cpp +++ b/tests/src/test_e2e_edge.cpp @@ -19,6 +19,13 @@ constexpr udpard_rx_port_vtable_t callbacks{ .on_message = &on_message, .on_coll void on_message_p2p(udpard_rx_t* rx, udpard_rx_port_p2p_t* port, udpard_rx_transfer_p2p_t transfer); constexpr udpard_rx_port_p2p_vtable_t p2p_callbacks{ &on_message_p2p }; +struct FbState +{ + size_t count = 0; + bool success = false; + uint64_t tid = 0; +}; + struct CapturedFrame { udpard_bytes_mut_t datagram; @@ -46,6 +53,24 @@ bool capture_tx_frame(udpard_tx_t* const tx, const udpard_tx_ejection_t ejection constexpr udpard_tx_vtable_t tx_vtable{ .eject = &capture_tx_frame }; +void fb_record(udpard_tx_t*, const udpard_tx_feedback_t fb) +{ + auto* st = static_cast(fb.user_transfer_reference); + if (st != nullptr) { + st->count++; + st->success = fb.success; + st->tid = fb.transfer_id; + } +} + +void release_frames(std::vector& frames) +{ + for (const auto& [datagram, iface_index] : frames) { + udpard_tx_refcount_dec(udpard_bytes_t{ .size = datagram.size, .data = datagram.data }); + } + frames.clear(); +} + struct Context { std::vector ids; @@ -278,6 +303,106 @@ void test_udpard_rx_ordered_head_advanced_late() TEST_ASSERT_EQUAL_size_t(0, fix.ctx.collisions); } +// Feedback must fire regardless of disposal path. +void test_udpard_tx_feedback_always_called() +{ + instrumented_allocator_t tx_alloc_transfer{}; + instrumented_allocator_t tx_alloc_payload{}; + instrumented_allocator_new(&tx_alloc_transfer); + instrumented_allocator_new(&tx_alloc_payload); + udpard_tx_mem_resources_t tx_mem{}; + tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer); + for (auto& res : tx_mem.payload) { + res = instrumented_allocator_make_resource(&tx_alloc_payload); + } + const udpard_udpip_ep_t endpoint = udpard_make_subject_endpoint(1); + + // Expiration path triggers feedback=false. + { + std::vector frames; + udpard_tx_t tx{}; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 1U, 1U, 4, tx_mem, &tx_vtable)); + tx.user = &frames; + FbState fb{}; + udpard_udpip_ep_t dests[UDPARD_IFACE_COUNT_MAX] = { endpoint, {} }; + TEST_ASSERT_GREATER_THAN_UINT32( + 0, + udpard_tx_push( + &tx, 10, 10, udpard_prio_fast, 1, dests, 11, udpard_bytes_t{ .size = 0, .data = nullptr }, fb_record, &fb)); + udpard_tx_poll(&tx, 11, UDPARD_IFACE_MASK_ALL); + TEST_ASSERT_EQUAL_size_t(1, fb.count); + TEST_ASSERT_FALSE(fb.success); + release_frames(frames); + udpard_tx_free(&tx); + } + + // Sacrifice path should also emit feedback. + { + std::vector frames; + udpard_tx_t tx{}; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 2U, 1U, 1, tx_mem, &tx_vtable)); + tx.user = &frames; + FbState fb_old{}; + FbState fb_new{}; + udpard_udpip_ep_t dests[UDPARD_IFACE_COUNT_MAX] = { endpoint, {} }; + TEST_ASSERT_GREATER_THAN_UINT32(0, + udpard_tx_push(&tx, + 0, + 1000, + udpard_prio_fast, + 2, + dests, + 21, + udpard_bytes_t{ .size = 0, .data = nullptr }, + fb_record, + &fb_old)); + (void)udpard_tx_push(&tx, + 0, + 1000, + udpard_prio_fast, + 3, + dests, + 22, + udpard_bytes_t{ .size = 0, .data = nullptr }, + fb_record, + &fb_new); + TEST_ASSERT_EQUAL_size_t(1, fb_old.count); + TEST_ASSERT_FALSE(fb_old.success); + TEST_ASSERT_GREATER_OR_EQUAL_UINT64(1, tx.errors_sacrifice); + TEST_ASSERT_EQUAL_size_t(0, fb_new.count); + release_frames(frames); + udpard_tx_free(&tx); + } + + // Destroying a TX with pending transfers still calls feedback. + { + std::vector frames; + udpard_tx_t tx{}; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 3U, 1U, 4, tx_mem, &tx_vtable)); + tx.user = &frames; + FbState fb{}; + udpard_udpip_ep_t dests[UDPARD_IFACE_COUNT_MAX] = { endpoint, {} }; + TEST_ASSERT_GREATER_THAN_UINT32(0, + udpard_tx_push(&tx, + 0, + 1000, + udpard_prio_fast, + 4, + dests, + 33, + udpard_bytes_t{ .size = 0, .data = nullptr }, + fb_record, + &fb)); + udpard_tx_free(&tx); + TEST_ASSERT_EQUAL_size_t(1, fb.count); + TEST_ASSERT_FALSE(fb.success); + release_frames(frames); + } + + instrumented_allocator_reset(&tx_alloc_transfer); + instrumented_allocator_reset(&tx_alloc_payload); +} + /// P2P helper should emit frames with auto transfer-ID and proper addressing. void test_udpard_tx_push_p2p() { @@ -370,6 +495,7 @@ int main() RUN_TEST(test_udpard_rx_unordered_duplicates); RUN_TEST(test_udpard_rx_ordered_out_of_order); RUN_TEST(test_udpard_rx_ordered_head_advanced_late); + RUN_TEST(test_udpard_tx_feedback_always_called); RUN_TEST(test_udpard_tx_push_p2p); return UNITY_END(); } From 152bec35f44b43513090841f1fcf33a8bd4e52af Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sun, 28 Dec 2025 14:33:42 +0200 Subject: [PATCH 27/42] fix tests --- AGENTS.md | 2 ++ tests/src/test_intrusive_tx.c | 24 +++++++++++++++++++++--- 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/AGENTS.md b/AGENTS.md index c12f071..78e7b28 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -6,3 +6,5 @@ Keep the code and comments very brief. Be sure every significant code block is p When building the code, don't hesitate to use multiple jobs to use all CPU cores. To speed things up, it is best to configure CMake with `NO_STATIC_ANALYSIS=1`. + +Run all tests in debug build to ensure that all assertion checks are enabled. diff --git a/tests/src/test_intrusive_tx.c b/tests/src/test_intrusive_tx.c index 38bd5ff..4bd0dc9 100644 --- a/tests/src/test_intrusive_tx.c +++ b/tests/src/test_intrusive_tx.c @@ -220,18 +220,35 @@ static void test_tx_spool_and_queue_errors(void) TEST_ASSERT_EQUAL_size_t(1, tx.errors_capacity); // Immediate rejection when the request exceeds limits. - udpard_tx_t tx_limit = { .enqueued_frames_limit = 1, .enqueued_frames_count = 0 }; + udpard_tx_t tx_limit; + mem_zero(sizeof(tx_limit), &tx_limit); + tx_limit.enqueued_frames_limit = 1; + tx_limit.enqueued_frames_count = 0; tx_limit.memory.transfer.free = noop_free; tx_limit.memory.transfer.alloc = dummy_alloc; TEST_ASSERT_FALSE(tx_ensure_queue_space(&tx_limit, 3)); // Sacrifice clears space when the queue is full. - udpard_tx_t tx_sac = { .enqueued_frames_limit = 1, .enqueued_frames_count = 1, .errors_sacrifice = 0 }; + udpard_tx_t tx_sac; + mem_zero(sizeof(tx_sac), &tx_sac); + tx_sac.enqueued_frames_limit = 1; + tx_sac.enqueued_frames_count = 1; + tx_sac.errors_sacrifice = 0; tx_sac.memory.transfer.free = noop_free; tx_sac.memory.transfer.alloc = dummy_alloc; tx_transfer_t victim; mem_zero(sizeof(victim), &victim); - victim.priority = udpard_prio_fast; + victim.priority = udpard_prio_fast; + victim.deadline = 1; + victim.topic_hash = 7; + victim.transfer_id = 9; + (void)cavl2_find_or_insert( + &tx_sac.index_deadline, &victim.deadline, tx_cavl_compare_deadline, &victim.index_deadline, cavl2_trivial_factory); + (void)cavl2_find_or_insert(&tx_sac.index_transfer, + &(tx_transfer_key_t){ .topic_hash = victim.topic_hash, .transfer_id = victim.transfer_id }, + tx_cavl_compare_transfer, + &victim.index_transfer, + cavl2_trivial_factory); enlist_head(&tx_sac.agewise, &victim.agewise); TEST_ASSERT_FALSE(tx_ensure_queue_space(&tx_sac, 1)); TEST_ASSERT_EQUAL_size_t(1, tx_sac.errors_sacrifice); @@ -341,6 +358,7 @@ static void test_tx_ack_and_scheduler(void) exp->topic_hash = 55; exp->transfer_id = 66; exp->user_transfer_reference = &fstate; + exp->reliable = true; exp->feedback = record_feedback; (void)cavl2_find_or_insert( &tx4.index_deadline, &exp->deadline, tx_cavl_compare_deadline, &exp->index_deadline, cavl2_trivial_factory); From 377ba4f464a98a27f8f95441d47eccfadee932b7 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sun, 28 Dec 2025 16:42:41 +0200 Subject: [PATCH 28/42] more tests --- AGENTS.md | 1 - libudpard/udpard.c | 14 +- libudpard/udpard.h | 1 + tests/CMakeLists.txt | 1 + tests/src/test_e2e_api.cpp | 422 ++++++++++++++++++++++++++++++++++ tests/src/test_e2e_random.cpp | 141 ++++++++++-- tests/src/test_intrusive_rx.c | 13 +- 7 files changed, 566 insertions(+), 27 deletions(-) create mode 100644 tests/src/test_e2e_api.cpp diff --git a/AGENTS.md b/AGENTS.md index 78e7b28..8811474 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -5,6 +5,5 @@ Please read `README.md` for general information about LibUDPard, and `CONTRIBUTI Keep the code and comments very brief. Be sure every significant code block is preceded with a brief comment. When building the code, don't hesitate to use multiple jobs to use all CPU cores. -To speed things up, it is best to configure CMake with `NO_STATIC_ANALYSIS=1`. Run all tests in debug build to ensure that all assertion checks are enabled. diff --git a/libudpard/udpard.c b/libudpard/udpard.c index 6cae219..86f9205 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -2167,7 +2167,7 @@ bool udpard_rx_port_new(udpard_rx_port_t* const self, } /// A thin proxy that reads the P2P header and dispatches the message to the appropriate handler. -static void rx_p2p_on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) +static void rx_p2p_on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, udpard_rx_transfer_t transfer) { udpard_rx_port_p2p_t* const self = (udpard_rx_port_p2p_t*)port; @@ -2190,18 +2190,20 @@ static void rx_p2p_on_message(udpard_rx_t* const rx, udpard_rx_port_t* const por UDPARD_ASSERT((ptr == (UDPARD_P2P_HEADER_BYTES + (byte_t*)frag0->view.data))); (void)ptr; - // Remove the header from the view. + // Remove the header from the view and update the transfer metadata. + transfer.transfer_id = transfer_id; + transfer.payload_size_stored -= UDPARD_P2P_HEADER_BYTES; frag0->view.size -= UDPARD_P2P_HEADER_BYTES; frag0->view.data = UDPARD_P2P_HEADER_BYTES + (byte_t*)(frag0->view.data); // Process the data depending on the kind. if (kind == P2P_KIND_ACK) { tx_receive_ack(rx, topic_hash, transfer_id); + udpard_fragment_free_all(transfer.payload, port->memory.fragment); } else if (kind == P2P_KIND_RESPONSE) { - const udpard_rx_transfer_p2p_t tr = { .base = transfer, .topic_hash = topic_hash }; - self->vtable->on_message(rx, self, tr); - } else { - (void)0; // Malformed, ignored. + self->vtable->on_message(rx, self, (udpard_rx_transfer_p2p_t){ .base = transfer, .topic_hash = topic_hash }); + } else { // malformed + udpard_fragment_free_all(transfer.payload, port->memory.fragment); } } diff --git a/libudpard/udpard.h b/libudpard/udpard.h index 9e9bf33..21cd274 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -738,6 +738,7 @@ typedef struct udpard_rx_port_p2p_vtable_t } udpard_rx_port_p2p_vtable_t; /// A specialization of udpard_rx_port_t for the local node's P2P port. +/// Each node must have exactly one P2P port, which is used for P2P transfers and acknowledgments. struct udpard_rx_port_p2p_t { udpard_rx_port_t base; diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 278408b..06be236 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -91,6 +91,7 @@ gen_test_matrix(test_intrusive_rx "src/test_intrusive_rx.c") gen_test_matrix(test_fragment "src/test_fragment.cpp;${library_dir}/udpard.c") gen_test_matrix(test_e2e_random "src/test_e2e_random.cpp;${library_dir}/udpard.c") gen_test_matrix(test_e2e_edge "src/test_e2e_edge.cpp;${library_dir}/udpard.c") +gen_test_matrix(test_e2e_api "src/test_e2e_api.cpp;${library_dir}/udpard.c") # Coverage targets. Usage: # cmake -DENABLE_COVERAGE=ON .. diff --git a/tests/src/test_e2e_api.cpp b/tests/src/test_e2e_api.cpp new file mode 100644 index 0000000..f0db682 --- /dev/null +++ b/tests/src/test_e2e_api.cpp @@ -0,0 +1,422 @@ +/// This software is distributed under the terms of the MIT License. +/// Copyright (C) OpenCyphal Development Team +/// Copyright Amazon.com Inc. or its affiliates. +/// SPDX-License-Identifier: MIT + +// ReSharper disable CppPassValueParameterByConstReference + +#include +#include "helpers.h" +#include +#include +#include + +namespace { + +struct CapturedFrame +{ + udpard_bytes_mut_t datagram; + uint_fast8_t iface_index; +}; + +struct FeedbackState +{ + size_t count = 0; + bool success = false; + uint64_t transfer_id = 0; +}; + +struct RxContext +{ + std::vector expected; + std::array sources{}; + uint64_t remote_uid = 0; + size_t received = 0; + size_t collisions = 0; +}; + +// Refcount helpers keep captured datagrams alive. +void tx_refcount_free(void* const user, const size_t size, void* const payload) +{ + (void)user; + udpard_tx_refcount_dec(udpard_bytes_t{ .size = size, .data = payload }); +} + +bool capture_tx_frame(udpard_tx_t* const tx, const udpard_tx_ejection_t ejection) +{ + auto* frames = static_cast*>(tx->user); + if (frames == nullptr) { + return false; + } + udpard_tx_refcount_inc(ejection.datagram); + void* const data = const_cast(ejection.datagram.data); // NOLINT(cppcoreguidelines-pro-type-const-cast) + frames->push_back(CapturedFrame{ .datagram = { .size = ejection.datagram.size, .data = data }, + .iface_index = ejection.iface_index }); + return true; +} + +void drop_frame(const CapturedFrame& frame) +{ + udpard_tx_refcount_dec(udpard_bytes_t{ .size = frame.datagram.size, .data = frame.datagram.data }); +} + +void fill_random(std::vector& data) +{ + for (auto& byte : data) { + byte = static_cast(rand()) & 0xFFU; + } +} + +constexpr udpard_tx_vtable_t tx_vtable{ .eject = &capture_tx_frame }; + +// Feedback callback records completion. +void record_feedback(udpard_tx_t*, const udpard_tx_feedback_t fb) +{ + auto* st = static_cast(fb.user_transfer_reference); + if (st != nullptr) { + st->count++; + st->success = fb.success; + st->transfer_id = fb.transfer_id; + } +} + +// RX callbacks validate payload and sender. +void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) +{ + auto* ctx = static_cast(rx->user); + TEST_ASSERT_EQUAL_UINT64(ctx->remote_uid, transfer.remote.uid); + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + if ((transfer.remote.endpoints[i].ip != 0U) || (transfer.remote.endpoints[i].port != 0U)) { + TEST_ASSERT_EQUAL_UINT32(ctx->sources[i].ip, transfer.remote.endpoints[i].ip); + TEST_ASSERT_EQUAL_UINT16(ctx->sources[i].port, transfer.remote.endpoints[i].port); + } + } + std::vector assembled(transfer.payload_size_stored); + const udpard_fragment_t* cursor = transfer.payload; + const size_t gathered = udpard_fragment_gather(&cursor, 0, transfer.payload_size_stored, assembled.data()); + TEST_ASSERT_EQUAL_size_t(transfer.payload_size_stored, gathered); + TEST_ASSERT_EQUAL_size_t(ctx->expected.size(), transfer.payload_size_wire); + if (!ctx->expected.empty()) { + TEST_ASSERT_EQUAL_MEMORY(ctx->expected.data(), assembled.data(), transfer.payload_size_stored); + } + udpard_fragment_free_all(transfer.payload, port->memory.fragment); + ctx->received++; +} + +void on_collision(udpard_rx_t* const rx, udpard_rx_port_t* const /*port*/, const udpard_remote_t /*remote*/) +{ + auto* ctx = static_cast(rx->user); + ctx->collisions++; +} +constexpr udpard_rx_port_vtable_t callbacks{ .on_message = &on_message, .on_collision = &on_collision }; + +// Ack port frees responses. +void on_ack_response(udpard_rx_t*, udpard_rx_port_p2p_t* port, const udpard_rx_transfer_p2p_t tr) +{ + udpard_fragment_free_all(tr.base.payload, port->base.memory.fragment); +} +constexpr udpard_rx_port_p2p_vtable_t ack_callbacks{ &on_ack_response }; + +// Reliable delivery must survive data and ack loss. +void test_reliable_delivery_under_losses() +{ + seed_prng(); + + // Allocators. + instrumented_allocator_t pub_alloc_transfer{}; + instrumented_allocator_t pub_alloc_payload{}; + instrumented_allocator_t sub_alloc_frag{}; + instrumented_allocator_t sub_alloc_session{}; + instrumented_allocator_t acktx_alloc_transfer{}; + instrumented_allocator_t acktx_alloc_payload{}; + instrumented_allocator_t ackrx_alloc_frag{}; + instrumented_allocator_t ackrx_alloc_session{}; + instrumented_allocator_new(&pub_alloc_transfer); + instrumented_allocator_new(&pub_alloc_payload); + instrumented_allocator_new(&sub_alloc_frag); + instrumented_allocator_new(&sub_alloc_session); + instrumented_allocator_new(&acktx_alloc_transfer); + instrumented_allocator_new(&acktx_alloc_payload); + instrumented_allocator_new(&ackrx_alloc_frag); + instrumented_allocator_new(&ackrx_alloc_session); + + // Memory views. + udpard_tx_mem_resources_t pub_mem{}; + pub_mem.transfer = instrumented_allocator_make_resource(&pub_alloc_transfer); + for (auto& res : pub_mem.payload) { + res = instrumented_allocator_make_resource(&pub_alloc_payload); + } + udpard_tx_mem_resources_t ack_mem{}; + ack_mem.transfer = instrumented_allocator_make_resource(&acktx_alloc_transfer); + for (auto& res : ack_mem.payload) { + res = instrumented_allocator_make_resource(&acktx_alloc_payload); + } + const udpard_rx_mem_resources_t sub_mem{ .session = instrumented_allocator_make_resource(&sub_alloc_session), + .fragment = instrumented_allocator_make_resource(&sub_alloc_frag) }; + const udpard_rx_mem_resources_t ack_rx_mem{ .session = instrumented_allocator_make_resource(&ackrx_alloc_session), + .fragment = instrumented_allocator_make_resource(&ackrx_alloc_frag) }; + + // Pipelines. + udpard_tx_t pub_tx{}; + std::vector pub_frames; + TEST_ASSERT_TRUE(udpard_tx_new(&pub_tx, 0x1111222233334444ULL, 10U, 64, pub_mem, &tx_vtable)); + pub_tx.user = &pub_frames; + pub_tx.ack_baseline_timeout = 8000; + udpard_tx_t ack_tx{}; + std::vector ack_frames; + TEST_ASSERT_TRUE(udpard_tx_new(&ack_tx, 0xABCDEF0012345678ULL, 77U, 8, ack_mem, &tx_vtable)); + ack_tx.user = &ack_frames; + + udpard_rx_t sub_rx{}; + udpard_rx_new(&sub_rx, &ack_tx); + udpard_rx_port_t sub_port{}; + const uint64_t topic_hash = 0x0123456789ABCDEFULL; + TEST_ASSERT_TRUE( + udpard_rx_port_new(&sub_port, topic_hash, 6000, UDPARD_RX_REORDERING_WINDOW_UNORDERED, sub_mem, &callbacks)); + udpard_rx_t ack_rx{}; + udpard_rx_port_p2p_t ack_port{}; + udpard_rx_new(&ack_rx, &pub_tx); + TEST_ASSERT_TRUE( + udpard_rx_port_new_p2p(&ack_port, pub_tx.local_uid, UDPARD_P2P_HEADER_BYTES, ack_rx_mem, &ack_callbacks)); + + // Endpoints. + const std::array publisher_sources{ + udpard_udpip_ep_t{ .ip = 0x0A000001U, .port = 7400U }, + udpard_udpip_ep_t{ .ip = 0x0A000002U, .port = 7401U }, + udpard_udpip_ep_t{ .ip = 0x0A000003U, .port = 7402U }, + }; + const std::array subscriber_endpoints{ + udpard_make_subject_endpoint(111U), + udpard_udpip_ep_t{ .ip = 0x0A00000BU, .port = 7501U }, + udpard_udpip_ep_t{ .ip = 0x0A00000CU, .port = 7502U }, + }; + const std::array ack_sources{ + udpard_udpip_ep_t{ .ip = 0x0A000010U, .port = 7600U }, + udpard_udpip_ep_t{ .ip = 0x0A000011U, .port = 7601U }, + udpard_udpip_ep_t{ .ip = 0x0A000012U, .port = 7602U }, + }; + + // Payload and context. + std::vector payload(4096); + fill_random(payload); + RxContext ctx{}; + ctx.expected = payload; + ctx.sources = publisher_sources; + ctx.remote_uid = pub_tx.local_uid; + sub_rx.user = &ctx; + + // Reliable transfer with staged losses. + FeedbackState fb{}; + const udpard_bytes_t payload_view{ .size = payload.size(), .data = payload.data() }; + std::array dest_per_iface = subscriber_endpoints; + pub_tx.mtu[0] = 600; + pub_tx.mtu[1] = 900; + pub_tx.mtu[2] = 500; + const udpard_us_t start = 0; + const udpard_us_t deadline = start + 200000; + const udpard_mem_deleter_t tx_payload_deleter{ .user = nullptr, .free = &tx_refcount_free }; + TEST_ASSERT_GREATER_THAN_UINT32(0U, + udpard_tx_push(&pub_tx, + start, + deadline, + udpard_prio_fast, + topic_hash, + dest_per_iface.data(), + 1U, + payload_view, + &record_feedback, + &fb)); + + // Send until acked; drop first data frame and first ack. + bool first_round = true; + udpard_us_t now = start; + size_t attempts = 0; + const size_t attempt_cap = 6; + while ((fb.count == 0) && (attempts < attempt_cap)) { + pub_frames.clear(); + udpard_tx_poll(&pub_tx, now, UDPARD_IFACE_MASK_ALL); + bool data_loss_done = false; + for (const auto& frame : pub_frames) { + const bool drop = first_round && !data_loss_done && (frame.iface_index == 1U); + if (drop) { + drop_frame(frame); + data_loss_done = true; + continue; + } + TEST_ASSERT_TRUE(udpard_rx_port_push(&sub_rx, + &sub_port, + now, + publisher_sources[frame.iface_index], + frame.datagram, + tx_payload_deleter, + frame.iface_index)); + } + udpard_rx_poll(&sub_rx, now); + + ack_frames.clear(); + udpard_tx_poll(&ack_tx, now, UDPARD_IFACE_MASK_ALL); + bool ack_sent = false; + for (const auto& ack : ack_frames) { + const bool drop_ack = first_round && !ack_sent; + if (drop_ack) { + drop_frame(ack); + continue; + } + ack_sent = true; + TEST_ASSERT_TRUE(udpard_rx_port_push(&ack_rx, + reinterpret_cast(&ack_port), + now, + ack_sources[ack.iface_index], + ack.datagram, + tx_payload_deleter, + ack.iface_index)); + } + udpard_rx_poll(&ack_rx, now); + first_round = false; + attempts++; + now += pub_tx.ack_baseline_timeout + 5000; + } + + TEST_ASSERT_EQUAL_size_t(1, fb.count); + TEST_ASSERT_TRUE(fb.success); + TEST_ASSERT_EQUAL_size_t(1, ctx.received); + TEST_ASSERT_EQUAL_size_t(0, ctx.collisions); + + // Cleanup. + udpard_rx_port_free(&sub_rx, &sub_port); + udpard_rx_port_free(&ack_rx, reinterpret_cast(&ack_port)); + udpard_tx_free(&pub_tx); + udpard_tx_free(&ack_tx); + TEST_ASSERT_EQUAL_size_t(0, sub_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, sub_alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, pub_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, pub_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, acktx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, acktx_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, ackrx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, ackrx_alloc_session.allocated_fragments); + instrumented_allocator_reset(&sub_alloc_frag); + instrumented_allocator_reset(&sub_alloc_session); + instrumented_allocator_reset(&pub_alloc_transfer); + instrumented_allocator_reset(&pub_alloc_payload); + instrumented_allocator_reset(&acktx_alloc_transfer); + instrumented_allocator_reset(&acktx_alloc_payload); + instrumented_allocator_reset(&ackrx_alloc_frag); + instrumented_allocator_reset(&ackrx_alloc_session); +} + +// Counters must reflect expired deliveries and ack failures. +void test_reliable_stats_and_failures() +{ + seed_prng(); + + // Expiration path. + instrumented_allocator_t exp_alloc_transfer{}; + instrumented_allocator_t exp_alloc_payload{}; + instrumented_allocator_new(&exp_alloc_transfer); + instrumented_allocator_new(&exp_alloc_payload); + udpard_tx_mem_resources_t exp_mem{}; + exp_mem.transfer = instrumented_allocator_make_resource(&exp_alloc_transfer); + for (auto& res : exp_mem.payload) { + res = instrumented_allocator_make_resource(&exp_alloc_payload); + } + udpard_tx_t exp_tx{}; + std::vector exp_frames; + TEST_ASSERT_TRUE(udpard_tx_new(&exp_tx, 0x9999000011112222ULL, 2U, 4, exp_mem, &tx_vtable)); + exp_tx.user = &exp_frames; + FeedbackState fb_fail{}; + const udpard_udpip_ep_t exp_dest[UDPARD_IFACE_COUNT_MAX] = { udpard_make_subject_endpoint(99U), {}, {} }; + const udpard_bytes_t exp_payload{ .size = 4, .data = "ping" }; + TEST_ASSERT_GREATER_THAN_UINT32( + 0U, + udpard_tx_push( + &exp_tx, 0, 10, udpard_prio_fast, 0xABCULL, exp_dest, 5U, exp_payload, &record_feedback, &fb_fail)); + udpard_tx_poll(&exp_tx, 0, UDPARD_IFACE_MASK_ALL); + for (const auto& f : exp_frames) { + drop_frame(f); + } + exp_frames.clear(); + udpard_tx_poll(&exp_tx, 20, UDPARD_IFACE_MASK_ALL); + TEST_ASSERT_EQUAL_size_t(1, fb_fail.count); + TEST_ASSERT_FALSE(fb_fail.success); + TEST_ASSERT_GREATER_THAN_UINT64(0, exp_tx.errors_expiration); + udpard_tx_free(&exp_tx); + TEST_ASSERT_EQUAL_size_t(0, exp_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, exp_alloc_payload.allocated_fragments); + instrumented_allocator_reset(&exp_alloc_transfer); + instrumented_allocator_reset(&exp_alloc_payload); + + // Ack push failure increments counters. + instrumented_allocator_t rx_alloc_frag{}; + instrumented_allocator_t rx_alloc_session{}; + instrumented_allocator_t src_alloc_transfer{}; + instrumented_allocator_t src_alloc_payload{}; + instrumented_allocator_new(&rx_alloc_frag); + instrumented_allocator_new(&rx_alloc_session); + instrumented_allocator_new(&src_alloc_transfer); + instrumented_allocator_new(&src_alloc_payload); + const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), + .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; + udpard_tx_mem_resources_t src_mem{}; + src_mem.transfer = instrumented_allocator_make_resource(&src_alloc_transfer); + for (auto& res : src_mem.payload) { + res = instrumented_allocator_make_resource(&src_alloc_payload); + } + + udpard_tx_t src_tx{}; + std::vector src_frames; + TEST_ASSERT_TRUE(udpard_tx_new(&src_tx, 0x5555AAAABBBBCCCCULL, 3U, 4, src_mem, &tx_vtable)); + src_tx.user = &src_frames; + udpard_rx_t rx{}; + udpard_rx_port_t port{}; + RxContext ctx{}; + ctx.remote_uid = src_tx.local_uid; + ctx.sources = { udpard_udpip_ep_t{ .ip = 0x0A000021U, .port = 7700U }, udpard_udpip_ep_t{}, udpard_udpip_ep_t{} }; + ctx.expected.assign({ 1U, 2U, 3U, 4U }); + udpard_rx_new(&rx, nullptr); + rx.user = &ctx; + TEST_ASSERT_TRUE( + udpard_rx_port_new(&port, 0x12340000ULL, 64, UDPARD_RX_REORDERING_WINDOW_UNORDERED, rx_mem, &callbacks)); + + const udpard_udpip_ep_t src_dest[UDPARD_IFACE_COUNT_MAX] = { udpard_make_subject_endpoint(12U), {}, {} }; + const udpard_bytes_t src_payload{ .size = ctx.expected.size(), .data = ctx.expected.data() }; + FeedbackState fb_ignore{}; + TEST_ASSERT_GREATER_THAN_UINT32( + 0U, + udpard_tx_push( + &src_tx, 0, 1000, udpard_prio_fast, port.topic_hash, src_dest, 7U, src_payload, &record_feedback, &fb_ignore)); + udpard_tx_poll(&src_tx, 0, UDPARD_IFACE_MASK_ALL); + const udpard_mem_deleter_t tx_payload_deleter{ .user = nullptr, .free = &tx_refcount_free }; + for (const auto& f : src_frames) { + TEST_ASSERT_TRUE(udpard_rx_port_push( + &rx, &port, 0, ctx.sources[f.iface_index], f.datagram, tx_payload_deleter, f.iface_index)); + } + udpard_rx_poll(&rx, 0); + TEST_ASSERT_GREATER_THAN_UINT64(0, rx.errors_ack_tx); + TEST_ASSERT_EQUAL_size_t(1, ctx.received); + + udpard_rx_port_free(&rx, &port); + udpard_tx_free(&src_tx); + TEST_ASSERT_EQUAL_size_t(0, rx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, rx_alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, src_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, src_alloc_payload.allocated_fragments); + instrumented_allocator_reset(&rx_alloc_frag); + instrumented_allocator_reset(&rx_alloc_session); + instrumented_allocator_reset(&src_alloc_transfer); + instrumented_allocator_reset(&src_alloc_payload); +} + +} // namespace + +extern "C" void setUp() {} + +extern "C" void tearDown() {} + +int main() +{ + UNITY_BEGIN(); + RUN_TEST(test_reliable_delivery_under_losses); + RUN_TEST(test_reliable_stats_and_failures); + return UNITY_END(); +} diff --git a/tests/src/test_e2e_random.cpp b/tests/src/test_e2e_random.cpp index 4e94016..5117929 100644 --- a/tests/src/test_e2e_random.cpp +++ b/tests/src/test_e2e_random.cpp @@ -42,10 +42,12 @@ struct ExpectedPayload struct Context { std::unordered_map expected; - size_t received = 0; - size_t collisions = 0; - size_t truncated = 0; - uint64_t remote_uid = 0; + size_t received = 0; + size_t collisions = 0; + size_t truncated = 0; + uint64_t remote_uid = 0; + size_t reliable_feedback_success = 0; + size_t reliable_feedback_failure = 0; std::array remote_endpoints{}; }; @@ -103,6 +105,24 @@ bool capture_tx_frame(udpard_tx_t* const tx, const udpard_tx_ejection_t ejection constexpr udpard_tx_vtable_t tx_vtable{ .eject = &capture_tx_frame }; +void record_feedback(udpard_tx_t*, const udpard_tx_feedback_t fb) +{ + auto* ctx = static_cast(fb.user_transfer_reference); + if (ctx != nullptr) { + if (fb.success) { + ctx->reliable_feedback_success++; + } else { + ctx->reliable_feedback_failure++; + } + } +} + +void on_ack_response(udpard_rx_t*, udpard_rx_port_p2p_t* port, const udpard_rx_transfer_p2p_t tr) +{ + udpard_fragment_free_all(tr.base.payload, port->base.memory.fragment); +} +constexpr udpard_rx_port_p2p_vtable_t ack_callbacks{ &on_ack_response }; + void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) { auto* const ctx = static_cast(rx->user); @@ -110,7 +130,10 @@ void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpar // Match the incoming transfer against the expected table keyed by topic hash and transfer-ID. const TransferKey key{ .transfer_id = transfer.transfer_id, .topic_hash = port->topic_hash }; const auto it = ctx->expected.find(key); - TEST_ASSERT(it != ctx->expected.end()); + if (it == ctx->expected.end()) { + udpard_fragment_free_all(transfer.payload, port->memory.fragment); + return; + } // Gather fragments into a contiguous buffer so we can compare the stored prefix (payload may be truncated). std::vector assembled(transfer.payload_size_stored); @@ -167,6 +190,17 @@ void test_udpard_tx_rx_end_to_end() } udpard_tx_t tx{}; TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x0A0B0C0D0E0F1011ULL, 123U, 256, tx_mem, &tx_vtable)); + instrumented_allocator_t ack_alloc_transfer{}; + instrumented_allocator_t ack_alloc_payload{}; + instrumented_allocator_new(&ack_alloc_transfer); + instrumented_allocator_new(&ack_alloc_payload); + udpard_tx_mem_resources_t ack_mem{}; + ack_mem.transfer = instrumented_allocator_make_resource(&ack_alloc_transfer); + for (auto& res : ack_mem.payload) { + res = instrumented_allocator_make_resource(&ack_alloc_payload); + } + udpard_tx_t ack_tx{}; + TEST_ASSERT_TRUE(udpard_tx_new(&ack_tx, 0x1020304050607080ULL, 321U, 256, ack_mem, &tx_vtable)); // RX allocator setup and shared RX instance with callbacks. instrumented_allocator_t rx_alloc_frag{}; @@ -176,7 +210,16 @@ void test_udpard_tx_rx_end_to_end() const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; udpard_rx_t rx; - udpard_rx_new(&rx, nullptr); + udpard_rx_new(&rx, &ack_tx); + instrumented_allocator_t ack_rx_alloc_frag{}; + instrumented_allocator_t ack_rx_alloc_session{}; + instrumented_allocator_new(&ack_rx_alloc_frag); + instrumented_allocator_new(&ack_rx_alloc_session); + const udpard_rx_mem_resources_t ack_rx_mem{ .session = instrumented_allocator_make_resource(&ack_rx_alloc_session), + .fragment = instrumented_allocator_make_resource(&ack_rx_alloc_frag) }; + udpard_rx_t ack_rx{}; + udpard_rx_port_p2p_t ack_port{}; + udpard_rx_new(&ack_rx, &tx); // Test parameters. constexpr std::array topic_hashes{ 0x123456789ABCDEF0ULL, @@ -202,14 +245,24 @@ void test_udpard_tx_rx_end_to_end() } rx.user = &ctx; constexpr udpard_mem_deleter_t tx_payload_deleter{ .user = nullptr, .free = &tx_refcount_free }; - std::vector frames; + // Ack path wiring. + std::vector frames; tx.user = &frames; + std::vector ack_frames; + ack_tx.user = &ack_frames; + TEST_ASSERT_TRUE( + udpard_rx_port_new_p2p(&ack_port, tx.local_uid, UDPARD_P2P_HEADER_BYTES, ack_rx_mem, &ack_callbacks)); + std::array ack_sources{}; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + ack_sources[i] = { .ip = static_cast(0x0A000020U + i), .port = static_cast(7700U + i) }; + } // Main test loop: generate transfers, push into TX, drain and shuffle frames, push into RX. std::array transfer_ids{ static_cast(rand()), static_cast(rand()), static_cast(rand()) }; - udpard_us_t now = 0; + size_t reliable_total = 0; + udpard_us_t now = 0; for (size_t transfer_index = 0; transfer_index < 1000; transfer_index++) { now += static_cast(random_range(1000, 5000)); frames.clear(); @@ -220,6 +273,10 @@ void test_udpard_tx_rx_end_to_end() const size_t payload_size = random_range(0, 10000); std::vector payload(payload_size); fill_random(payload); + const bool reliable = (random_range(0, 3) == 0); // About a quarter reliable. + if (reliable) { + reliable_total++; + } // Each transfer is sent on all redundant interfaces with different MTUs to exercise fragmentation variety. const udpard_bytes_t payload_view{ .size = payload.size(), .data = payload.data() }; @@ -252,8 +309,8 @@ void test_udpard_tx_rx_end_to_end() dest_per_iface.data(), transfer_id, payload_view, - nullptr, - nullptr)); + reliable ? &record_feedback : nullptr, + reliable ? &ctx : nullptr)); udpard_tx_poll(&tx, now, UDPARD_IFACE_MASK_ALL); // Shuffle and push frames into the RX pipeline, simulating out-of-order redundant arrival. @@ -263,39 +320,89 @@ void test_udpard_tx_rx_end_to_end() arrivals.push_back(Arrival{ .datagram = datagram, .iface_index = iface_index }); } shuffle_frames(arrivals); + const size_t keep_iface = reliable ? random_range(0, UDPARD_IFACE_COUNT_MAX - 1U) : 0U; + const size_t loss_iface = reliable ? ((keep_iface + 1U) % UDPARD_IFACE_COUNT_MAX) : UDPARD_IFACE_COUNT_MAX; + const size_t ack_loss_iface = loss_iface; for (const auto& [datagram, iface_index] : arrivals) { - TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, - &ports[port_index], - now, - ctx.remote_endpoints[iface_index], - datagram, - tx_payload_deleter, - iface_index)); + const bool drop = reliable && (iface_index == loss_iface) && ((rand() % 3) == 0); + if (drop) { + udpard_tx_refcount_dec(udpard_bytes_t{ .size = datagram.size, .data = datagram.data }); + } else { + TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, + &ports[port_index], + now, + ctx.remote_endpoints[iface_index], + datagram, + tx_payload_deleter, + iface_index)); + } now += 1; } // Let the RX pipeline purge timeouts and deliver ready transfers. udpard_rx_poll(&rx, now); + ack_frames.clear(); + udpard_tx_poll(&ack_tx, now, UDPARD_IFACE_MASK_ALL); + bool ack_delivered = false; + for (const auto& [datagram, iface_index] : ack_frames) { + const bool drop_ack = reliable && (iface_index == ack_loss_iface); + if (drop_ack) { + udpard_tx_refcount_dec(udpard_bytes_t{ .size = datagram.size, .data = datagram.data }); + continue; + } + ack_delivered = true; + TEST_ASSERT_TRUE(udpard_rx_port_push(&ack_rx, + reinterpret_cast(&ack_port), + now, + ack_sources[iface_index], + datagram, + tx_payload_deleter, + iface_index)); + } + if (reliable && !ack_delivered && !ack_frames.empty()) { + const auto& [datagram, iface_index] = ack_frames.front(); + TEST_ASSERT_TRUE(udpard_rx_port_push(&ack_rx, + reinterpret_cast(&ack_port), + now, + ack_sources[iface_index], + datagram, + tx_payload_deleter, + iface_index)); + } + udpard_rx_poll(&ack_rx, now); } // Final poll/validation and cleanup. udpard_rx_poll(&rx, now + 1000000); + udpard_rx_poll(&ack_rx, now + 1000000); TEST_ASSERT_TRUE(ctx.expected.empty()); TEST_ASSERT_EQUAL_size_t(1000, ctx.received); TEST_ASSERT_TRUE(ctx.truncated > 0); TEST_ASSERT_EQUAL_size_t(0, ctx.collisions); + TEST_ASSERT_EQUAL_size_t(reliable_total, ctx.reliable_feedback_success); + TEST_ASSERT_EQUAL_size_t(0, ctx.reliable_feedback_failure); for (auto& port : ports) { udpard_rx_port_free(&rx, &port); } + udpard_rx_port_free(&ack_rx, reinterpret_cast(&ack_port)); udpard_tx_free(&tx); + udpard_tx_free(&ack_tx); TEST_ASSERT_EQUAL_size_t(0, rx_alloc_frag.allocated_fragments); TEST_ASSERT_EQUAL_size_t(0, rx_alloc_session.allocated_fragments); TEST_ASSERT_EQUAL_size_t(0, tx_alloc_transfer.allocated_fragments); TEST_ASSERT_EQUAL_size_t(0, tx_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, ack_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, ack_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, ack_rx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, ack_rx_alloc_session.allocated_fragments); instrumented_allocator_reset(&rx_alloc_frag); instrumented_allocator_reset(&rx_alloc_session); instrumented_allocator_reset(&tx_alloc_transfer); instrumented_allocator_reset(&tx_alloc_payload); + instrumented_allocator_reset(&ack_alloc_transfer); + instrumented_allocator_reset(&ack_alloc_payload); + instrumented_allocator_reset(&ack_rx_alloc_frag); + instrumented_allocator_reset(&ack_rx_alloc_session); } } // namespace diff --git a/tests/src/test_intrusive_rx.c b/tests/src/test_intrusive_rx.c index 5ea54c5..f451a87 100644 --- a/tests/src/test_intrusive_rx.c +++ b/tests/src/test_intrusive_rx.c @@ -2919,9 +2919,16 @@ static void test_rx_additional_coverage(void) udpard_rx_port_p2p_t port_p2p = { .vtable = &(udpard_rx_port_p2p_vtable_t){ .on_message = stub_on_message_p2p }, .base = { .memory = mem } }; byte_t p2p_header[UDPARD_P2P_HEADER_BYTES] = { P2P_KIND_ACK }; - udpard_fragment_t frag = { .view = { .data = p2p_header, .size = UDPARD_P2P_HEADER_BYTES }, - .origin = { .data = NULL, .size = 0 } }; - udpard_rx_transfer_t transfer = { .payload = &frag, + void* ack_buf = mem.fragment.alloc(mem.fragment.user, UDPARD_P2P_HEADER_BYTES); + TEST_ASSERT_NOT_NULL(ack_buf); + memcpy(ack_buf, p2p_header, UDPARD_P2P_HEADER_BYTES); + udpard_fragment_t* frag = (udpard_fragment_t*)mem.fragment.alloc(mem.fragment.user, sizeof(udpard_fragment_t)); + TEST_ASSERT_NOT_NULL(frag); + mem_zero(sizeof(*frag), frag); + frag->view = (udpard_bytes_t){ .data = ack_buf, .size = UDPARD_P2P_HEADER_BYTES }; + frag->origin = (udpard_bytes_mut_t){ .data = ack_buf, .size = UDPARD_P2P_HEADER_BYTES }; + frag->payload_deleter = instrumented_allocator_make_deleter(&alloc_frag); + udpard_rx_transfer_t transfer = { .payload = frag, .payload_size_stored = UDPARD_P2P_HEADER_BYTES, .payload_size_wire = UDPARD_P2P_HEADER_BYTES }; rx_p2p_on_message(&rx, (udpard_rx_port_t*)&port_p2p, transfer); From 6d5aa3fdab104233192f17fa6c4fa6707d2d1cee Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sun, 28 Dec 2025 16:59:57 +0200 Subject: [PATCH 29/42] fix tests and add coverage to CI --- .github/workflows/main.yml | 32 +++++++++++++++++++++++++++++++- tests/CMakeLists.txt | 10 +++++++--- tests/src/test_e2e_edge.cpp | 5 ++--- 3 files changed, 40 insertions(+), 7 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 63aace2..802ac3f 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -55,7 +55,7 @@ jobs: - toolchain: gcc c-compiler: gcc cxx-compiler: g++ - cxx-flags: -fno-strict-aliasing # GCC in MinSizeRel C++20 mode misoptimizes the Cavl test. + cxx-flags: "" - toolchain: clang c-compiler: clang cxx-compiler: clang++ @@ -87,6 +87,36 @@ jobs: path: ${{github.workspace}}/**/* retention-days: 2 + coverage: + if: github.event_name == 'push' + runs-on: ubuntu-latest + container: ghcr.io/opencyphal/toolshed:ts24.4.3 + steps: + - uses: actions/checkout@v4 + with: + submodules: true + # language=bash + - run: > + cmake + -B ${{ github.workspace }}/build + -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} + -DCMAKE_C_COMPILER=${{ matrix.c-compiler }} + -DCMAKE_CXX_COMPILER=${{ matrix.cxx-compiler }} + -DCMAKE_CXX_FLAGS="${{ matrix.cxx-flags }}" + -DNO_STATIC_ANALYSIS=ON + -DENABLE_COVERAGE=ON + . + # language=bash + - run: | + cd ${{ github.workspace }}/build + make -j$(nproc) && make test && make coverage + - uses: actions/upload-artifact@v4 + if: always() + with: + name: ${{github.job}}-#${{strategy.job-index}}-${{job.status}} + path: ${{github.workspace}}/**/* + retention-days: 30 + # TODO: re-enable this # avr: # if: github.event_name == 'push' diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 06be236..bfdf275 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -80,6 +80,10 @@ function(gen_test_matrix name files) gen_test("${name}_x32_c11" "${files}" "" "-m32" "-m32" "11") endfunction() +function(gen_test_single name files) # When the full matrix is not needed, to keep pipelines fast. + gen_test("${name}" "${files}" "" "-m32" "-m32" "11") +endfunction() + # Add the test targets. # Those that are written in C may #include to reach its internals; they are called "intrusive". # The public interface tests may be written in C++ for convenience. @@ -89,9 +93,9 @@ gen_test_matrix(test_intrusive_misc "src/test_intrusive_misc.c") gen_test_matrix(test_intrusive_tx "src/test_intrusive_tx.c") gen_test_matrix(test_intrusive_rx "src/test_intrusive_rx.c") gen_test_matrix(test_fragment "src/test_fragment.cpp;${library_dir}/udpard.c") -gen_test_matrix(test_e2e_random "src/test_e2e_random.cpp;${library_dir}/udpard.c") -gen_test_matrix(test_e2e_edge "src/test_e2e_edge.cpp;${library_dir}/udpard.c") -gen_test_matrix(test_e2e_api "src/test_e2e_api.cpp;${library_dir}/udpard.c") +gen_test_single(test_e2e_random "src/test_e2e_random.cpp;${library_dir}/udpard.c") +gen_test_single(test_e2e_edge "src/test_e2e_edge.cpp;${library_dir}/udpard.c") +gen_test_single(test_e2e_api "src/test_e2e_api.cpp;${library_dir}/udpard.c") # Coverage targets. Usage: # cmake -DENABLE_COVERAGE=ON .. diff --git a/tests/src/test_e2e_edge.cpp b/tests/src/test_e2e_edge.cpp index 459e368..082460f 100644 --- a/tests/src/test_e2e_edge.cpp +++ b/tests/src/test_e2e_edge.cpp @@ -454,8 +454,7 @@ void test_udpard_tx_push_p2p() payload_buf[16U + i] = static_cast((response_transfer_id >> (i * 8U)) & 0xFFU); } const udpard_bytes_t payload{ .size = payload_buf.size(), .data = payload_buf.data() }; - const udpard_us_t now = 0; - const uint64_t first_id = tx.p2p_transfer_id; + const udpard_us_t now = 0; TEST_ASSERT_GREATER_THAN_UINT32( 0U, udpard_tx_push_p2p(&tx, now, now + 1000000, udpard_prio_nominal, remote, payload, nullptr, nullptr)); udpard_tx_poll(&tx, now, UDPARD_IFACE_MASK_ALL); @@ -468,7 +467,7 @@ void test_udpard_tx_push_p2p() } udpard_rx_poll(&rx, now); TEST_ASSERT_EQUAL_size_t(1, ctx.ids.size()); - TEST_ASSERT_EQUAL_UINT64(first_id, ctx.ids[0]); + TEST_ASSERT_EQUAL_UINT64(response_transfer_id, ctx.ids[0]); TEST_ASSERT_EQUAL_size_t(0, ctx.collisions); udpard_rx_port_free(&rx, reinterpret_cast(&port)); From fab1e382e222b03082b23cbc7a592eea3f5f9258 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sun, 28 Dec 2025 17:21:07 +0200 Subject: [PATCH 30/42] ci --- .github/workflows/main.yml | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 802ac3f..3b2b58c 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -97,15 +97,7 @@ jobs: submodules: true # language=bash - run: > - cmake - -B ${{ github.workspace }}/build - -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} - -DCMAKE_C_COMPILER=${{ matrix.c-compiler }} - -DCMAKE_CXX_COMPILER=${{ matrix.cxx-compiler }} - -DCMAKE_CXX_FLAGS="${{ matrix.cxx-flags }}" - -DNO_STATIC_ANALYSIS=ON - -DENABLE_COVERAGE=ON - . + cmake -B ${{ github.workspace }}/build -DCMAKE_BUILD_TYPE=Debug -DNO_STATIC_ANALYSIS=ON -DENABLE_COVERAGE=ON . # language=bash - run: | cd ${{ github.workspace }}/build From 6d12306c008dfab6e7da16430c10ffe07d1fc598 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sun, 28 Dec 2025 18:06:21 +0200 Subject: [PATCH 31/42] scattered tx buffers --- README.md | 4 +- libudpard/udpard.c | 192 +++++++++++++++++++++------------- libudpard/udpard.h | 50 +++++---- tests/src/helpers.h | 10 ++ tests/src/test_e2e_api.cpp | 16 +-- tests/src/test_e2e_edge.cpp | 51 +++------ tests/src/test_e2e_random.cpp | 10 +- tests/src/test_intrusive_tx.c | 87 +++++++++++---- 8 files changed, 251 insertions(+), 169 deletions(-) diff --git a/README.md b/README.md index 93b079c..cbd4861 100644 --- a/README.md +++ b/README.md @@ -21,6 +21,7 @@ next-generation intelligent vehicles: manned and unmanned aircraft, spacecraft, ## Features - Zero-copy RX pipeline -- payload is moved from the NIC driver all the way to the application without copying. +- ≤1-copy TX pipeline with deduplication across multiple interfaces and scattered input buffer support. - Support for redundant network interfaces with seamless interface aggregation and zero fail-over delay. - Robust message reassembler supporting highly distorted datagram streams: out-of-order fragments, message ordering recovery, fragment/message deduplication, interleaving, variable MTU, ... @@ -29,13 +30,12 @@ next-generation intelligent vehicles: manned and unmanned aircraft, spacecraft, - Packet loss mitigation via: - redundant interfaces (packet lost on one interface may be received on another, transparent to the application); - reliable topics (retransmit until acknowledged; callback notifications for successful/failed deliveries). -- Single-copy TX pipeline with fragment deduplication across multiple interfaces and reference counting. - Heap not required; the library can be used with fixed-size block pool allocators. - Detailed time complexity and memory requirement models for the benefit of real-time high-integrity applications. - Runs anywhere out of the box, including extremely resource-constrained baremetal environments with ~100K ROM/RAM. No porting required. - Partial MISRA C compliance (reach out to ). -- Full implementation in a single C file with only ~2k lines of straightforward C99! +- Full implementation in a single C file with only 2k lines of straightforward C99! - Extensive test coverage. ## Usage diff --git a/libudpard/udpard.c b/libudpard/udpard.c index 86f9205..7d3f28a 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -155,26 +155,6 @@ static const byte_t* deserialize_u64(const byte_t* ptr, uint64_t* const out_valu // NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling) static void mem_zero(const size_t size, void* const data) { (void)memset(data, 0, size); } -/// We require that the fragment tree does not contain fully-contained or equal-range fragments. This implies that no -/// two fragments have the same offset, and that fragments ordered by offset also order by their ends. -static int32_t cavl_compare_fragment_offset(const void* const user, const udpard_tree_t* const node) -{ - const size_t u = *(const size_t*)user; - const size_t v = ((const udpard_fragment_t*)node)->offset; // clang-format off - if (u < v) { return -1; } - if (u > v) { return +1; } - return 0; // clang-format on -} -static int32_t cavl_compare_fragment_end(const void* const user, const udpard_tree_t* const node) -{ - const size_t u = *(const size_t*)user; - const udpard_fragment_t* const f = (const udpard_fragment_t*)node; - const size_t v = f->offset + f->view.size; // clang-format off - if (u < v) { return -1; } - if (u > v) { return +1; } - return 0; // clang-format on -} - bool udpard_is_valid_endpoint(const udpard_udpip_ep_t ep) { return (ep.port != 0) && (ep.ip != 0) && (ep.ip != UINT32_MAX); @@ -196,6 +176,58 @@ udpard_udpip_ep_t udpard_make_subject_endpoint(const uint32_t subject_id) return (udpard_udpip_ep_t){ .ip = IPv4_MCAST_PREFIX | (subject_id & UDPARD_IPv4_SUBJECT_ID_MAX), .port = UDP_PORT }; } +typedef struct +{ + const udpard_bytes_scattered_t* cursor; ///< Initially points at the head. + size_t position; ///< Position within the current fragment, initially zero. +} bytes_scattered_reader_t; + +/// Sequentially reads data from a scattered byte array into a contiguous destination buffer. +/// Requires that the total amount of read data does not exceed the total size of the scattered array. +static void bytes_scattered_read(bytes_scattered_reader_t* const reader, const size_t size, void* const destination) +{ + UDPARD_ASSERT((reader != NULL) && (reader->cursor != NULL) && (destination != NULL)); + byte_t* ptr = (byte_t*)destination; + size_t remaining = size; + while (remaining > 0U) { + UDPARD_ASSERT(reader->position <= reader->cursor->bytes.size); + while (reader->position == reader->cursor->bytes.size) { // Advance while skipping empty fragments. + reader->position = 0U; + reader->cursor = reader->cursor->next; + UDPARD_ASSERT(reader->cursor != NULL); + } + UDPARD_ASSERT(reader->position < reader->cursor->bytes.size); + const size_t progress = smaller(remaining, reader->cursor->bytes.size - reader->position); + UDPARD_ASSERT((progress > 0U) && (progress <= remaining)); + UDPARD_ASSERT((reader->position + progress) <= reader->cursor->bytes.size); + // NOLINTNEXTLINE(*DeprecatedOrUnsafeBufferHandling) + (void)memcpy(ptr, ((const byte_t*)reader->cursor->bytes.data) + reader->position, progress); + ptr += progress; + remaining -= progress; + reader->position += progress; + } +} + +/// We require that the fragment tree does not contain fully-contained or equal-range fragments. This implies that no +/// two fragments have the same offset, and that fragments ordered by offset also order by their ends. +static int32_t cavl_compare_fragment_offset(const void* const user, const udpard_tree_t* const node) +{ + const size_t u = *(const size_t*)user; + const size_t v = ((const udpard_fragment_t*)node)->offset; // clang-format off + if (u < v) { return -1; } + if (u > v) { return +1; } + return 0; // clang-format on +} +static int32_t cavl_compare_fragment_end(const void* const user, const udpard_tree_t* const node) +{ + const size_t u = *(const size_t*)user; + const udpard_fragment_t* const f = (const udpard_fragment_t*)node; + const size_t v = f->offset + f->view.size; // clang-format off + if (u < v) { return -1; } + if (u > v) { return +1; } + return 0; // clang-format on +} + // NOLINTNEXTLINE(misc-no-recursion) void udpard_fragment_free_all(udpard_fragment_t* const frag, const udpard_mem_resource_t mem_fragment) { @@ -711,22 +743,22 @@ static bool tx_is_pending(const udpard_tx_t* const tx, const tx_transfer_t* cons } /// Returns the head of the transfer chain; NULL on OOM. -static tx_frame_t* tx_spool(udpard_tx_t* const tx, - const udpard_mem_resource_t memory, - const size_t mtu, - const meta_t meta, - const udpard_bytes_t payload) +static tx_frame_t* tx_spool(udpard_tx_t* const tx, + const udpard_mem_resource_t memory, + const size_t mtu, + const meta_t meta, + const udpard_bytes_scattered_t payload) { UDPARD_ASSERT(mtu > 0); - UDPARD_ASSERT((payload.data != NULL) || (payload.size == 0U)); - uint32_t prefix_crc = CRC_INITIAL; - tx_frame_t* head = NULL; - tx_frame_t* tail = NULL; - size_t frame_index = 0U; - size_t offset = 0U; + uint32_t prefix_crc = CRC_INITIAL; + tx_frame_t* head = NULL; + tx_frame_t* tail = NULL; + size_t frame_index = 0U; + size_t offset = 0U; + bytes_scattered_reader_t reader = { .cursor = &payload, .position = 0U }; do { // Compute the size of the next frame, allocate it and link it up in the chain. - const size_t progress = smaller(payload.size - offset, mtu); + const size_t progress = smaller(meta.transfer_payload_size - offset, mtu); tx_frame_t* const item = tx_frame_new(tx, memory, progress + HEADER_SIZE_BYTES); if (NULL == head) { head = item; @@ -744,17 +776,19 @@ static tx_frame_t* tx_spool(udpard_tx_t* const tx, break; } // Populate the frame contents. - const byte_t* const read_ptr = ((const byte_t*)payload.data) + offset; - prefix_crc = crc_add(prefix_crc, progress, read_ptr); - byte_t* const write_ptr = + byte_t* const payload_ptr = &tail->data[HEADER_SIZE_BYTES]; + bytes_scattered_read(&reader, progress, payload_ptr); + prefix_crc = crc_add(prefix_crc, progress, payload_ptr); + const byte_t* const end_of_header = header_serialize(tail->data, meta, (uint32_t)frame_index, (uint32_t)offset, prefix_crc ^ CRC_OUTPUT_XOR); - (void)memcpy(write_ptr, read_ptr, progress); // NOLINT(*DeprecatedOrUnsafeBufferHandling) + UDPARD_ASSERT(end_of_header == payload_ptr); + (void)end_of_header; // Advance the state. ++frame_index; offset += progress; - UDPARD_ASSERT(offset <= payload.size); - } while (offset < payload.size); - UDPARD_ASSERT((offset == payload.size) || ((head == NULL) && (tail == NULL))); + UDPARD_ASSERT(offset <= meta.transfer_payload_size); + } while (offset < meta.transfer_payload_size); + UDPARD_ASSERT((offset == meta.transfer_payload_size) || ((head == NULL) && (tail == NULL))); return head; } @@ -802,12 +836,12 @@ static size_t tx_predict_frame_count(const size_t mtu[UDPARD_IFAC return n_frames_total; } -static uint32_t tx_push(udpard_tx_t* const tx, - const udpard_us_t now, - const udpard_us_t deadline, - const meta_t meta, - const udpard_udpip_ep_t endpoint[UDPARD_IFACE_COUNT_MAX], - const udpard_bytes_t payload, +static uint32_t tx_push(udpard_tx_t* const tx, + const udpard_us_t now, + const udpard_us_t deadline, + const meta_t meta, + const udpard_udpip_ep_t endpoint[UDPARD_IFACE_COUNT_MAX], + const udpard_bytes_scattered_t payload, void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), void* const user_transfer_reference, tx_transfer_t** const out_transfer) @@ -815,7 +849,6 @@ static uint32_t tx_push(udpard_tx_t* const tx, UDPARD_ASSERT(now <= deadline); UDPARD_ASSERT(tx != NULL); UDPARD_ASSERT(valid_ep_mask(endpoint) != 0); - UDPARD_ASSERT((payload.data != NULL) || (payload.size == 0U)); // Ensure the queue has enough space. for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { @@ -975,15 +1008,24 @@ static void tx_send_ack(udpard_rx_t* const rx, // Enqueue the transfer. const udpard_bytes_t payload = { .size = UDPARD_P2P_HEADER_BYTES, .data = header }; - const meta_t meta = { .priority = priority, - .flag_ack = false, - .transfer_payload_size = (uint32_t)payload.size, - .transfer_id = tx->p2p_transfer_id++, - .sender_uid = tx->local_uid, - .topic_hash = remote.uid }; - tx_transfer_t* tr = NULL; - const uint32_t count = - tx_push(tx, now, now + ACK_TX_DEADLINE, meta, remote.endpoints, payload, NULL, NULL, &tr); + const meta_t meta = { + .priority = priority, + .flag_ack = false, + .transfer_payload_size = (uint32_t)payload.size, + .transfer_id = tx->p2p_transfer_id++, + .sender_uid = tx->local_uid, + .topic_hash = remote.uid, + }; + tx_transfer_t* tr = NULL; + const uint32_t count = tx_push(tx, + now, + now + ACK_TX_DEADLINE, + meta, + remote.endpoints, + (udpard_bytes_scattered_t){ .bytes = payload, .next = NULL }, + NULL, + NULL, + &tr); UDPARD_ASSERT(count <= 1); if (count == 1) { // ack is always a single-frame transfer, so we get either 0 or 1 UDPARD_ASSERT(tr != NULL); @@ -1035,29 +1077,37 @@ bool udpard_tx_new(udpard_tx_t* const self, return ok; } -uint32_t udpard_tx_push(udpard_tx_t* const self, - const udpard_us_t now, - const udpard_us_t deadline, - const udpard_prio_t priority, - const uint64_t topic_hash, - const udpard_udpip_ep_t remote_ep[UDPARD_IFACE_COUNT_MAX], - const uint64_t transfer_id, - const udpard_bytes_t payload, +uint32_t udpard_tx_push(udpard_tx_t* const self, + const udpard_us_t now, + const udpard_us_t deadline, + const udpard_prio_t priority, + const uint64_t topic_hash, + const udpard_udpip_ep_t remote_ep[UDPARD_IFACE_COUNT_MAX], + const uint64_t transfer_id, + const udpard_bytes_scattered_t payload, void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), void* const user_transfer_reference) { uint32_t out = 0; const bool ok = (self != NULL) && (deadline >= now) && (now >= 0) && (self->local_uid != 0) && (valid_ep_mask(remote_ep) != 0) && (priority <= UDPARD_PRIORITY_MAX) && - ((payload.data != NULL) || (payload.size == 0U)) && + ((payload.bytes.data != NULL) || (payload.bytes.size == 0U)) && (tx_transfer_find(self, topic_hash, transfer_id) == NULL); if (ok) { // Before attempting to enqueue a new transfer, we need to update the transmission scheduler. // It may release some items from the tx queue, and it may also promote some staged transfers to the queue. udpard_tx_poll(self, now, UDPARD_IFACE_MASK_ALL); + // Compute the total payload size. + size_t size = payload.bytes.size; + const udpard_bytes_scattered_t* current = payload.next; + while (current != NULL) { + size += current->bytes.size; + current = current->next; + }; + // Enqueue the transfer. const meta_t meta = { .priority = priority, .flag_ack = feedback != NULL, - .transfer_payload_size = (uint32_t)payload.size, + .transfer_payload_size = (uint32_t)size, .transfer_id = transfer_id, .sender_uid = self->local_uid, .topic_hash = topic_hash }; @@ -1066,12 +1116,12 @@ uint32_t udpard_tx_push(udpard_tx_t* const self, return out; } -uint32_t udpard_tx_push_p2p(udpard_tx_t* const self, - const udpard_us_t now, - const udpard_us_t deadline, - const udpard_prio_t priority, - const udpard_remote_t remote, - const udpard_bytes_t payload, +uint32_t udpard_tx_push_p2p(udpard_tx_t* const self, + const udpard_us_t now, + const udpard_us_t deadline, + const udpard_prio_t priority, + const udpard_remote_t remote, + const udpard_bytes_scattered_t payload, void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), void* const user_transfer_reference) { diff --git a/libudpard/udpard.h b/libudpard/udpard.h index 21cd274..95c6ebf 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -139,18 +139,24 @@ typedef struct udpard_list_t udpard_list_member_t* tail; ///< NULL if list empty } udpard_list_t; -typedef struct udpard_bytes_mut_t -{ - size_t size; - void* data; -} udpard_bytes_mut_t; - typedef struct udpard_bytes_t { size_t size; const void* data; } udpard_bytes_t; +typedef struct udpard_bytes_scattered_t +{ + udpard_bytes_t bytes; + const struct udpard_bytes_scattered_t* next; ///< NULL in the last fragment. +} udpard_bytes_scattered_t; + +typedef struct udpard_bytes_mut_t +{ + size_t size; + void* data; +} udpard_bytes_mut_t; + /// Zeros if invalid/unset/unavailable. typedef struct udpard_udpip_ep_t { @@ -432,10 +438,12 @@ bool udpard_tx_new(udpard_tx_t* const self, /// The user_transfer_reference is an opaque pointer that will be stored for each enqueued item of this transfer. /// The library itself does not use or check this value in any way, so it can be NULL if not needed. /// -/// The function returns the number of payload fragments created, which is always a positive number, in case of success. +/// The function returns the number of payload fragments enqueued, which is always a positive number, on success. /// In case of failure, the function returns zero. Runtime failures increment the corresponding error counters, /// while invocations with invalid arguments just return zero without modifying the queue state. /// +/// The enqueued transfer will be emitted over all interfaces for which a valid (non-zero) remote endpoint is provided. +/// /// An attempt to push a transfer with a (topic hash, transfer-ID) pair that is already enqueued will fail, /// as that violates the transfer-ID uniqueness requirement stated above. /// @@ -450,25 +458,25 @@ bool udpard_tx_new(udpard_tx_t* const self, /// On success, the function allocates a single transfer state instance and a number of payload fragments. /// The time complexity is O(p + log e), where p is the transfer payload size, and e is the number of /// transfers already enqueued in the transmission queue. -uint32_t udpard_tx_push(udpard_tx_t* const self, - const udpard_us_t now, - const udpard_us_t deadline, - const udpard_prio_t priority, - const uint64_t topic_hash, - const udpard_udpip_ep_t remote_ep[UDPARD_IFACE_COUNT_MAX], // May be invalid for some ifaces. - const uint64_t transfer_id, - const udpard_bytes_t payload, +uint32_t udpard_tx_push(udpard_tx_t* const self, + const udpard_us_t now, + const udpard_us_t deadline, + const udpard_prio_t priority, + const uint64_t topic_hash, + const udpard_udpip_ep_t remote_ep[UDPARD_IFACE_COUNT_MAX], + const uint64_t transfer_id, + const udpard_bytes_scattered_t payload, void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), // NULL if best-effort. void* const user_transfer_reference); /// Specialization for P2P transfers. The semantics are identical to udpard_tx_push(). /// The transfer-ID will be provided by the library based on the udpard_tx_t::p2p_transfer_id counter. -uint32_t udpard_tx_push_p2p(udpard_tx_t* const self, - const udpard_us_t now, - const udpard_us_t deadline, - const udpard_prio_t priority, - const udpard_remote_t remote, // Endpoints may be invalid for some ifaces. - const udpard_bytes_t payload, +uint32_t udpard_tx_push_p2p(udpard_tx_t* const self, + const udpard_us_t now, + const udpard_us_t deadline, + const udpard_prio_t priority, + const udpard_remote_t remote, // Endpoints may be invalid for some ifaces. + const udpard_bytes_scattered_t payload, void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), // NULL if best-effort. void* const user_transfer_reference); diff --git a/tests/src/helpers.h b/tests/src/helpers.h index c26d23a..f3342cc 100644 --- a/tests/src/helpers.h +++ b/tests/src/helpers.h @@ -54,6 +54,16 @@ static inline void dummy_free(void* const user, const size_t size, void* const p TEST_PANIC_UNLESS(pointer == NULL); } +// Single-fragment scatter helper. +static inline udpard_bytes_scattered_t make_scattered(const void* const data, const size_t size) +{ + udpard_bytes_scattered_t out; + out.bytes.size = size; + out.bytes.data = data; + out.next = NULL; + return out; +} + /// The instrumented allocator tracks memory consumption, checks for heap corruption, and can be configured to fail /// allocations above a certain threshold. #define INSTRUMENTED_ALLOCATOR_CANARY_SIZE 1024U diff --git a/tests/src/test_e2e_api.cpp b/tests/src/test_e2e_api.cpp index f0db682..a31210f 100644 --- a/tests/src/test_e2e_api.cpp +++ b/tests/src/test_e2e_api.cpp @@ -206,8 +206,8 @@ void test_reliable_delivery_under_losses() sub_rx.user = &ctx; // Reliable transfer with staged losses. - FeedbackState fb{}; - const udpard_bytes_t payload_view{ .size = payload.size(), .data = payload.data() }; + FeedbackState fb{}; + const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size()); std::array dest_per_iface = subscriber_endpoints; pub_tx.mtu[0] = 600; pub_tx.mtu[1] = 900; @@ -324,9 +324,9 @@ void test_reliable_stats_and_failures() std::vector exp_frames; TEST_ASSERT_TRUE(udpard_tx_new(&exp_tx, 0x9999000011112222ULL, 2U, 4, exp_mem, &tx_vtable)); exp_tx.user = &exp_frames; - FeedbackState fb_fail{}; - const udpard_udpip_ep_t exp_dest[UDPARD_IFACE_COUNT_MAX] = { udpard_make_subject_endpoint(99U), {}, {} }; - const udpard_bytes_t exp_payload{ .size = 4, .data = "ping" }; + FeedbackState fb_fail{}; + const udpard_udpip_ep_t exp_dest[UDPARD_IFACE_COUNT_MAX] = { udpard_make_subject_endpoint(99U), {}, {} }; + const udpard_bytes_scattered_t exp_payload = make_scattered("ping", 4); TEST_ASSERT_GREATER_THAN_UINT32( 0U, udpard_tx_push( @@ -378,9 +378,9 @@ void test_reliable_stats_and_failures() TEST_ASSERT_TRUE( udpard_rx_port_new(&port, 0x12340000ULL, 64, UDPARD_RX_REORDERING_WINDOW_UNORDERED, rx_mem, &callbacks)); - const udpard_udpip_ep_t src_dest[UDPARD_IFACE_COUNT_MAX] = { udpard_make_subject_endpoint(12U), {}, {} }; - const udpard_bytes_t src_payload{ .size = ctx.expected.size(), .data = ctx.expected.data() }; - FeedbackState fb_ignore{}; + const udpard_udpip_ep_t src_dest[UDPARD_IFACE_COUNT_MAX] = { udpard_make_subject_endpoint(12U), {}, {} }; + const udpard_bytes_scattered_t src_payload = make_scattered(ctx.expected.data(), ctx.expected.size()); + FeedbackState fb_ignore{}; TEST_ASSERT_GREATER_THAN_UINT32( 0U, udpard_tx_push( diff --git a/tests/src/test_e2e_edge.cpp b/tests/src/test_e2e_edge.cpp index 082460f..1e42260 100644 --- a/tests/src/test_e2e_edge.cpp +++ b/tests/src/test_e2e_edge.cpp @@ -148,8 +148,8 @@ struct Fixture for (size_t i = 0; i < payload_buf.size(); i++) { payload_buf[i] = static_cast(transfer_id >> (i * 8U)); } - const udpard_bytes_t payload{ .size = payload_buf.size(), .data = payload_buf.data() }; - const udpard_us_t deadline = ts + 1000000; + const udpard_bytes_scattered_t payload = make_scattered(payload_buf.data(), payload_buf.size()); + const udpard_us_t deadline = ts + 1000000; for (auto& mtu_value : tx.mtu) { mtu_value = UDPARD_MTU_DEFAULT; } @@ -326,9 +326,7 @@ void test_udpard_tx_feedback_always_called() FbState fb{}; udpard_udpip_ep_t dests[UDPARD_IFACE_COUNT_MAX] = { endpoint, {} }; TEST_ASSERT_GREATER_THAN_UINT32( - 0, - udpard_tx_push( - &tx, 10, 10, udpard_prio_fast, 1, dests, 11, udpard_bytes_t{ .size = 0, .data = nullptr }, fb_record, &fb)); + 0, udpard_tx_push(&tx, 10, 10, udpard_prio_fast, 1, dests, 11, make_scattered(nullptr, 0), fb_record, &fb)); udpard_tx_poll(&tx, 11, UDPARD_IFACE_MASK_ALL); TEST_ASSERT_EQUAL_size_t(1, fb.count); TEST_ASSERT_FALSE(fb.success); @@ -345,27 +343,11 @@ void test_udpard_tx_feedback_always_called() FbState fb_old{}; FbState fb_new{}; udpard_udpip_ep_t dests[UDPARD_IFACE_COUNT_MAX] = { endpoint, {} }; - TEST_ASSERT_GREATER_THAN_UINT32(0, - udpard_tx_push(&tx, - 0, - 1000, - udpard_prio_fast, - 2, - dests, - 21, - udpard_bytes_t{ .size = 0, .data = nullptr }, - fb_record, - &fb_old)); - (void)udpard_tx_push(&tx, - 0, - 1000, - udpard_prio_fast, - 3, - dests, - 22, - udpard_bytes_t{ .size = 0, .data = nullptr }, - fb_record, - &fb_new); + TEST_ASSERT_GREATER_THAN_UINT32( + 0, + udpard_tx_push(&tx, 0, 1000, udpard_prio_fast, 2, dests, 21, make_scattered(nullptr, 0), fb_record, &fb_old)); + (void)udpard_tx_push( + &tx, 0, 1000, udpard_prio_fast, 3, dests, 22, make_scattered(nullptr, 0), fb_record, &fb_new); TEST_ASSERT_EQUAL_size_t(1, fb_old.count); TEST_ASSERT_FALSE(fb_old.success); TEST_ASSERT_GREATER_OR_EQUAL_UINT64(1, tx.errors_sacrifice); @@ -382,17 +364,8 @@ void test_udpard_tx_feedback_always_called() tx.user = &frames; FbState fb{}; udpard_udpip_ep_t dests[UDPARD_IFACE_COUNT_MAX] = { endpoint, {} }; - TEST_ASSERT_GREATER_THAN_UINT32(0, - udpard_tx_push(&tx, - 0, - 1000, - udpard_prio_fast, - 4, - dests, - 33, - udpard_bytes_t{ .size = 0, .data = nullptr }, - fb_record, - &fb)); + TEST_ASSERT_GREATER_THAN_UINT32( + 0, udpard_tx_push(&tx, 0, 1000, udpard_prio_fast, 4, dests, 33, make_scattered(nullptr, 0), fb_record, &fb)); udpard_tx_free(&tx); TEST_ASSERT_EQUAL_size_t(1, fb.count); TEST_ASSERT_FALSE(fb.success); @@ -453,8 +426,8 @@ void test_udpard_tx_push_p2p() for (size_t i = 0; i < sizeof(response_transfer_id); i++) { payload_buf[16U + i] = static_cast((response_transfer_id >> (i * 8U)) & 0xFFU); } - const udpard_bytes_t payload{ .size = payload_buf.size(), .data = payload_buf.data() }; - const udpard_us_t now = 0; + const udpard_bytes_scattered_t payload = make_scattered(payload_buf.data(), payload_buf.size()); + const udpard_us_t now = 0; TEST_ASSERT_GREATER_THAN_UINT32( 0U, udpard_tx_push_p2p(&tx, now, now + 1000000, udpard_prio_nominal, remote, payload, nullptr, nullptr)); udpard_tx_poll(&tx, now, UDPARD_IFACE_MASK_ALL); diff --git a/tests/src/test_e2e_random.cpp b/tests/src/test_e2e_random.cpp index 5117929..ee33257 100644 --- a/tests/src/test_e2e_random.cpp +++ b/tests/src/test_e2e_random.cpp @@ -279,11 +279,11 @@ void test_udpard_tx_rx_end_to_end() } // Each transfer is sent on all redundant interfaces with different MTUs to exercise fragmentation variety. - const udpard_bytes_t payload_view{ .size = payload.size(), .data = payload.data() }; - const auto priority = static_cast(random_range(0, UDPARD_PRIORITY_MAX)); - const udpard_udpip_ep_t dest = udpard_make_subject_endpoint(subject_ids[port_index]); - const TransferKey key{ .transfer_id = transfer_id, .topic_hash = topic_hashes[port_index] }; - const bool inserted = + const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size()); + const auto priority = static_cast(random_range(0, UDPARD_PRIORITY_MAX)); + const udpard_udpip_ep_t dest = udpard_make_subject_endpoint(subject_ids[port_index]); + const TransferKey key{ .transfer_id = transfer_id, .topic_hash = topic_hashes[port_index] }; + const bool inserted = ctx.expected.emplace(key, ExpectedPayload{ .payload = payload, .payload_size_wire = payload.size() }).second; TEST_ASSERT_TRUE(inserted); diff --git a/tests/src/test_intrusive_tx.c b/tests/src/test_intrusive_tx.c index 4bd0dc9..bcc056e 100644 --- a/tests/src/test_intrusive_tx.c +++ b/tests/src/test_intrusive_tx.c @@ -52,6 +52,40 @@ static void record_feedback(udpard_tx_t* const tx, const udpard_tx_feedback_t fb // Minimal endpoint helper. static udpard_udpip_ep_t make_ep(const uint32_t ip) { return (udpard_udpip_ep_t){ .ip = ip, .port = 1U }; } +static void test_bytes_scattered_read(void) +{ + // Skips empty fragments and spans boundaries. + { + const byte_t frag_a[] = { 1U, 2U, 3U }; + const byte_t frag_c[] = { 4U, 5U, 6U, 7U, 8U }; + const udpard_bytes_scattered_t frag3 = { .bytes = { .size = sizeof(frag_c), .data = frag_c }, .next = NULL }; + const udpard_bytes_scattered_t frag2 = { .bytes = { .size = 0U, .data = NULL }, .next = &frag3 }; + const udpard_bytes_scattered_t frag1 = { .bytes = { .size = sizeof(frag_a), .data = frag_a }, .next = &frag2 }; + const udpard_bytes_scattered_t frag0 = { .bytes = { .size = 0U, .data = NULL }, .next = &frag1 }; + bytes_scattered_reader_t reader = { .cursor = &frag0, .position = 0U }; + byte_t out[7] = { 0 }; + bytes_scattered_read(&reader, sizeof(out), out); + const byte_t expected[] = { 1U, 2U, 3U, 4U, 5U, 6U, 7U }; + TEST_ASSERT_EQUAL_UINT8_ARRAY(expected, out, sizeof(expected)); + TEST_ASSERT_EQUAL_PTR(&frag3, reader.cursor); + TEST_ASSERT_EQUAL_size_t(4U, reader.position); + } + + // Resumes mid-fragment when data remains. + { + const byte_t frag_tail[] = { 9U, 10U, 11U }; + const udpard_bytes_scattered_t frag = { .bytes = { .size = sizeof(frag_tail), .data = frag_tail }, + .next = NULL }; + bytes_scattered_reader_t reader = { .cursor = &frag, .position = 1U }; + byte_t out[2] = { 0 }; + bytes_scattered_read(&reader, sizeof(out), out); + const byte_t expected[] = { 10U, 11U }; + TEST_ASSERT_EQUAL_UINT8_ARRAY(expected, out, sizeof(out)); + TEST_ASSERT_EQUAL_PTR(&frag, reader.cursor); + TEST_ASSERT_EQUAL_size_t(frag.bytes.size, reader.position); + } +} + static void test_tx_serialize_header(void) { typedef struct @@ -190,17 +224,17 @@ static void test_tx_spool_and_queue_errors(void) // OOM in spool after first frame. instrumented_allocator_t alloc_payload = { 0 }; instrumented_allocator_new(&alloc_payload); - alloc_payload.limit_fragments = 1; - udpard_tx_t tx = { .enqueued_frames_limit = 1, .enqueued_frames_count = 0 }; - tx.memory.payload[0] = instrumented_allocator_make_resource(&alloc_payload); - byte_t buffer[64] = { 0 }; - udpard_bytes_t payload = { .size = sizeof(buffer), .data = buffer }; - const meta_t meta = { .priority = udpard_prio_fast, - .flag_ack = false, - .transfer_payload_size = (uint32_t)payload.size, - .transfer_id = 1, - .sender_uid = 1, - .topic_hash = 1 }; + alloc_payload.limit_fragments = 1; + udpard_tx_t tx = { .enqueued_frames_limit = 1, .enqueued_frames_count = 0 }; + tx.memory.payload[0] = instrumented_allocator_make_resource(&alloc_payload); + byte_t buffer[64] = { 0 }; + const udpard_bytes_scattered_t payload = make_scattered(buffer, sizeof(buffer)); + const meta_t meta = { .priority = udpard_prio_fast, + .flag_ack = false, + .transfer_payload_size = (uint32_t)payload.bytes.size, + .transfer_id = 1, + .sender_uid = 1, + .topic_hash = 1 }; TEST_ASSERT_NULL(tx_spool(&tx, tx.memory.payload[0], 32, meta, payload)); TEST_ASSERT_EQUAL_size_t(0, tx.enqueued_frames_count); TEST_ASSERT_EQUAL_UINT64(80, tx_ack_timeout(5, udpard_prio_high, 1)); @@ -213,9 +247,9 @@ static void test_tx_spool_and_queue_errors(void) mem.payload[i] = instrumented_allocator_make_resource(&alloc_payload); } TEST_ASSERT_TRUE(udpard_tx_new(&tx, 2U, 2U, 1U, mem, &(udpard_tx_vtable_t){ .eject = eject_with_flag })); - udpard_udpip_ep_t ep[UDPARD_IFACE_COUNT_MAX] = { make_ep(1), { 0 } }; - byte_t big_buf[2000] = { 0 }; - const udpard_bytes_t big_payload = { .size = sizeof(big_buf), .data = big_buf }; + udpard_udpip_ep_t ep[UDPARD_IFACE_COUNT_MAX] = { make_ep(1), { 0 } }; + byte_t big_buf[2000] = { 0 }; + const udpard_bytes_scattered_t big_payload = make_scattered(big_buf, sizeof(big_buf)); TEST_ASSERT_EQUAL_UINT32(0, udpard_tx_push(&tx, 0, 1000, udpard_prio_fast, 11, ep, 1, big_payload, NULL, NULL)); TEST_ASSERT_EQUAL_size_t(1, tx.errors_capacity); @@ -242,13 +276,17 @@ static void test_tx_spool_and_queue_errors(void) victim.deadline = 1; victim.topic_hash = 7; victim.transfer_id = 9; - (void)cavl2_find_or_insert( - &tx_sac.index_deadline, &victim.deadline, tx_cavl_compare_deadline, &victim.index_deadline, cavl2_trivial_factory); - (void)cavl2_find_or_insert(&tx_sac.index_transfer, - &(tx_transfer_key_t){ .topic_hash = victim.topic_hash, .transfer_id = victim.transfer_id }, - tx_cavl_compare_transfer, - &victim.index_transfer, + (void)cavl2_find_or_insert(&tx_sac.index_deadline, + &victim.deadline, + tx_cavl_compare_deadline, + &victim.index_deadline, cavl2_trivial_factory); + (void)cavl2_find_or_insert( + &tx_sac.index_transfer, + &(tx_transfer_key_t){ .topic_hash = victim.topic_hash, .transfer_id = victim.transfer_id }, + tx_cavl_compare_transfer, + &victim.index_transfer, + cavl2_trivial_factory); enlist_head(&tx_sac.agewise, &victim.agewise); TEST_ASSERT_FALSE(tx_ensure_queue_space(&tx_sac, 1)); TEST_ASSERT_EQUAL_size_t(1, tx_sac.errors_sacrifice); @@ -258,7 +296,7 @@ static void test_tx_spool_and_queue_errors(void) tx.errors_capacity = 0; TEST_ASSERT_TRUE(udpard_tx_new(&tx, 3U, 3U, 2U, mem, &(udpard_tx_vtable_t){ .eject = eject_with_flag })); TEST_ASSERT_EQUAL_UINT32( - 0, udpard_tx_push(&tx, 0, 1000, udpard_prio_fast, 12, ep, 2, (udpard_bytes_t){ 0 }, NULL, NULL)); + 0, udpard_tx_push(&tx, 0, 1000, udpard_prio_fast, 12, ep, 2, make_scattered(NULL, 0), NULL, NULL)); TEST_ASSERT_EQUAL_size_t(1, tx.errors_oom); // Spool OOM inside tx_push. @@ -274,7 +312,8 @@ static void test_tx_spool_and_queue_errors(void) tx.ack_baseline_timeout = 1; TEST_ASSERT_GREATER_THAN_UINT32( 0, - udpard_tx_push(&tx, 0, 100000, udpard_prio_nominal, 14, ep, 4, (udpard_bytes_t){ 0 }, record_feedback, &fstate)); + udpard_tx_push( + &tx, 0, 100000, udpard_prio_nominal, 14, ep, 4, make_scattered(NULL, 0), record_feedback, &fstate)); TEST_ASSERT_NOT_NULL(tx.index_staged); udpard_tx_free(&tx); instrumented_allocator_reset(&alloc_payload); @@ -295,7 +334,8 @@ static void test_tx_ack_and_scheduler(void) TEST_ASSERT_TRUE(udpard_tx_new(&tx1, 10U, 1U, 8U, mem, &(udpard_tx_vtable_t){ .eject = eject_with_flag })); udpard_udpip_ep_t ep[UDPARD_IFACE_COUNT_MAX] = { make_ep(2), { 0 } }; TEST_ASSERT_EQUAL_UINT32( - 1, udpard_tx_push(&tx1, 0, 1000, udpard_prio_fast, 21, ep, 42, (udpard_bytes_t){ 0 }, record_feedback, &fstate)); + 1, + udpard_tx_push(&tx1, 0, 1000, udpard_prio_fast, 21, ep, 42, make_scattered(NULL, 0), record_feedback, &fstate)); udpard_rx_t rx = { .tx = &tx1 }; tx_receive_ack(&rx, 21, 42); TEST_ASSERT_EQUAL_size_t(1, fstate.count); @@ -411,6 +451,7 @@ void tearDown(void) {} int main(void) { UNITY_BEGIN(); + RUN_TEST(test_bytes_scattered_read); RUN_TEST(test_tx_serialize_header); RUN_TEST(test_tx_validation_and_free); RUN_TEST(test_tx_comparators_and_feedback); From f311da6ac5f7a7b7faa9131fb14ec5d35b9dd4a2 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sun, 28 Dec 2025 18:23:22 +0200 Subject: [PATCH 32/42] compose proper P2P headers on tx --- .github/workflows/main.yml | 3 +++ libudpard/udpard.c | 17 ++++++++++++++++- libudpard/udpard.h | 12 +++++++++--- tests/src/test_e2e_edge.cpp | 29 +++++++++++++++-------------- 4 files changed, 43 insertions(+), 18 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 3b2b58c..69b113b 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -41,6 +41,7 @@ jobs: # Shall it break one day, feel free to remove the matrix from here. name: ${{github.job}}-#${{strategy.job-index}}-${{job.status}}-${{join(matrix.*, ',')}} path: ${{github.workspace}}/**/* + include-hidden-files: true retention-days: 2 optimizations: @@ -85,6 +86,7 @@ jobs: # Shall it break one day, feel free to remove the matrix from here. name: ${{github.job}}-#${{strategy.job-index}}-${{job.status}}-${{join(matrix.*, ',')}} path: ${{github.workspace}}/**/* + include-hidden-files: true retention-days: 2 coverage: @@ -107,6 +109,7 @@ jobs: with: name: ${{github.job}}-#${{strategy.job-index}}-${{job.status}} path: ${{github.workspace}}/**/* + include-hidden-files: true retention-days: 30 # TODO: re-enable this diff --git a/libudpard/udpard.c b/libudpard/udpard.c index 7d3f28a..48f6ae5 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -1120,6 +1120,8 @@ uint32_t udpard_tx_push_p2p(udpard_tx_t* const self, const udpard_us_t now, const udpard_us_t deadline, const udpard_prio_t priority, + const uint64_t request_topic_hash, + const uint64_t request_transfer_id, const udpard_remote_t remote, const udpard_bytes_scattered_t payload, void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), @@ -1127,6 +1129,19 @@ uint32_t udpard_tx_push_p2p(udpard_tx_t* const self, { uint32_t out = 0; if (self != NULL) { + // Serialize the P2P header. + byte_t header[UDPARD_P2P_HEADER_BYTES]; + byte_t* ptr = header; + *ptr++ = P2P_KIND_RESPONSE; + ptr += 7U; // Reserved bytes. + ptr = serialize_u64(ptr, request_topic_hash); + ptr = serialize_u64(ptr, request_transfer_id); + UDPARD_ASSERT((ptr - header) == UDPARD_P2P_HEADER_BYTES); + (void)ptr; + // Construct the full P2P payload with the header prepended. No copying needed! + const udpard_bytes_scattered_t headed_payload = { .bytes = { .size = UDPARD_P2P_HEADER_BYTES, .data = header }, + .next = &payload }; + // Enqueue the transfer. out = udpard_tx_push(self, now, deadline, @@ -1134,7 +1149,7 @@ uint32_t udpard_tx_push_p2p(udpard_tx_t* const self, remote.uid, remote.endpoints, self->p2p_transfer_id++, - payload, + headed_payload, feedback, user_transfer_reference); } diff --git a/libudpard/udpard.h b/libudpard/udpard.h index 95c6ebf..bd12af7 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -469,12 +469,18 @@ uint32_t udpard_tx_push(udpard_tx_t* const self, void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), // NULL if best-effort. void* const user_transfer_reference); -/// Specialization for P2P transfers. The semantics are identical to udpard_tx_push(). -/// The transfer-ID will be provided by the library based on the udpard_tx_t::p2p_transfer_id counter. +/// This is a specialization of the general push function for P2P transfers. +/// It is used to send P2P responses to messages received from topics; the request_* values shall be taken from +/// the message transfer that is being responded to. +/// P2P transfers are a bit more complex because they carry some additional metadata that is automatically +/// composed/parsed by the library transparently for the application. +/// The size of the serialized payload will include UDPARD_P2P_HEADER_BYTES additional bytes for the P2P header. uint32_t udpard_tx_push_p2p(udpard_tx_t* const self, const udpard_us_t now, const udpard_us_t deadline, const udpard_prio_t priority, + const uint64_t request_topic_hash, + const uint64_t request_transfer_id, const udpard_remote_t remote, // Endpoints may be invalid for some ifaces. const udpard_bytes_scattered_t payload, void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), // NULL if best-effort. @@ -647,7 +653,7 @@ struct udpard_rx_port_t /// Transfer payloads exceeding this extent may be truncated. /// The total size of the received payload may still exceed this extent setting by some small margin. - /// For P2P ports, UDPARD_P2P_HEADER_BYTES must be included in this value. + /// For P2P ports, UDPARD_P2P_HEADER_BYTES must be included in this value (the library takes care of this). size_t extent; /// See UDPARD_RX_REORDERING_WINDOW_... above. diff --git a/tests/src/test_e2e_edge.cpp b/tests/src/test_e2e_edge.cpp index 1e42260..11ba254 100644 --- a/tests/src/test_e2e_edge.cpp +++ b/tests/src/test_e2e_edge.cpp @@ -416,20 +416,21 @@ void test_udpard_tx_push_p2p() remote.uid = local_uid; remote.endpoints[0U] = dest; - std::array payload_buf{}; - constexpr uint8_t p2p_kind_response = 0U; - payload_buf[0] = p2p_kind_response; - for (size_t i = 0; i < sizeof(topic_hash); i++) { - payload_buf[8U + i] = static_cast((topic_hash >> (i * 8U)) & 0xFFU); - } - const uint64_t response_transfer_id = 55; - for (size_t i = 0; i < sizeof(response_transfer_id); i++) { - payload_buf[16U + i] = static_cast((response_transfer_id >> (i * 8U)) & 0xFFU); - } - const udpard_bytes_scattered_t payload = make_scattered(payload_buf.data(), payload_buf.size()); + const uint64_t request_transfer_id = 55; + const std::array user_payload{ 0xAAU, 0xBBU, 0xCCU }; + const udpard_bytes_scattered_t payload = make_scattered(user_payload.data(), user_payload.size()); const udpard_us_t now = 0; - TEST_ASSERT_GREATER_THAN_UINT32( - 0U, udpard_tx_push_p2p(&tx, now, now + 1000000, udpard_prio_nominal, remote, payload, nullptr, nullptr)); + TEST_ASSERT_GREATER_THAN_UINT32(0U, + udpard_tx_push_p2p(&tx, + now, + now + 1000000, + udpard_prio_nominal, + topic_hash, + request_transfer_id, + remote, + payload, + nullptr, + nullptr)); udpard_tx_poll(&tx, now, UDPARD_IFACE_MASK_ALL); TEST_ASSERT_FALSE(frames.empty()); @@ -440,7 +441,7 @@ void test_udpard_tx_push_p2p() } udpard_rx_poll(&rx, now); TEST_ASSERT_EQUAL_size_t(1, ctx.ids.size()); - TEST_ASSERT_EQUAL_UINT64(response_transfer_id, ctx.ids[0]); + TEST_ASSERT_EQUAL_UINT64(request_transfer_id, ctx.ids[0]); TEST_ASSERT_EQUAL_size_t(0, ctx.collisions); udpard_rx_port_free(&rx, reinterpret_cast(&port)); From 9c1dad5ffaf20a42edde94f407d182b52ff75f37 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sun, 28 Dec 2025 18:34:37 +0200 Subject: [PATCH 33/42] ci --- .github/workflows/main.yml | 74 ++++++++++++++++++++++---------------- 1 file changed, 43 insertions(+), 31 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 69b113b..3322336 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -9,7 +9,7 @@ jobs: container: ghcr.io/opencyphal/toolshed:ts24.4.3 strategy: matrix: - toolchain: [ 'clang', 'gcc' ] + toolchain: [ "clang", "gcc" ] include: - toolchain: gcc c-compiler: gcc @@ -34,14 +34,18 @@ jobs: cd ${{ github.workspace }}/build make VERBOSE=1 -j$(nproc) make test ARGS="--verbose" + - name: Archive workspace + if: always() + run: | + cd ${{ github.workspace }} + tar --use-compress-program="gzip -9" -cf workspace.tar.gz --exclude=workspace.tar.gz . - uses: actions/upload-artifact@v4 if: always() with: # The matrix is shown for convenience but this is fragile because the values may not be string-convertible. # Shall it break one day, feel free to remove the matrix from here. name: ${{github.job}}-#${{strategy.job-index}}-${{job.status}}-${{join(matrix.*, ',')}} - path: ${{github.workspace}}/**/* - include-hidden-files: true + path: ${{github.workspace}}/workspace.tar.gz retention-days: 2 optimizations: @@ -50,7 +54,7 @@ jobs: container: ghcr.io/opencyphal/toolshed:ts24.4.3 strategy: matrix: - toolchain: [ 'clang', 'gcc' ] + toolchain: [ "clang", "gcc" ] build_type: [ Release, MinSizeRel ] include: - toolchain: gcc @@ -79,14 +83,18 @@ jobs: cd ${{ github.workspace }}/build make VERBOSE=1 -j$(nproc) make test ARGS="--verbose" + - name: Archive workspace + if: always() + run: | + cd ${{ github.workspace }} + tar --use-compress-program="gzip -9" -cf workspace.tar.gz --exclude=workspace.tar.gz . - uses: actions/upload-artifact@v4 if: always() with: # The matrix is shown for convenience but this is fragile because the values may not be string-convertible. # Shall it break one day, feel free to remove the matrix from here. name: ${{github.job}}-#${{strategy.job-index}}-${{job.status}}-${{join(matrix.*, ',')}} - path: ${{github.workspace}}/**/* - include-hidden-files: true + path: ${{github.workspace}}/workspace.tar.gz retention-days: 2 coverage: @@ -104,32 +112,36 @@ jobs: - run: | cd ${{ github.workspace }}/build make -j$(nproc) && make test && make coverage + - name: Archive workspace + if: always() + run: | + cd ${{ github.workspace }} + tar --use-compress-program="gzip -9" -cf workspace.tar.gz --exclude=workspace.tar.gz . - uses: actions/upload-artifact@v4 if: always() with: name: ${{github.job}}-#${{strategy.job-index}}-${{job.status}} - path: ${{github.workspace}}/**/* - include-hidden-files: true + path: ${{github.workspace}}/workspace.tar.gz retention-days: 30 -# TODO: re-enable this -# avr: -# if: github.event_name == 'push' -# runs-on: ubuntu-latest -# env: -# mcu: at90can64 -# flags: -Wall -Wextra -Werror -pedantic -Wconversion -Wtype-limits -# strategy: -# matrix: -# std: [ 'c99', 'c11', 'gnu99', 'gnu11' ] -# steps: -# - uses: actions/checkout@v4 -# # language=bash -# - run: | -# sudo apt update -y && sudo apt upgrade -y -# sudo apt install gcc-avr avr-libc -# avr-gcc --version -# - run: avr-gcc -Ilib/cavl/ libudpard/*.c -c -std=${{matrix.std}} -mmcu=${{env.mcu}} ${{env.flags}} + # TODO: re-enable this + # avr: + # if: github.event_name == 'push' + # runs-on: ubuntu-latest + # env: + # mcu: at90can64 + # flags: -Wall -Wextra -Werror -pedantic -Wconversion -Wtype-limits + # strategy: + # matrix: + # std: [ 'c99', 'c11', 'gnu99', 'gnu11' ] + # steps: + # - uses: actions/checkout@v4 + # # language=bash + # - run: | + # sudo apt update -y && sudo apt upgrade -y + # sudo apt install gcc-avr avr-libc + # avr-gcc --version + # - run: avr-gcc -Ilib/cavl/ libudpard/*.c -c -std=${{matrix.std}} -mmcu=${{env.mcu}} ${{env.flags}} arm: if: github.event_name == 'push' @@ -138,7 +150,7 @@ jobs: flags: -Wall -Wextra -Werror -pedantic -Wconversion -Wtype-limits -Wcast-align -Wfatal-errors strategy: matrix: - std: [ 'c99', 'c11', 'gnu99', 'gnu11' ] + std: [ "c99", "c11", "gnu99", "gnu11" ] steps: - uses: actions/checkout@v4 # language=bash @@ -164,12 +176,12 @@ jobs: steps: - uses: actions/checkout@v4 with: - fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis + fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis submodules: true - uses: actions/setup-java@v4 with: java-version: 17 - distribution: 'zulu' + distribution: "zulu" # language=bash - run: | clang --version @@ -204,6 +216,6 @@ jobs: - uses: actions/checkout@v4 - uses: DoozyX/clang-format-lint-action@v0.20 with: - source: './libudpard ./tests' - extensions: 'c,h,cpp,hpp' + source: "./libudpard ./tests" + extensions: "c,h,cpp,hpp" clangFormatVersion: ${{ env.LLVM_VERSION }} From ecddf5139a0102b6306d13e4b32512ecb06a20a9 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sun, 28 Dec 2025 18:46:00 +0200 Subject: [PATCH 34/42] ci --- .github/workflows/main.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 3322336..ee93d8e 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -38,14 +38,14 @@ jobs: if: always() run: | cd ${{ github.workspace }} - tar --use-compress-program="gzip -9" -cf workspace.tar.gz --exclude=workspace.tar.gz . + tar --use-compress-program="gzip -9" -cf ../workspace.tar.gz . - uses: actions/upload-artifact@v4 if: always() with: # The matrix is shown for convenience but this is fragile because the values may not be string-convertible. # Shall it break one day, feel free to remove the matrix from here. name: ${{github.job}}-#${{strategy.job-index}}-${{job.status}}-${{join(matrix.*, ',')}} - path: ${{github.workspace}}/workspace.tar.gz + path: ${{github.workspace}}/../workspace.tar.gz retention-days: 2 optimizations: @@ -87,14 +87,14 @@ jobs: if: always() run: | cd ${{ github.workspace }} - tar --use-compress-program="gzip -9" -cf workspace.tar.gz --exclude=workspace.tar.gz . + tar --use-compress-program="gzip -9" -cf ../workspace.tar.gz . - uses: actions/upload-artifact@v4 if: always() with: # The matrix is shown for convenience but this is fragile because the values may not be string-convertible. # Shall it break one day, feel free to remove the matrix from here. name: ${{github.job}}-#${{strategy.job-index}}-${{job.status}}-${{join(matrix.*, ',')}} - path: ${{github.workspace}}/workspace.tar.gz + path: ${{github.workspace}}/../workspace.tar.gz retention-days: 2 coverage: @@ -116,12 +116,12 @@ jobs: if: always() run: | cd ${{ github.workspace }} - tar --use-compress-program="gzip -9" -cf workspace.tar.gz --exclude=workspace.tar.gz . + tar --use-compress-program="gzip -9" -cf ../workspace.tar.gz . - uses: actions/upload-artifact@v4 if: always() with: name: ${{github.job}}-#${{strategy.job-index}}-${{job.status}} - path: ${{github.workspace}}/workspace.tar.gz + path: ${{github.workspace}}/../workspace.tar.gz retention-days: 30 # TODO: re-enable this From df6aa6453fdfe06b713494d7d2fe81bdcc428ff7 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sun, 28 Dec 2025 18:53:01 +0200 Subject: [PATCH 35/42] ci --- .github/workflows/main.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index ee93d8e..5736635 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -38,14 +38,14 @@ jobs: if: always() run: | cd ${{ github.workspace }} - tar --use-compress-program="gzip -9" -cf ../workspace.tar.gz . + tar --use-compress-program="gzip -9" -cf ${RUNNER_TEMP}/workspace.tar.gz . - uses: actions/upload-artifact@v4 if: always() with: # The matrix is shown for convenience but this is fragile because the values may not be string-convertible. # Shall it break one day, feel free to remove the matrix from here. name: ${{github.job}}-#${{strategy.job-index}}-${{job.status}}-${{join(matrix.*, ',')}} - path: ${{github.workspace}}/../workspace.tar.gz + path: ${{runner.temp}}/workspace.tar.gz retention-days: 2 optimizations: @@ -87,14 +87,14 @@ jobs: if: always() run: | cd ${{ github.workspace }} - tar --use-compress-program="gzip -9" -cf ../workspace.tar.gz . + tar --use-compress-program="gzip -9" -cf ${RUNNER_TEMP}/workspace.tar.gz . - uses: actions/upload-artifact@v4 if: always() with: # The matrix is shown for convenience but this is fragile because the values may not be string-convertible. # Shall it break one day, feel free to remove the matrix from here. name: ${{github.job}}-#${{strategy.job-index}}-${{job.status}}-${{join(matrix.*, ',')}} - path: ${{github.workspace}}/../workspace.tar.gz + path: ${{runner.temp}}/workspace.tar.gz retention-days: 2 coverage: @@ -116,12 +116,12 @@ jobs: if: always() run: | cd ${{ github.workspace }} - tar --use-compress-program="gzip -9" -cf ../workspace.tar.gz . + tar --use-compress-program="gzip -9" -cf ${RUNNER_TEMP}/workspace.tar.gz . - uses: actions/upload-artifact@v4 if: always() with: name: ${{github.job}}-#${{strategy.job-index}}-${{job.status}} - path: ${{github.workspace}}/../workspace.tar.gz + path: ${{runner.temp}}/workspace.tar.gz retention-days: 30 # TODO: re-enable this From 36db309b323f7509c1133c5371d7d8f75395861c Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sun, 28 Dec 2025 19:12:31 +0200 Subject: [PATCH 36/42] ci --- .github/workflows/main.yml | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 5736635..db969ad 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -115,8 +115,14 @@ jobs: - name: Archive workspace if: always() run: | - cd ${{ github.workspace }} - tar --use-compress-program="gzip -9" -cf ${RUNNER_TEMP}/workspace.tar.gz . + set -euo pipefail + ws="${GITHUB_WORKSPACE}" + parent="$(dirname "$ws")" + base="$(basename "$ws")" + echo "Archiving: $ws" + ls -la "$ws" + tar -C "$parent" --use-compress-program="gzip -9" -cf "${RUNNER_TEMP}/workspace.tar.gz" "$base" + tar -tzf "${RUNNER_TEMP}/workspace.tar.gz" | sed -n '1,120p' - uses: actions/upload-artifact@v4 if: always() with: From d6d6da15f7094ec8c5e7ebadd5ac7f965233d923 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sun, 28 Dec 2025 19:16:29 +0200 Subject: [PATCH 37/42] ci --- .github/workflows/main.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index db969ad..0543249 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -115,7 +115,6 @@ jobs: - name: Archive workspace if: always() run: | - set -euo pipefail ws="${GITHUB_WORKSPACE}" parent="$(dirname "$ws")" base="$(basename "$ws")" From dfbeb930a288ef3b4128757b3cedd5236a8ff565 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sun, 28 Dec 2025 19:29:26 +0200 Subject: [PATCH 38/42] ci --- .github/workflows/main.yml | 29 ++++++++++++----------------- 1 file changed, 12 insertions(+), 17 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 0543249..ac8e370 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -24,21 +24,21 @@ jobs: # language=bash - run: > cmake - -B ${{ github.workspace }}/build + -B $GITHUB_WORKSPACE/build -DCMAKE_BUILD_TYPE=Debug -DCMAKE_C_COMPILER=${{ matrix.c-compiler }} -DCMAKE_CXX_COMPILER=${{ matrix.cxx-compiler }} . # language=bash - run: | - cd ${{ github.workspace }}/build + cd $GITHUB_WORKSPACE/build make VERBOSE=1 -j$(nproc) make test ARGS="--verbose" - name: Archive workspace if: always() run: | - cd ${{ github.workspace }} - tar --use-compress-program="gzip -9" -cf ${RUNNER_TEMP}/workspace.tar.gz . + cd $GITHUB_WORKSPACE + tar --use-compress-program="gzip -9" -cf ${{runner.temp}}/workspace.tar.gz . - uses: actions/upload-artifact@v4 if: always() with: @@ -71,7 +71,7 @@ jobs: # language=bash - run: > cmake - -B ${{ github.workspace }}/build + -B $GITHUB_WORKSPACE/build -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} -DCMAKE_C_COMPILER=${{ matrix.c-compiler }} -DCMAKE_CXX_COMPILER=${{ matrix.cxx-compiler }} @@ -80,14 +80,14 @@ jobs: . # language=bash - run: | - cd ${{ github.workspace }}/build + cd $GITHUB_WORKSPACE/build make VERBOSE=1 -j$(nproc) make test ARGS="--verbose" - name: Archive workspace if: always() run: | - cd ${{ github.workspace }} - tar --use-compress-program="gzip -9" -cf ${RUNNER_TEMP}/workspace.tar.gz . + cd $GITHUB_WORKSPACE + tar --use-compress-program="gzip -9" -cf ${{runner.temp}}/workspace.tar.gz . - uses: actions/upload-artifact@v4 if: always() with: @@ -107,21 +107,16 @@ jobs: submodules: true # language=bash - run: > - cmake -B ${{ github.workspace }}/build -DCMAKE_BUILD_TYPE=Debug -DNO_STATIC_ANALYSIS=ON -DENABLE_COVERAGE=ON . + cmake -B $GITHUB_WORKSPACE/build -DCMAKE_BUILD_TYPE=Debug -DNO_STATIC_ANALYSIS=ON -DENABLE_COVERAGE=ON . # language=bash - run: | - cd ${{ github.workspace }}/build + cd $GITHUB_WORKSPACE/build make -j$(nproc) && make test && make coverage - name: Archive workspace if: always() run: | - ws="${GITHUB_WORKSPACE}" - parent="$(dirname "$ws")" - base="$(basename "$ws")" - echo "Archiving: $ws" - ls -la "$ws" - tar -C "$parent" --use-compress-program="gzip -9" -cf "${RUNNER_TEMP}/workspace.tar.gz" "$base" - tar -tzf "${RUNNER_TEMP}/workspace.tar.gz" | sed -n '1,120p' + cd $GITHUB_WORKSPACE + tar --use-compress-program="gzip -9" -cf ${{runner.temp}}/workspace.tar.gz . - uses: actions/upload-artifact@v4 if: always() with: From e8320bfdca0c58859183597b5824a7bd64c54da7 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sun, 28 Dec 2025 19:32:52 +0200 Subject: [PATCH 39/42] ci --- .github/workflows/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index ac8e370..c7efd7c 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -38,7 +38,7 @@ jobs: if: always() run: | cd $GITHUB_WORKSPACE - tar --use-compress-program="gzip -9" -cf ${{runner.temp}}/workspace.tar.gz . + tar --use-compress-program="gzip -9" -cf $RUNNER_TEMP/workspace.tar.gz . - uses: actions/upload-artifact@v4 if: always() with: @@ -87,7 +87,7 @@ jobs: if: always() run: | cd $GITHUB_WORKSPACE - tar --use-compress-program="gzip -9" -cf ${{runner.temp}}/workspace.tar.gz . + tar --use-compress-program="gzip -9" -cf $RUNNER_TEMP/workspace.tar.gz . - uses: actions/upload-artifact@v4 if: always() with: @@ -116,7 +116,7 @@ jobs: if: always() run: | cd $GITHUB_WORKSPACE - tar --use-compress-program="gzip -9" -cf ${{runner.temp}}/workspace.tar.gz . + tar --use-compress-program="gzip -9" -cf $RUNNER_TEMP/workspace.tar.gz . - uses: actions/upload-artifact@v4 if: always() with: From 72eda9c6dc0e16651a9ed1ebad48ac0bce8ab34c Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sun, 28 Dec 2025 19:37:35 +0200 Subject: [PATCH 40/42] tests --- .clang-tidy | 3 +- tests/.clang-tidy | 2 +- tests/CMakeLists.txt | 1 + tests/src/test_e2e_api.cpp | 168 ++++--- tests/src/test_e2e_responses.cpp | 776 +++++++++++++++++++++++++++++++ 5 files changed, 870 insertions(+), 80 deletions(-) create mode 100644 tests/src/test_e2e_responses.cpp diff --git a/.clang-tidy b/.clang-tidy index 62d2be1..70afdfd 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -22,8 +22,7 @@ Checks: >- -boost-use-ranges, -hicpp-static-assert, -misc-static-assert, - -modernize-macro-to-enum, - -cppcoreguidelines-macro-to-enum, + -*-macro-to-enum, -*-macro-usage, -*-enum-size, -*-use-using, diff --git a/tests/.clang-tidy b/tests/.clang-tidy index 971c4bd..116c02b 100644 --- a/tests/.clang-tidy +++ b/tests/.clang-tidy @@ -40,7 +40,7 @@ Checks: >- -*-no-malloc, -cert-msc30-c, -cert-msc50-cpp, - -modernize-macro-to-enum, + -*-macro-to-enum, -modernize-use-trailing-return-type, -*-macro-usage, -*-enum-size, diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index bfdf275..68a8108 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -96,6 +96,7 @@ gen_test_matrix(test_fragment "src/test_fragment.cpp;${library_dir}/udpard.c") gen_test_single(test_e2e_random "src/test_e2e_random.cpp;${library_dir}/udpard.c") gen_test_single(test_e2e_edge "src/test_e2e_edge.cpp;${library_dir}/udpard.c") gen_test_single(test_e2e_api "src/test_e2e_api.cpp;${library_dir}/udpard.c") +gen_test_single(test_e2e_responses "src/test_e2e_responses.cpp;${library_dir}/udpard.c") # Coverage targets. Usage: # cmake -DENABLE_COVERAGE=ON .. diff --git a/tests/src/test_e2e_api.cpp b/tests/src/test_e2e_api.cpp index a31210f..b089816 100644 --- a/tests/src/test_e2e_api.cpp +++ b/tests/src/test_e2e_api.cpp @@ -118,66 +118,76 @@ void on_ack_response(udpard_rx_t*, udpard_rx_port_p2p_t* port, const udpard_rx_t constexpr udpard_rx_port_p2p_vtable_t ack_callbacks{ &on_ack_response }; // Reliable delivery must survive data and ack loss. +// Each node uses exactly one TX and one RX instance as per the library design. void test_reliable_delivery_under_losses() { seed_prng(); - // Allocators. - instrumented_allocator_t pub_alloc_transfer{}; - instrumented_allocator_t pub_alloc_payload{}; - instrumented_allocator_t sub_alloc_frag{}; - instrumented_allocator_t sub_alloc_session{}; - instrumented_allocator_t acktx_alloc_transfer{}; - instrumented_allocator_t acktx_alloc_payload{}; - instrumented_allocator_t ackrx_alloc_frag{}; - instrumented_allocator_t ackrx_alloc_session{}; - instrumented_allocator_new(&pub_alloc_transfer); - instrumented_allocator_new(&pub_alloc_payload); - instrumented_allocator_new(&sub_alloc_frag); - instrumented_allocator_new(&sub_alloc_session); - instrumented_allocator_new(&acktx_alloc_transfer); - instrumented_allocator_new(&acktx_alloc_payload); - instrumented_allocator_new(&ackrx_alloc_frag); - instrumented_allocator_new(&ackrx_alloc_session); - - // Memory views. - udpard_tx_mem_resources_t pub_mem{}; - pub_mem.transfer = instrumented_allocator_make_resource(&pub_alloc_transfer); - for (auto& res : pub_mem.payload) { - res = instrumented_allocator_make_resource(&pub_alloc_payload); + // Allocators - one TX and one RX per node. + // Publisher node allocators. + instrumented_allocator_t pub_tx_alloc_transfer{}; + instrumented_allocator_t pub_tx_alloc_payload{}; + instrumented_allocator_t pub_rx_alloc_frag{}; + instrumented_allocator_t pub_rx_alloc_session{}; + instrumented_allocator_new(&pub_tx_alloc_transfer); + instrumented_allocator_new(&pub_tx_alloc_payload); + instrumented_allocator_new(&pub_rx_alloc_frag); + instrumented_allocator_new(&pub_rx_alloc_session); + + // Subscriber node allocators. + instrumented_allocator_t sub_tx_alloc_transfer{}; + instrumented_allocator_t sub_tx_alloc_payload{}; + instrumented_allocator_t sub_rx_alloc_frag{}; + instrumented_allocator_t sub_rx_alloc_session{}; + instrumented_allocator_new(&sub_tx_alloc_transfer); + instrumented_allocator_new(&sub_tx_alloc_payload); + instrumented_allocator_new(&sub_rx_alloc_frag); + instrumented_allocator_new(&sub_rx_alloc_session); + + // Memory resources. + udpard_tx_mem_resources_t pub_tx_mem{}; + pub_tx_mem.transfer = instrumented_allocator_make_resource(&pub_tx_alloc_transfer); + for (auto& res : pub_tx_mem.payload) { + res = instrumented_allocator_make_resource(&pub_tx_alloc_payload); } - udpard_tx_mem_resources_t ack_mem{}; - ack_mem.transfer = instrumented_allocator_make_resource(&acktx_alloc_transfer); - for (auto& res : ack_mem.payload) { - res = instrumented_allocator_make_resource(&acktx_alloc_payload); + const udpard_rx_mem_resources_t pub_rx_mem{ .session = instrumented_allocator_make_resource(&pub_rx_alloc_session), + .fragment = instrumented_allocator_make_resource(&pub_rx_alloc_frag) }; + + udpard_tx_mem_resources_t sub_tx_mem{}; + sub_tx_mem.transfer = instrumented_allocator_make_resource(&sub_tx_alloc_transfer); + for (auto& res : sub_tx_mem.payload) { + res = instrumented_allocator_make_resource(&sub_tx_alloc_payload); } - const udpard_rx_mem_resources_t sub_mem{ .session = instrumented_allocator_make_resource(&sub_alloc_session), - .fragment = instrumented_allocator_make_resource(&sub_alloc_frag) }; - const udpard_rx_mem_resources_t ack_rx_mem{ .session = instrumented_allocator_make_resource(&ackrx_alloc_session), - .fragment = instrumented_allocator_make_resource(&ackrx_alloc_frag) }; + const udpard_rx_mem_resources_t sub_rx_mem{ .session = instrumented_allocator_make_resource(&sub_rx_alloc_session), + .fragment = instrumented_allocator_make_resource(&sub_rx_alloc_frag) }; - // Pipelines. + // Publisher node: single TX, single RX (linked to TX for ACK processing). + constexpr uint64_t pub_uid = 0x1111222233334444ULL; udpard_tx_t pub_tx{}; std::vector pub_frames; - TEST_ASSERT_TRUE(udpard_tx_new(&pub_tx, 0x1111222233334444ULL, 10U, 64, pub_mem, &tx_vtable)); + TEST_ASSERT_TRUE(udpard_tx_new(&pub_tx, pub_uid, 10U, 64, pub_tx_mem, &tx_vtable)); pub_tx.user = &pub_frames; pub_tx.ack_baseline_timeout = 8000; - udpard_tx_t ack_tx{}; - std::vector ack_frames; - TEST_ASSERT_TRUE(udpard_tx_new(&ack_tx, 0xABCDEF0012345678ULL, 77U, 8, ack_mem, &tx_vtable)); - ack_tx.user = &ack_frames; + + udpard_rx_t pub_rx{}; + udpard_rx_new(&pub_rx, &pub_tx); + udpard_rx_port_p2p_t pub_p2p_port{}; + TEST_ASSERT_TRUE( + udpard_rx_port_new_p2p(&pub_p2p_port, pub_uid, UDPARD_P2P_HEADER_BYTES, pub_rx_mem, &ack_callbacks)); + + // Subscriber node: single TX, single RX (linked to TX for sending ACKs). + constexpr uint64_t sub_uid = 0xABCDEF0012345678ULL; + udpard_tx_t sub_tx{}; + std::vector sub_frames; + TEST_ASSERT_TRUE(udpard_tx_new(&sub_tx, sub_uid, 77U, 8, sub_tx_mem, &tx_vtable)); + sub_tx.user = &sub_frames; udpard_rx_t sub_rx{}; - udpard_rx_new(&sub_rx, &ack_tx); + udpard_rx_new(&sub_rx, &sub_tx); udpard_rx_port_t sub_port{}; const uint64_t topic_hash = 0x0123456789ABCDEFULL; TEST_ASSERT_TRUE( - udpard_rx_port_new(&sub_port, topic_hash, 6000, UDPARD_RX_REORDERING_WINDOW_UNORDERED, sub_mem, &callbacks)); - udpard_rx_t ack_rx{}; - udpard_rx_port_p2p_t ack_port{}; - udpard_rx_new(&ack_rx, &pub_tx); - TEST_ASSERT_TRUE( - udpard_rx_port_new_p2p(&ack_port, pub_tx.local_uid, UDPARD_P2P_HEADER_BYTES, ack_rx_mem, &ack_callbacks)); + udpard_rx_port_new(&sub_port, topic_hash, 6000, UDPARD_RX_REORDERING_WINDOW_UNORDERED, sub_rx_mem, &callbacks)); // Endpoints. const std::array publisher_sources{ @@ -185,16 +195,16 @@ void test_reliable_delivery_under_losses() udpard_udpip_ep_t{ .ip = 0x0A000002U, .port = 7401U }, udpard_udpip_ep_t{ .ip = 0x0A000003U, .port = 7402U }, }; - const std::array subscriber_endpoints{ - udpard_make_subject_endpoint(111U), - udpard_udpip_ep_t{ .ip = 0x0A00000BU, .port = 7501U }, - udpard_udpip_ep_t{ .ip = 0x0A00000CU, .port = 7502U }, - }; - const std::array ack_sources{ + const std::array subscriber_sources{ udpard_udpip_ep_t{ .ip = 0x0A000010U, .port = 7600U }, udpard_udpip_ep_t{ .ip = 0x0A000011U, .port = 7601U }, udpard_udpip_ep_t{ .ip = 0x0A000012U, .port = 7602U }, }; + const std::array topic_multicast{ + udpard_make_subject_endpoint(111U), + udpard_udpip_ep_t{ .ip = 0x0A00000BU, .port = 7501U }, + udpard_udpip_ep_t{ .ip = 0x0A00000CU, .port = 7502U }, + }; // Payload and context. std::vector payload(4096); @@ -202,13 +212,13 @@ void test_reliable_delivery_under_losses() RxContext ctx{}; ctx.expected = payload; ctx.sources = publisher_sources; - ctx.remote_uid = pub_tx.local_uid; + ctx.remote_uid = pub_uid; sub_rx.user = &ctx; // Reliable transfer with staged losses. FeedbackState fb{}; const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size()); - std::array dest_per_iface = subscriber_endpoints; + std::array dest_per_iface = topic_multicast; pub_tx.mtu[0] = 600; pub_tx.mtu[1] = 900; pub_tx.mtu[2] = 500; @@ -233,6 +243,7 @@ void test_reliable_delivery_under_losses() size_t attempts = 0; const size_t attempt_cap = 6; while ((fb.count == 0) && (attempts < attempt_cap)) { + // Publisher transmits topic message. pub_frames.clear(); udpard_tx_poll(&pub_tx, now, UDPARD_IFACE_MASK_ALL); bool data_loss_done = false; @@ -253,25 +264,26 @@ void test_reliable_delivery_under_losses() } udpard_rx_poll(&sub_rx, now); - ack_frames.clear(); - udpard_tx_poll(&ack_tx, now, UDPARD_IFACE_MASK_ALL); + // Subscriber transmits ACKs (via sub_tx since sub_rx is linked to it). + sub_frames.clear(); + udpard_tx_poll(&sub_tx, now, UDPARD_IFACE_MASK_ALL); bool ack_sent = false; - for (const auto& ack : ack_frames) { + for (const auto& ack : sub_frames) { const bool drop_ack = first_round && !ack_sent; if (drop_ack) { drop_frame(ack); continue; } ack_sent = true; - TEST_ASSERT_TRUE(udpard_rx_port_push(&ack_rx, - reinterpret_cast(&ack_port), + TEST_ASSERT_TRUE(udpard_rx_port_push(&pub_rx, + reinterpret_cast(&pub_p2p_port), now, - ack_sources[ack.iface_index], + subscriber_sources[ack.iface_index], ack.datagram, tx_payload_deleter, ack.iface_index)); } - udpard_rx_poll(&ack_rx, now); + udpard_rx_poll(&pub_rx, now); first_round = false; attempts++; now += pub_tx.ack_baseline_timeout + 5000; @@ -284,25 +296,27 @@ void test_reliable_delivery_under_losses() // Cleanup. udpard_rx_port_free(&sub_rx, &sub_port); - udpard_rx_port_free(&ack_rx, reinterpret_cast(&ack_port)); + udpard_rx_port_free(&pub_rx, reinterpret_cast(&pub_p2p_port)); udpard_tx_free(&pub_tx); - udpard_tx_free(&ack_tx); - TEST_ASSERT_EQUAL_size_t(0, sub_alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, sub_alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, pub_alloc_transfer.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, pub_alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, acktx_alloc_transfer.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, acktx_alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, ackrx_alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, ackrx_alloc_session.allocated_fragments); - instrumented_allocator_reset(&sub_alloc_frag); - instrumented_allocator_reset(&sub_alloc_session); - instrumented_allocator_reset(&pub_alloc_transfer); - instrumented_allocator_reset(&pub_alloc_payload); - instrumented_allocator_reset(&acktx_alloc_transfer); - instrumented_allocator_reset(&acktx_alloc_payload); - instrumented_allocator_reset(&ackrx_alloc_frag); - instrumented_allocator_reset(&ackrx_alloc_session); + udpard_tx_free(&sub_tx); + + TEST_ASSERT_EQUAL_size_t(0, pub_tx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, pub_tx_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, pub_rx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, pub_rx_alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, sub_tx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, sub_tx_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, sub_rx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, sub_rx_alloc_session.allocated_fragments); + + instrumented_allocator_reset(&pub_tx_alloc_transfer); + instrumented_allocator_reset(&pub_tx_alloc_payload); + instrumented_allocator_reset(&pub_rx_alloc_frag); + instrumented_allocator_reset(&pub_rx_alloc_session); + instrumented_allocator_reset(&sub_tx_alloc_transfer); + instrumented_allocator_reset(&sub_tx_alloc_payload); + instrumented_allocator_reset(&sub_rx_alloc_frag); + instrumented_allocator_reset(&sub_rx_alloc_session); } // Counters must reflect expired deliveries and ack failures. diff --git a/tests/src/test_e2e_responses.cpp b/tests/src/test_e2e_responses.cpp new file mode 100644 index 0000000..ad64bea --- /dev/null +++ b/tests/src/test_e2e_responses.cpp @@ -0,0 +1,776 @@ +/// This software is distributed under the terms of the MIT License. +/// Copyright (C) OpenCyphal Development Team +/// Copyright Amazon.com Inc. or its affiliates. +/// SPDX-License-Identifier: MIT + +#include +#include "helpers.h" +#include +#include +#include +#include + +namespace { + +// -------------------------------------------------------------------------------------------------------------------- +// COMMON INFRASTRUCTURE +// -------------------------------------------------------------------------------------------------------------------- + +struct CapturedFrame +{ + udpard_bytes_mut_t datagram; + uint_fast8_t iface_index; +}; + +void tx_refcount_free(void* const user, const size_t size, void* const payload) +{ + (void)user; + udpard_tx_refcount_dec(udpard_bytes_t{ .size = size, .data = payload }); +} + +bool capture_tx_frame(udpard_tx_t* const tx, const udpard_tx_ejection_t ejection) +{ + auto* frames = static_cast*>(tx->user); + if (frames == nullptr) { + return false; + } + udpard_tx_refcount_inc(ejection.datagram); + void* const data = const_cast(ejection.datagram.data); // NOLINT + frames->push_back(CapturedFrame{ .datagram = { .size = ejection.datagram.size, .data = data }, + .iface_index = ejection.iface_index }); + return true; +} + +void drop_frame(const CapturedFrame& frame) +{ + udpard_tx_refcount_dec(udpard_bytes_t{ .size = frame.datagram.size, .data = frame.datagram.data }); +} + +constexpr udpard_tx_vtable_t tx_vtable{ .eject = &capture_tx_frame }; +constexpr udpard_mem_deleter_t tx_payload_deleter{ .user = nullptr, .free = &tx_refcount_free }; + +// -------------------------------------------------------------------------------------------------------------------- +// FEEDBACK AND CONTEXT STRUCTURES +// -------------------------------------------------------------------------------------------------------------------- + +struct FeedbackState +{ + size_t count = 0; + bool success = false; + uint64_t topic_hash = 0; + uint64_t transfer_id = 0; +}; + +void record_feedback(udpard_tx_t*, const udpard_tx_feedback_t fb) +{ + auto* st = static_cast(fb.user_transfer_reference); + if (st != nullptr) { + st->count++; + st->success = fb.success; + st->topic_hash = fb.topic_hash; + st->transfer_id = fb.transfer_id; + } +} + +struct NodeBTopicContext +{ + std::vector received_payload; + std::array sender_sources{}; + uint64_t sender_uid = 0; + uint64_t received_topic = 0; + uint64_t received_tid = 0; + size_t message_count = 0; +}; + +struct NodeAResponseContext +{ + std::vector received_response; + uint64_t topic_hash = 0; + uint64_t transfer_id = 0; + size_t response_count = 0; +}; + +// Combined context for a node's RX instance +struct NodeContext +{ + NodeBTopicContext* topic_ctx = nullptr; + NodeAResponseContext* response_ctx = nullptr; +}; + +// -------------------------------------------------------------------------------------------------------------------- +// CALLBACK IMPLEMENTATIONS +// -------------------------------------------------------------------------------------------------------------------- + +// Node B's message reception callback - receives the topic message from A +void node_b_on_topic_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) +{ + auto* node_ctx = static_cast(rx->user); + auto* ctx = node_ctx->topic_ctx; + if (ctx == nullptr) { + udpard_fragment_free_all(transfer.payload, port->memory.fragment); + return; + } + ctx->message_count++; + ctx->sender_uid = transfer.remote.uid; + ctx->sender_sources = {}; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + ctx->sender_sources[i] = transfer.remote.endpoints[i]; + } + ctx->received_topic = port->topic_hash; + ctx->received_tid = transfer.transfer_id; + + ctx->received_payload.resize(transfer.payload_size_stored); + const udpard_fragment_t* cursor = transfer.payload; + (void)udpard_fragment_gather(&cursor, 0, transfer.payload_size_stored, ctx->received_payload.data()); + + udpard_fragment_free_all(transfer.payload, port->memory.fragment); +} + +void on_collision(udpard_rx_t* const, udpard_rx_port_t* const, const udpard_remote_t) {} + +constexpr udpard_rx_port_vtable_t topic_callbacks{ .on_message = &node_b_on_topic_message, + .on_collision = &on_collision }; + +// Node A's P2P response reception callback - receives the response from B +void node_a_on_p2p_response(udpard_rx_t* const rx, + udpard_rx_port_p2p_t* const port, + const udpard_rx_transfer_p2p_t transfer) +{ + auto* node_ctx = static_cast(rx->user); + auto* ctx = node_ctx->response_ctx; + if (ctx == nullptr) { + udpard_fragment_free_all(transfer.base.payload, port->base.memory.fragment); + return; + } + ctx->response_count++; + ctx->topic_hash = transfer.topic_hash; + ctx->transfer_id = transfer.base.transfer_id; + + ctx->received_response.resize(transfer.base.payload_size_stored); + const udpard_fragment_t* cursor = transfer.base.payload; + (void)udpard_fragment_gather(&cursor, 0, transfer.base.payload_size_stored, ctx->received_response.data()); + + udpard_fragment_free_all(transfer.base.payload, port->base.memory.fragment); +} + +constexpr udpard_rx_port_p2p_vtable_t p2p_response_callbacks{ .on_message = &node_a_on_p2p_response }; + +// ACK-only P2P port callback (for receiving ACKs, which have no user payload) +void on_ack_only(udpard_rx_t*, udpard_rx_port_p2p_t* port, const udpard_rx_transfer_p2p_t tr) +{ + udpard_fragment_free_all(tr.base.payload, port->base.memory.fragment); +} + +constexpr udpard_rx_port_p2p_vtable_t ack_only_callbacks{ .on_message = &on_ack_only }; + +// -------------------------------------------------------------------------------------------------------------------- +// TEST: Basic topic message with P2P response flow +// -------------------------------------------------------------------------------------------------------------------- + +/// Node A publishes a reliable topic message, Node B receives it and sends a reliable P2P response. +/// Both nodes verify that their delivery callbacks are correctly invoked. +/// Each node uses exactly one TX and one RX instance. +void test_topic_with_p2p_response() +{ + seed_prng(); + + // ================================================================================================================ + // ALLOCATORS - One TX and one RX per node + // ================================================================================================================ + instrumented_allocator_t a_tx_alloc_transfer{}; + instrumented_allocator_t a_tx_alloc_payload{}; + instrumented_allocator_t a_rx_alloc_frag{}; + instrumented_allocator_t a_rx_alloc_session{}; + instrumented_allocator_new(&a_tx_alloc_transfer); + instrumented_allocator_new(&a_tx_alloc_payload); + instrumented_allocator_new(&a_rx_alloc_frag); + instrumented_allocator_new(&a_rx_alloc_session); + + instrumented_allocator_t b_tx_alloc_transfer{}; + instrumented_allocator_t b_tx_alloc_payload{}; + instrumented_allocator_t b_rx_alloc_frag{}; + instrumented_allocator_t b_rx_alloc_session{}; + instrumented_allocator_new(&b_tx_alloc_transfer); + instrumented_allocator_new(&b_tx_alloc_payload); + instrumented_allocator_new(&b_rx_alloc_frag); + instrumented_allocator_new(&b_rx_alloc_session); + + // ================================================================================================================ + // MEMORY RESOURCES + // ================================================================================================================ + udpard_tx_mem_resources_t a_tx_mem{}; + a_tx_mem.transfer = instrumented_allocator_make_resource(&a_tx_alloc_transfer); + for (auto& res : a_tx_mem.payload) { + res = instrumented_allocator_make_resource(&a_tx_alloc_payload); + } + const udpard_rx_mem_resources_t a_rx_mem{ .session = instrumented_allocator_make_resource(&a_rx_alloc_session), + .fragment = instrumented_allocator_make_resource(&a_rx_alloc_frag) }; + + udpard_tx_mem_resources_t b_tx_mem{}; + b_tx_mem.transfer = instrumented_allocator_make_resource(&b_tx_alloc_transfer); + for (auto& res : b_tx_mem.payload) { + res = instrumented_allocator_make_resource(&b_tx_alloc_payload); + } + const udpard_rx_mem_resources_t b_rx_mem{ .session = instrumented_allocator_make_resource(&b_rx_alloc_session), + .fragment = instrumented_allocator_make_resource(&b_rx_alloc_frag) }; + + // ================================================================================================================ + // NODE UIDs AND ENDPOINTS + // ================================================================================================================ + constexpr uint64_t node_a_uid = 0xAAAA1111BBBB2222ULL; + constexpr uint64_t node_b_uid = 0xCCCC3333DDDD4444ULL; + + const std::array node_a_sources{ + udpard_udpip_ep_t{ .ip = 0x0A000001U, .port = 7400U }, + udpard_udpip_ep_t{ .ip = 0x0A000002U, .port = 7401U }, + udpard_udpip_ep_t{ .ip = 0x0A000003U, .port = 7402U }, + }; + const std::array node_b_sources{ + udpard_udpip_ep_t{ .ip = 0x0A000011U, .port = 7500U }, + udpard_udpip_ep_t{ .ip = 0x0A000012U, .port = 7501U }, + udpard_udpip_ep_t{ .ip = 0x0A000013U, .port = 7502U }, + }; + + constexpr uint64_t topic_hash = 0x0123456789ABCDEFULL; + constexpr uint64_t transfer_id = 42; + const udpard_udpip_ep_t topic_multicast = udpard_make_subject_endpoint(111); + + // ================================================================================================================ + // TX/RX PIPELINES - One TX and one RX per node + // ================================================================================================================ + // Node A: single TX, single RX (linked to TX for ACK processing) + udpard_tx_t a_tx{}; + std::vector a_frames; + TEST_ASSERT_TRUE(udpard_tx_new(&a_tx, node_a_uid, 100, 64, a_tx_mem, &tx_vtable)); + a_tx.user = &a_frames; + a_tx.ack_baseline_timeout = 10000; + + udpard_rx_t a_rx{}; + udpard_rx_new(&a_rx, &a_tx); + NodeAResponseContext a_response_ctx{}; + NodeContext a_node_ctx{ .topic_ctx = nullptr, .response_ctx = &a_response_ctx }; + a_rx.user = &a_node_ctx; + + // A's P2P port for receiving responses and ACKs + udpard_rx_port_p2p_t a_p2p_port{}; + TEST_ASSERT_TRUE(udpard_rx_port_new_p2p(&a_p2p_port, node_a_uid, 4096, a_rx_mem, &p2p_response_callbacks)); + + // Node B: single TX, single RX (linked to TX for ACK processing) + udpard_tx_t b_tx{}; + std::vector b_frames; + TEST_ASSERT_TRUE(udpard_tx_new(&b_tx, node_b_uid, 200, 64, b_tx_mem, &tx_vtable)); + b_tx.user = &b_frames; + b_tx.ack_baseline_timeout = 10000; + + udpard_rx_t b_rx{}; + udpard_rx_new(&b_rx, &b_tx); + NodeBTopicContext b_topic_ctx{}; + NodeContext b_node_ctx{ .topic_ctx = &b_topic_ctx, .response_ctx = nullptr }; + b_rx.user = &b_node_ctx; + + // B's topic subscription port + udpard_rx_port_t b_topic_port{}; + TEST_ASSERT_TRUE(udpard_rx_port_new( + &b_topic_port, topic_hash, 4096, UDPARD_RX_REORDERING_WINDOW_UNORDERED, b_rx_mem, &topic_callbacks)); + + // B's P2P port for receiving response ACKs + udpard_rx_port_p2p_t b_p2p_port{}; + TEST_ASSERT_TRUE( + udpard_rx_port_new_p2p(&b_p2p_port, node_b_uid, UDPARD_P2P_HEADER_BYTES, b_rx_mem, &ack_only_callbacks)); + + // ================================================================================================================ + // PAYLOADS AND FEEDBACK STATES + // ================================================================================================================ + const std::vector topic_payload = { 0x01, 0x02, 0x03, 0x04, 0x05 }; + const std::vector response_payload = { 0xAA, 0xBB, 0xCC, 0xDD }; + const udpard_bytes_scattered_t topic_payload_scat = make_scattered(topic_payload.data(), topic_payload.size()); + + FeedbackState a_topic_fb{}; + FeedbackState b_response_fb{}; + + // ================================================================================================================ + // STEP 1: Node A publishes a reliable topic message + // ================================================================================================================ + udpard_us_t now = 0; + std::array topic_dest{}; + topic_dest[0] = topic_multicast; + TEST_ASSERT_GREATER_THAN_UINT32(0U, + udpard_tx_push(&a_tx, + now, + now + 1000000, + udpard_prio_nominal, + topic_hash, + topic_dest.data(), + transfer_id, + topic_payload_scat, + &record_feedback, + &a_topic_fb)); + a_frames.clear(); + udpard_tx_poll(&a_tx, now, UDPARD_IFACE_MASK_ALL); + TEST_ASSERT_FALSE(a_frames.empty()); + + // ================================================================================================================ + // STEP 2: Deliver topic message to Node B + // ================================================================================================================ + for (const auto& frame : a_frames) { + TEST_ASSERT_TRUE(udpard_rx_port_push(&b_rx, + &b_topic_port, + now, + node_a_sources[frame.iface_index], + frame.datagram, + tx_payload_deleter, + frame.iface_index)); + } + udpard_rx_poll(&b_rx, now); + a_frames.clear(); + + // Verify B received the message + TEST_ASSERT_EQUAL_size_t(1, b_topic_ctx.message_count); + TEST_ASSERT_EQUAL_UINT64(node_a_uid, b_topic_ctx.sender_uid); + TEST_ASSERT_EQUAL_size_t(topic_payload.size(), b_topic_ctx.received_payload.size()); + TEST_ASSERT_EQUAL_MEMORY(topic_payload.data(), b_topic_ctx.received_payload.data(), topic_payload.size()); + + // ================================================================================================================ + // STEP 3: Node B sends ACK back to A (for the topic message) - via b_tx since b_rx is linked to it + // ================================================================================================================ + b_frames.clear(); + udpard_tx_poll(&b_tx, now, UDPARD_IFACE_MASK_ALL); + + // Deliver ACK frames to A + for (const auto& frame : b_frames) { + TEST_ASSERT_TRUE(udpard_rx_port_push(&a_rx, + reinterpret_cast(&a_p2p_port), + now, + node_b_sources[frame.iface_index], + frame.datagram, + tx_payload_deleter, + frame.iface_index)); + } + udpard_rx_poll(&a_rx, now); + b_frames.clear(); + + // Now A should have received the ACK - poll to process feedback + now += 100; + udpard_tx_poll(&a_tx, now, UDPARD_IFACE_MASK_ALL); + TEST_ASSERT_EQUAL_size_t(1, a_topic_fb.count); + TEST_ASSERT_TRUE(a_topic_fb.success); + TEST_ASSERT_EQUAL_UINT64(topic_hash, a_topic_fb.topic_hash); + TEST_ASSERT_EQUAL_UINT64(transfer_id, a_topic_fb.transfer_id); + + // ================================================================================================================ + // STEP 4: Node B sends a reliable P2P response to A + // ================================================================================================================ + udpard_remote_t remote_a{}; + remote_a.uid = b_topic_ctx.sender_uid; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + remote_a.endpoints[i] = node_a_sources[i]; + } + + const udpard_bytes_scattered_t response_scat = make_scattered(response_payload.data(), response_payload.size()); + TEST_ASSERT_GREATER_THAN_UINT32(0U, + udpard_tx_push_p2p(&b_tx, + now, + now + 1000000, + udpard_prio_nominal, + b_topic_ctx.received_topic, + b_topic_ctx.received_tid, + remote_a, + response_scat, + &record_feedback, + &b_response_fb)); + + b_frames.clear(); + udpard_tx_poll(&b_tx, now, UDPARD_IFACE_MASK_ALL); + TEST_ASSERT_FALSE(b_frames.empty()); + + // Deliver response frames to A + for (const auto& frame : b_frames) { + TEST_ASSERT_TRUE(udpard_rx_port_push(&a_rx, + reinterpret_cast(&a_p2p_port), + now, + node_b_sources[frame.iface_index], + frame.datagram, + tx_payload_deleter, + frame.iface_index)); + } + udpard_rx_poll(&a_rx, now); + b_frames.clear(); + + // Verify A received the response + TEST_ASSERT_EQUAL_size_t(1, a_response_ctx.response_count); + TEST_ASSERT_EQUAL_UINT64(topic_hash, a_response_ctx.topic_hash); + TEST_ASSERT_EQUAL_UINT64(transfer_id, a_response_ctx.transfer_id); + TEST_ASSERT_EQUAL_size_t(response_payload.size(), a_response_ctx.received_response.size()); + TEST_ASSERT_EQUAL_MEMORY(response_payload.data(), a_response_ctx.received_response.data(), response_payload.size()); + + // ================================================================================================================ + // STEP 5: A sends ACK for the response back to B - via a_tx since a_rx is linked to it + // ================================================================================================================ + a_frames.clear(); + udpard_tx_poll(&a_tx, now, UDPARD_IFACE_MASK_ALL); + + // Deliver ACK frames to B + for (const auto& frame : a_frames) { + TEST_ASSERT_TRUE(udpard_rx_port_push(&b_rx, + reinterpret_cast(&b_p2p_port), + now, + node_a_sources[frame.iface_index], + frame.datagram, + tx_payload_deleter, + frame.iface_index)); + } + udpard_rx_poll(&b_rx, now); + a_frames.clear(); + + // Now B should have received the ACK for the response + now += 100; + udpard_tx_poll(&b_tx, now, UDPARD_IFACE_MASK_ALL); + TEST_ASSERT_EQUAL_size_t(1, b_response_fb.count); + TEST_ASSERT_TRUE(b_response_fb.success); + + // ================================================================================================================ + // CLEANUP + // ================================================================================================================ + udpard_rx_port_free(&b_rx, &b_topic_port); + udpard_rx_port_free(&b_rx, reinterpret_cast(&b_p2p_port)); + udpard_rx_port_free(&a_rx, reinterpret_cast(&a_p2p_port)); + udpard_tx_free(&a_tx); + udpard_tx_free(&b_tx); + + TEST_ASSERT_EQUAL_size_t(0, a_tx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, a_tx_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, a_rx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, a_rx_alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, b_tx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, b_tx_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, b_rx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, b_rx_alloc_session.allocated_fragments); + + instrumented_allocator_reset(&a_tx_alloc_transfer); + instrumented_allocator_reset(&a_tx_alloc_payload); + instrumented_allocator_reset(&a_rx_alloc_frag); + instrumented_allocator_reset(&a_rx_alloc_session); + instrumented_allocator_reset(&b_tx_alloc_transfer); + instrumented_allocator_reset(&b_tx_alloc_payload); + instrumented_allocator_reset(&b_rx_alloc_frag); + instrumented_allocator_reset(&b_rx_alloc_session); +} + +// -------------------------------------------------------------------------------------------------------------------- +// TEST: Topic message and response with simulated losses +// -------------------------------------------------------------------------------------------------------------------- + +/// Same as above, but with simulated packet loss on both the response and the response ACK. +/// Tests that reliable delivery works correctly with retransmissions. +/// Each node uses exactly one TX and one RX instance. +void test_topic_with_p2p_response_under_loss() +{ + seed_prng(); + + // ================================================================================================================ + // ALLOCATORS - One TX and one RX per node + // ================================================================================================================ + instrumented_allocator_t a_tx_alloc_transfer{}; + instrumented_allocator_t a_tx_alloc_payload{}; + instrumented_allocator_t a_rx_alloc_frag{}; + instrumented_allocator_t a_rx_alloc_session{}; + instrumented_allocator_new(&a_tx_alloc_transfer); + instrumented_allocator_new(&a_tx_alloc_payload); + instrumented_allocator_new(&a_rx_alloc_frag); + instrumented_allocator_new(&a_rx_alloc_session); + + instrumented_allocator_t b_tx_alloc_transfer{}; + instrumented_allocator_t b_tx_alloc_payload{}; + instrumented_allocator_t b_rx_alloc_frag{}; + instrumented_allocator_t b_rx_alloc_session{}; + instrumented_allocator_new(&b_tx_alloc_transfer); + instrumented_allocator_new(&b_tx_alloc_payload); + instrumented_allocator_new(&b_rx_alloc_frag); + instrumented_allocator_new(&b_rx_alloc_session); + + // ================================================================================================================ + // MEMORY RESOURCES + // ================================================================================================================ + udpard_tx_mem_resources_t a_tx_mem{}; + a_tx_mem.transfer = instrumented_allocator_make_resource(&a_tx_alloc_transfer); + for (auto& res : a_tx_mem.payload) { + res = instrumented_allocator_make_resource(&a_tx_alloc_payload); + } + const udpard_rx_mem_resources_t a_rx_mem{ .session = instrumented_allocator_make_resource(&a_rx_alloc_session), + .fragment = instrumented_allocator_make_resource(&a_rx_alloc_frag) }; + + udpard_tx_mem_resources_t b_tx_mem{}; + b_tx_mem.transfer = instrumented_allocator_make_resource(&b_tx_alloc_transfer); + for (auto& res : b_tx_mem.payload) { + res = instrumented_allocator_make_resource(&b_tx_alloc_payload); + } + const udpard_rx_mem_resources_t b_rx_mem{ .session = instrumented_allocator_make_resource(&b_rx_alloc_session), + .fragment = instrumented_allocator_make_resource(&b_rx_alloc_frag) }; + + // ================================================================================================================ + // NODE UIDs AND ENDPOINTS + // ================================================================================================================ + constexpr uint64_t node_a_uid = 0x1111AAAA2222BBBBULL; + constexpr uint64_t node_b_uid = 0x3333CCCC4444DDDDULL; + + const std::array node_a_sources{ + udpard_udpip_ep_t{ .ip = 0x0A000021U, .port = 8400U }, + udpard_udpip_ep_t{}, + udpard_udpip_ep_t{}, + }; + const std::array node_b_sources{ + udpard_udpip_ep_t{ .ip = 0x0A000031U, .port = 8500U }, + udpard_udpip_ep_t{}, + udpard_udpip_ep_t{}, + }; + + constexpr uint64_t topic_hash = 0xFEDCBA9876543210ULL; + constexpr uint64_t transfer_id = 99; + const udpard_udpip_ep_t topic_multicast = udpard_make_subject_endpoint(222); + + // ================================================================================================================ + // TX/RX PIPELINES - One TX and one RX per node + // ================================================================================================================ + udpard_tx_t a_tx{}; + std::vector a_frames; + TEST_ASSERT_TRUE(udpard_tx_new(&a_tx, node_a_uid, 100, 64, a_tx_mem, &tx_vtable)); + a_tx.user = &a_frames; + a_tx.ack_baseline_timeout = 8000; + + udpard_rx_t a_rx{}; + udpard_rx_new(&a_rx, &a_tx); + NodeAResponseContext a_response_ctx{}; + NodeContext a_node_ctx{ .topic_ctx = nullptr, .response_ctx = &a_response_ctx }; + a_rx.user = &a_node_ctx; + + udpard_rx_port_p2p_t a_p2p_port{}; + TEST_ASSERT_TRUE(udpard_rx_port_new_p2p(&a_p2p_port, node_a_uid, 4096, a_rx_mem, &p2p_response_callbacks)); + + udpard_tx_t b_tx{}; + std::vector b_frames; + TEST_ASSERT_TRUE(udpard_tx_new(&b_tx, node_b_uid, 200, 64, b_tx_mem, &tx_vtable)); + b_tx.user = &b_frames; + b_tx.ack_baseline_timeout = 8000; + + udpard_rx_t b_rx{}; + udpard_rx_new(&b_rx, &b_tx); + NodeBTopicContext b_topic_ctx{}; + NodeContext b_node_ctx{ .topic_ctx = &b_topic_ctx, .response_ctx = nullptr }; + b_rx.user = &b_node_ctx; + + udpard_rx_port_t b_topic_port{}; + TEST_ASSERT_TRUE(udpard_rx_port_new( + &b_topic_port, topic_hash, 4096, UDPARD_RX_REORDERING_WINDOW_UNORDERED, b_rx_mem, &topic_callbacks)); + + udpard_rx_port_p2p_t b_p2p_port{}; + TEST_ASSERT_TRUE( + udpard_rx_port_new_p2p(&b_p2p_port, node_b_uid, UDPARD_P2P_HEADER_BYTES, b_rx_mem, &ack_only_callbacks)); + + // ================================================================================================================ + // PAYLOADS AND FEEDBACK STATES + // ================================================================================================================ + const std::vector topic_payload = { 0x10, 0x20, 0x30 }; + const std::vector response_payload = { 0xDE, 0xAD, 0xBE, 0xEF }; + const udpard_bytes_scattered_t topic_payload_scat = make_scattered(topic_payload.data(), topic_payload.size()); + + FeedbackState a_topic_fb{}; + FeedbackState b_response_fb{}; + + // ================================================================================================================ + // STEP 1: Node A publishes a reliable topic message + // ================================================================================================================ + udpard_us_t now = 0; + std::array topic_dest{}; + topic_dest[0] = topic_multicast; + TEST_ASSERT_GREATER_THAN_UINT32(0U, + udpard_tx_push(&a_tx, + now, + now + 500000, + udpard_prio_fast, + topic_hash, + topic_dest.data(), + transfer_id, + topic_payload_scat, + &record_feedback, + &a_topic_fb)); + + // ================================================================================================================ + // SIMULATION LOOP WITH LOSSES + // ================================================================================================================ + size_t iterations = 0; + constexpr size_t max_iterations = 30; + bool first_response_dropped = false; + bool first_resp_ack_dropped = false; + bool response_sent = false; + + while (iterations < max_iterations) { + iterations++; + + // --- Node A transmits (topic message, topic ACKs, or response ACKs) --- + a_frames.clear(); + udpard_tx_poll(&a_tx, now, UDPARD_IFACE_MASK_ALL); + + for (const auto& frame : a_frames) { + if (b_topic_ctx.message_count == 0) { + // Topic message frames go to B's topic port + (void)udpard_rx_port_push(&b_rx, + &b_topic_port, + now, + node_a_sources[frame.iface_index], + frame.datagram, + tx_payload_deleter, + frame.iface_index); + } else { + // Response ACK frames go to B's P2P port + if (!first_resp_ack_dropped && (a_response_ctx.response_count > 0) && (b_response_fb.count == 0)) { + first_resp_ack_dropped = true; + drop_frame(frame); + continue; + } + + (void)udpard_rx_port_push(&b_rx, + reinterpret_cast(&b_p2p_port), + now, + node_a_sources[frame.iface_index], + frame.datagram, + tx_payload_deleter, + frame.iface_index); + } + } + a_frames.clear(); + udpard_rx_poll(&b_rx, now); + + // --- Node B transmits (topic ACKs first, before pushing response) --- + b_frames.clear(); + udpard_tx_poll(&b_tx, now, UDPARD_IFACE_MASK_ALL); + + // Deliver B's frames (topic ACKs) to A before pushing response + for (const auto& frame : b_frames) { + (void)udpard_rx_port_push(&a_rx, + reinterpret_cast(&a_p2p_port), + now, + node_b_sources[frame.iface_index], + frame.datagram, + tx_payload_deleter, + frame.iface_index); + } + b_frames.clear(); + udpard_rx_poll(&a_rx, now); + + // --- If B received topic, send response --- + if ((b_topic_ctx.message_count > 0) && !response_sent) { + response_sent = true; + + udpard_remote_t remote_a{}; + remote_a.uid = b_topic_ctx.sender_uid; + remote_a.endpoints[0] = node_a_sources[0]; + + const udpard_bytes_scattered_t response_scat = + make_scattered(response_payload.data(), response_payload.size()); + TEST_ASSERT_GREATER_THAN_UINT32(0U, + udpard_tx_push_p2p(&b_tx, + now, + now + 500000, + udpard_prio_fast, + b_topic_ctx.received_topic, + b_topic_ctx.received_tid, + remote_a, + response_scat, + &record_feedback, + &b_response_fb)); + } + + // --- Node B transmits (responses) --- + b_frames.clear(); + udpard_tx_poll(&b_tx, now, UDPARD_IFACE_MASK_ALL); + + for (const auto& frame : b_frames) { + // Check if this frame has a payload (response) vs just an ACK + // Response frames have payload data beyond the P2P header + const bool has_payload = frame.datagram.size > UDPARD_P2P_HEADER_BYTES; + + // Drop first response (with payload) to test retransmission + if (!first_response_dropped && response_sent && has_payload) { + first_response_dropped = true; + drop_frame(frame); + continue; + } + + (void)udpard_rx_port_push(&a_rx, + reinterpret_cast(&a_p2p_port), + now, + node_b_sources[frame.iface_index], + frame.datagram, + tx_payload_deleter, + frame.iface_index); + } + b_frames.clear(); + udpard_rx_poll(&a_rx, now); + + // Check if both feedbacks have fired + if ((a_topic_fb.count > 0) && (b_response_fb.count > 0)) { + break; + } + + now += a_tx.ack_baseline_timeout + 5000; + } + + // ================================================================================================================ + // VERIFY + // ================================================================================================================ + TEST_ASSERT_LESS_THAN_size_t(max_iterations, iterations); + TEST_ASSERT_TRUE(first_response_dropped); + TEST_ASSERT_TRUE(first_resp_ack_dropped); + + TEST_ASSERT_EQUAL_size_t(1, a_topic_fb.count); + TEST_ASSERT_TRUE(a_topic_fb.success); + + TEST_ASSERT_EQUAL_size_t(1, b_response_fb.count); + TEST_ASSERT_TRUE(b_response_fb.success); + + TEST_ASSERT_GREATER_OR_EQUAL_size_t(1, b_topic_ctx.message_count); + TEST_ASSERT_EQUAL_size_t(1, a_response_ctx.response_count); + TEST_ASSERT_EQUAL_size_t(response_payload.size(), a_response_ctx.received_response.size()); + TEST_ASSERT_EQUAL_MEMORY(response_payload.data(), a_response_ctx.received_response.data(), response_payload.size()); + + // ================================================================================================================ + // CLEANUP + // ================================================================================================================ + udpard_rx_port_free(&b_rx, &b_topic_port); + udpard_rx_port_free(&b_rx, reinterpret_cast(&b_p2p_port)); + udpard_rx_port_free(&a_rx, reinterpret_cast(&a_p2p_port)); + udpard_tx_free(&a_tx); + udpard_tx_free(&b_tx); + + TEST_ASSERT_EQUAL_size_t(0, a_tx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, a_tx_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, a_rx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, a_rx_alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, b_tx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, b_tx_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, b_rx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, b_rx_alloc_session.allocated_fragments); + + instrumented_allocator_reset(&a_tx_alloc_transfer); + instrumented_allocator_reset(&a_tx_alloc_payload); + instrumented_allocator_reset(&a_rx_alloc_frag); + instrumented_allocator_reset(&a_rx_alloc_session); + instrumented_allocator_reset(&b_tx_alloc_transfer); + instrumented_allocator_reset(&b_tx_alloc_payload); + instrumented_allocator_reset(&b_rx_alloc_frag); + instrumented_allocator_reset(&b_rx_alloc_session); +} + +} // namespace + +extern "C" void setUp() {} + +extern "C" void tearDown() {} + +int main() +{ + UNITY_BEGIN(); + RUN_TEST(test_topic_with_p2p_response); + RUN_TEST(test_topic_with_p2p_response_under_loss); + return UNITY_END(); +} From 06ddbe1061a93f427b3928c03fdd4bdf122e6b00 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sun, 28 Dec 2025 19:41:41 +0200 Subject: [PATCH 41/42] linter --- .clang-tidy | 2 +- libudpard/udpard.h | 1 + tests/.clang-tidy | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.clang-tidy b/.clang-tidy index 70afdfd..503d9af 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -26,7 +26,7 @@ Checks: >- -*-macro-usage, -*-enum-size, -*-use-using, - -bugprone-casting-through-void, + -*-casting-through-void, -misc-include-cleaner, -cppcoreguidelines-avoid-do-while, -*-magic-numbers, diff --git a/libudpard/udpard.h b/libudpard/udpard.h index bd12af7..9c4e383 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -133,6 +133,7 @@ typedef struct udpard_list_member_t struct udpard_list_member_t* next; struct udpard_list_member_t* prev; } udpard_list_member_t; + typedef struct udpard_list_t { udpard_list_member_t* head; ///< NULL if list empty diff --git a/tests/.clang-tidy b/tests/.clang-tidy index 116c02b..942b2b5 100644 --- a/tests/.clang-tidy +++ b/tests/.clang-tidy @@ -54,6 +54,7 @@ Checks: >- -*-pro-bounds-avoid-unchecked-container-access, -*-array*decay, -*-avoid-c-arrays, + -*-casting-through-void, -*-named-parameter, WarningsAsErrors: '*' HeaderFilterRegex: '.*\.hpp' From 2d337395c0f15a16fa999651e5dae2a409435885 Mon Sep 17 00:00:00 2001 From: Pavel Kirienko Date: Sun, 28 Dec 2025 19:53:27 +0200 Subject: [PATCH 42/42] full test coverage --- libudpard/udpard.c | 1 + tests/src/test_e2e_edge.cpp | 95 +++++++++++++++++++++++++++++++++++++ 2 files changed, 96 insertions(+) diff --git a/libudpard/udpard.c b/libudpard/udpard.c index 48f6ae5..7b87a33 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -2268,6 +2268,7 @@ static void rx_p2p_on_message(udpard_rx_t* const rx, udpard_rx_port_t* const por } else if (kind == P2P_KIND_RESPONSE) { self->vtable->on_message(rx, self, (udpard_rx_transfer_p2p_t){ .base = transfer, .topic_hash = topic_hash }); } else { // malformed + ++rx->errors_transfer_malformed; udpard_fragment_free_all(transfer.payload, port->memory.fragment); } } diff --git a/tests/src/test_e2e_edge.cpp b/tests/src/test_e2e_edge.cpp index 11ba254..c1da189 100644 --- a/tests/src/test_e2e_edge.cpp +++ b/tests/src/test_e2e_edge.cpp @@ -456,6 +456,100 @@ void test_udpard_tx_push_p2p() instrumented_allocator_reset(&rx_alloc_session); } +/// P2P messages with invalid kind byte should be silently dropped. +/// This tests the malformed branch in rx_p2p_on_message. +void test_udpard_rx_p2p_malformed_kind() +{ + instrumented_allocator_t tx_alloc_transfer{}; + instrumented_allocator_t tx_alloc_payload{}; + instrumented_allocator_t rx_alloc_frag{}; + instrumented_allocator_t rx_alloc_session{}; + instrumented_allocator_new(&tx_alloc_transfer); + instrumented_allocator_new(&tx_alloc_payload); + instrumented_allocator_new(&rx_alloc_frag); + instrumented_allocator_new(&rx_alloc_session); + + udpard_tx_mem_resources_t tx_mem{}; + tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer); + for (auto& res : tx_mem.payload) { + res = instrumented_allocator_make_resource(&tx_alloc_payload); + } + udpard_tx_t tx{}; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x1122334455667788ULL, 5U, 8, tx_mem, &tx_vtable)); + std::vector frames; + tx.user = &frames; + + const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), + .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; + udpard_rx_t rx{}; + udpard_rx_port_p2p_t port{}; + Context ctx{}; + const udpard_udpip_ep_t source{ .ip = 0x0A0000BBU, .port = 7700U }; + const uint64_t local_uid = 0xDEADBEEFCAFEBABEULL; + ctx.expected_uid = tx.local_uid; + ctx.source = source; + udpard_rx_new(&rx, nullptr); + rx.user = &ctx; + TEST_ASSERT_TRUE(udpard_rx_port_new_p2p(&port, local_uid, 1024, rx_mem, &p2p_callbacks)); + + // Construct a P2P payload with an invalid kind byte. + // P2P header format: kind (1 byte) + reserved (7 bytes) + topic_hash (8 bytes) + transfer_id (8 bytes) = 24 bytes + // Valid kinds are 0 (P2P_KIND_RESPONSE) and 1 (P2P_KIND_ACK). Use 0xFF as invalid. + std::array p2p_payload{}; + p2p_payload[0] = 0xFFU; // Invalid kind + // Rest of P2P header (reserved, topic_hash, transfer_id) can be zeros - doesn't matter for this test. + // Add some user payload bytes. + p2p_payload[UDPARD_P2P_HEADER_BYTES + 0] = 0x11U; + p2p_payload[UDPARD_P2P_HEADER_BYTES + 1] = 0x22U; + p2p_payload[UDPARD_P2P_HEADER_BYTES + 2] = 0x33U; + p2p_payload[UDPARD_P2P_HEADER_BYTES + 3] = 0x44U; + + // Send using regular udpard_tx_push - the library handles all CRC calculations. + const udpard_us_t now = 0; + const udpard_bytes_scattered_t payload = make_scattered(p2p_payload.data(), p2p_payload.size()); + std::array dest{}; + dest[0] = { .ip = 0x0A000010U, .port = 7400U }; + TEST_ASSERT_GREATER_THAN_UINT32(0U, + udpard_tx_push(&tx, + now, + now + 1000000, + udpard_prio_nominal, + local_uid, // topic_hash = local_uid for P2P port matching + dest.data(), + 42U, + payload, + nullptr, + nullptr)); + udpard_tx_poll(&tx, now, UDPARD_IFACE_MASK_ALL); + TEST_ASSERT_FALSE(frames.empty()); + + // Push the frame to RX P2P port. + TEST_ASSERT_EQUAL_UINT64(0, rx.errors_transfer_malformed); + const udpard_mem_deleter_t tx_payload_deleter{ .user = nullptr, .free = &tx_refcount_free }; + for (const auto& f : frames) { + TEST_ASSERT_TRUE(udpard_rx_port_push( + &rx, reinterpret_cast(&port), now, source, f.datagram, tx_payload_deleter, f.iface_index)); + } + udpard_rx_poll(&rx, now); + + // The malformed message should be dropped - no callback invoked, error counter incremented. + TEST_ASSERT_EQUAL_size_t(0, ctx.ids.size()); + TEST_ASSERT_EQUAL_size_t(0, ctx.collisions); + TEST_ASSERT_EQUAL_UINT64(1, rx.errors_transfer_malformed); + + // Cleanup - verify no memory leaks. + udpard_rx_port_free(&rx, reinterpret_cast(&port)); + udpard_tx_free(&tx); + TEST_ASSERT_EQUAL(0, tx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL(0, tx_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, rx_alloc_frag.allocated_fragments); + TEST_ASSERT_EQUAL(0, rx_alloc_session.allocated_fragments); + instrumented_allocator_reset(&tx_alloc_transfer); + instrumented_allocator_reset(&tx_alloc_payload); + instrumented_allocator_reset(&rx_alloc_frag); + instrumented_allocator_reset(&rx_alloc_session); +} + } // namespace extern "C" void setUp() {} @@ -470,5 +564,6 @@ int main() RUN_TEST(test_udpard_rx_ordered_head_advanced_late); RUN_TEST(test_udpard_tx_feedback_always_called); RUN_TEST(test_udpard_tx_push_p2p); + RUN_TEST(test_udpard_rx_p2p_malformed_kind); return UNITY_END(); }