From 931ebd1dc5ab477731eece5bc5bd7864fbf5e68e Mon Sep 17 00:00:00 2001
From: Christian Hopps
Date: Sat, 9 Sep 2023 04:56:28 -0400
Subject: [PATCH] checkpoint

---
 net/xfrm/xfrm_iptfs.c | 66 ++++++++++++++++++++++++++++++++++---------
 1 file changed, 52 insertions(+), 14 deletions(-)

diff --git a/net/xfrm/xfrm_iptfs.c b/net/xfrm/xfrm_iptfs.c
index 44202856e99fc6..717bbb1ab763a9 100644
--- a/net/xfrm/xfrm_iptfs.c
+++ b/net/xfrm/xfrm_iptfs.c
@@ -1798,6 +1798,26 @@ static struct sk_buff *iptfs_alloc_header_skb(void)
 }
 #endif
 
+/**
+ * iptfs_alloc_header_for_skb() - alloc skb to hold headers for a data skb
+ */
+static struct sk_buff *iptfs_alloc_header_for_skb(struct sk_buff *data_skb,
+						  uint data_len)
+{
+	uint resv = XFRM_IPTFS_MIN_HEADROOM;
+	struct sk_buff *skb;
+
+	skb = alloc_skb(data_len + resv, GFP_ATOMIC);
+	if (!skb) {
+		XFRM_INC_STATS(dev_net(data_skb->dev), LINUX_MIB_XFRMINERROR);
+		pr_err_ratelimited("failed to alloc skb\n");
+		return NULL;
+	}
+	pr_devinf("resv=%u data_len=%u skb %p\n", resv, data_len, skb);
+	skb_reserve(skb, resv);
+	return skb;
+}
+
 static int iptfs_xfrm_output(struct sk_buff *skb, uint remaining)
 {
 	int err;
@@ -1964,11 +1984,38 @@ static int iptfs_copy_create_frags(struct sk_buff **skbp,
 	return 0;
 }
 
+static bool iptfs_first_should_copy(struct sk_buff *first_skb, uint mtu)
+{
+	uint frag_copy_max;
+
+	/* If we have less than frag_copy_max for remaining packet we copy
+	 * those tail bytes as it is more efficient.
+	 */
+	frag_copy_max = mtu <= IPTFS_FRAG_COPY_MAX ? mtu : IPTFS_FRAG_COPY_MAX;
+	if (first_skb->len - mtu < frag_copy_max)
+		return true;
+
+	/* We actually want to use our nice clone algorithm here */
+	if (!skb_is_nonlinear(first_skb))
+		return false;
+
+	/* Also copy if we have skb fragment lists, as these should be
+	 * uncommon, and dealing with lists and page fragments is not
+	 * worth the complexity given that.
+	 */
+	if (skb_shinfo(first_skb)->frag_list)
+		return true;
+
+	/* Here we have nr_frags which we could split and share */
+
+	/* For now we also copy under all other conditions */
+	return true;
+}
+
 static int iptfs_first_skb(struct sk_buff **skbp, struct xfrm_iptfs_data *xtfs,
 			   uint mtu)
 {
 	struct sk_buff *skb = *skbp;
-	uint frag_copy_max;
 	int err;
 
 	/*
@@ -2004,20 +2051,9 @@ static int iptfs_first_skb(struct sk_buff **skbp, struct xfrm_iptfs_data *xtfs,
 
 	BUG_ON(xtfs->cfg.dont_frag);
 
-	/* If we have less than frag_copy_max for remaining packet we copy
-	 * those tail bytes as it is more efficient.
-	 *
-	 * Also if we have skb fragment lists, as these should be uncommon and
-	 * dealing with lists and page fragments is not worth the complexity
-	 * given that.
-	 */
-
-	frag_copy_max = mtu <= IPTFS_FRAG_COPY_MAX ? mtu : IPTFS_FRAG_COPY_MAX;
-	if (1 || (skb->len - mtu < frag_copy_max) ||
-	    (skb_is_nonlinear(skb) && skb_shinfo(skb)->frag_list)) {
-		/* create frags with copies */
+	if (iptfs_first_should_copy(skb, mtu))
 		return iptfs_copy_create_frags(skbp, xtfs, mtu);
-	}
+
 
 #if 0
 	/* A user packet has come in on from an interface with larger
@@ -2045,6 +2081,7 @@ static int iptfs_first_skb(struct sk_buff **skbp, struct xfrm_iptfs_data *xtfs,
 	 * testing, if this is a problem try really complex page sharing
 	 * thing if we have to. This is not a common code path, though.
	 */
+	/* XXX we don't want to be here with non-linear for clones below */
 	if (skb_is_nonlinear(skb)) {
 		pr_info_once("LINEARIZE: skb len %u\n", skb->len);
 		err = __skb_linearize(skb);
@@ -2075,6 +2112,7 @@ static int iptfs_first_skb(struct sk_buff **skbp, struct xfrm_iptfs_data *xtfs,
 	head_skb->dev = skb->dev;
 	memcpy(head_skb->cb, skb->cb, sizeof(skb->cb));
 	skb_dst_copy(head_skb, skb);
+	/* XXX need to inc secpath refcnt */
 	__skb_ext_copy(head_skb, skb);
 	__nf_copy(head_skb, skb, false);
 }