author    Paul Blakey <paulb@nvidia.com>    2022-01-03 13:44:50 +0200
committer David S. Miller <davem@davemloft.net>    2022-01-04 12:12:55 +0000
commit    9795ded7f924b6486e54976619b1b094fcc1969d
tree      6abaf6b26a219230d6d4d9a62515c27e3b4f03f7 /net/sched
parent    9d2c27aad0ea2c84413d0971be7fa340ba4617bc
net/sched: act_ct: Fill offloading tuple iifidx
Drivers offloading ct tuples can use the information of which devices received the packets that created the offloaded connections, to more efficiently offload them only to the relevant device.

Add a new act_ct nf conntrack extension, which is used to store the skb devices before offloading the connection, and then fill in the tuple iifindex so drivers can get the device via a metadata dissector match.

Signed-off-by: Oz Shlomo <ozsh@nvidia.com>
Signed-off-by: Paul Blakey <paulb@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
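For context, the helpers used in the hunks below come from the new <net/netfilter/nf_conntrack_act_ct.h> header added alongside this change (it lives outside net/sched, so it is not part of the diffstat). A minimal sketch of that extension follows; the helper bodies are assumptions inferred from the call sites in this diff, not the verbatim header:

/* Sketch of the act_ct conntrack extension (not part of this diffstat).
 * NF_CT_EXT_ACT_CT is a new id added to enum nf_ct_ext_id elsewhere in
 * the series; helper bodies here are inferred from the call sites below.
 */
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>

struct nf_conn_act_ct_ext {
	int ifindex[IP_CT_DIR_MAX];
};

/* The pre-5.18 extension API resolves nf_ct_ext_find() through this define. */
#define NF_CT_EXT_ACT_CT_TYPE struct nf_conn_act_ct_ext

static inline struct nf_conn_act_ct_ext *
nf_conn_act_ct_ext_find(const struct nf_conn *ct)
{
	return nf_ct_ext_find(ct, NF_CT_EXT_ACT_CT);
}

static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *ct)
{
	/* Extensions can only be attached while the entry is unconfirmed,
	 * hence the !nf_ct_is_confirmed() guard at the call site below.
	 */
	struct nf_conn_act_ct_ext *act_ct = nf_conn_act_ct_ext_find(ct);

	if (act_ct)
		return act_ct;

	return nf_ct_ext_add(ct, NF_CT_EXT_ACT_CT, GFP_ATOMIC);
}

static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo)
{
	struct nf_conn_act_ct_ext *act_ct_ext = nf_conn_act_ct_ext_find(ct);

	/* Remember the ingress device per conntrack direction; it is later
	 * copied into the offloaded tuple's iifidx in tcf_ct_flow_table_add().
	 */
	if (act_ct_ext && skb->dev)
		act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
}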
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/act_ct.c  27
1 file changed, 27 insertions, 0 deletions
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index f9afb5abff21..ebdf7caf7084 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -32,6 +32,7 @@
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
+#include <net/netfilter/nf_conntrack_act_ct.h>
#include <uapi/linux/netfilter/nf_nat.h>
static struct workqueue_struct *act_ct_wq;
@@ -56,6 +57,12 @@ static const struct rhashtable_params zones_params = {
.automatic_shrinking = true,
};
+static struct nf_ct_ext_type act_ct_extend __read_mostly = {
+ .len = sizeof(struct nf_conn_act_ct_ext),
+ .align = __alignof__(struct nf_conn_act_ct_ext),
+ .id = NF_CT_EXT_ACT_CT,
+};
+
static struct flow_action_entry *
tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
{
@@ -358,6 +365,7 @@ static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
struct nf_conn *ct,
bool tcp)
{
+ struct nf_conn_act_ct_ext *act_ct_ext;
struct flow_offload *entry;
int err;
@@ -375,6 +383,14 @@ static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
}
+ act_ct_ext = nf_conn_act_ct_ext_find(ct);
+ if (act_ct_ext) {
+ entry->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.iifidx =
+ act_ct_ext->ifindex[IP_CT_DIR_ORIGINAL];
+ entry->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.iifidx =
+ act_ct_ext->ifindex[IP_CT_DIR_REPLY];
+ }
+
err = flow_offload_add(&ct_ft->nf_ft, entry);
if (err)
goto err_add;
@@ -1027,6 +1043,7 @@ do_nat:
if (!ct)
goto out_push;
nf_ct_deliver_cached_events(ct);
+ nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
if (err != NF_ACCEPT)
@@ -1036,6 +1053,9 @@ do_nat:
tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);
+ if (!nf_ct_is_confirmed(ct))
+ nf_conn_act_ct_ext_add(ct);
+
/* This will take care of sending queued events
* even if the connection is already confirmed.
*/
@@ -1583,10 +1603,16 @@ static int __init ct_init_module(void)
if (err)
goto err_register;
+ err = nf_ct_extend_register(&act_ct_extend);
+ if (err)
+ goto err_register_extend;
+
static_branch_inc(&tcf_frag_xmit_count);
return 0;
+err_register_extend:
+ tcf_unregister_action(&act_ct_ops, &ct_net_ops);
err_register:
tcf_ct_flow_tables_uninit();
err_tbl_init:
@@ -1597,6 +1623,7 @@ err_tbl_init:
static void __exit ct_cleanup_module(void)
{
static_branch_dec(&tcf_frag_xmit_count);
+ nf_ct_extend_unregister(&act_ct_extend);
tcf_unregister_action(&act_ct_ops, &ct_net_ops);
tcf_ct_flow_tables_uninit();
destroy_workqueue(act_ct_wq);
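On the consuming side, the iifidx filled above surfaces to drivers as the ingress_ifindex of the existing meta match on the offloaded flow rule. A rough, hypothetical driver-side check is sketched below; example_check_ingress_dev and its arguments are placeholders and not part of this patch:

#include <linux/netdevice.h>
#include <net/flow_offload.h>

/* Hypothetical driver-side sketch: skip offloading a flow whose recorded
 * ingress device is not the device this driver instance manages.
 */
static int example_check_ingress_dev(struct net_device *dev,
				     struct flow_rule *rule)
{
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;	/* no ingress device recorded, accept */

	flow_rule_match_meta(rule, &match);

	/* The tuple iifidx filled by act_ct shows up here as ingress_ifindex. */
	if (match.mask->ingress_ifindex &&
	    match.key->ingress_ifindex != dev->ifindex)
		return -EOPNOTSUPP;

	return 0;
}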