author     Pablo Neira Ayuso <pablo@netfilter.org>    2018-10-19 11:48:24 +0200
committer  Pablo Neira Ayuso <pablo@netfilter.org>    2018-10-19 14:00:34 +0200
commit     af510ebd8913bee016492832f532ed919b51c09c (patch)
tree       078b91ac2572e5aa61029954015289ae1366df42 /net
parent     468c041cff57e87f18e1022cacf9f5c98bf00b58 (diff)
Revert "netfilter: xt_quota: fix the behavior of xt_quota module"
This reverts commit e9837e55b0200da544a095a1fca36efd7fd3ba30.

After talking to Maze and Chenbo, we agreed to hold this back for now due to problems in the ruleset listing path on 32-bit arches.

Signed-off-by: Maciej Żenczykowski <maze@google.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
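For context, here is a minimal user-space sketch of the lock-protected quota accounting that this revert restores in quota_mt(): a 64-bit remaining-byte count is only read and updated under a lock, so it can never be observed half-updated, even on 32-bit machines. This is an illustration, not the kernel code; the pthread mutex stands in for the kernel spinlock and all names are hypothetical.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct xt_quota_priv. */
struct quota_priv {
	pthread_mutex_t lock;	/* stands in for the kernel spinlock */
	uint64_t quota;		/* bytes still allowed to pass */
};

/* Charge one packet of 'len' bytes; true means the packet fit the quota. */
static bool quota_charge(struct quota_priv *priv, uint32_t len)
{
	bool allowed = false;

	pthread_mutex_lock(&priv->lock);
	if (priv->quota >= len) {
		priv->quota -= len;
		allowed = true;
	} else {
		/* quota exhausted: refuse even small packets from now on */
		priv->quota = 0;
	}
	pthread_mutex_unlock(&priv->lock);

	return allowed;
}

int main(void)
{
	struct quota_priv priv = {
		.lock  = PTHREAD_MUTEX_INITIALIZER,
		.quota = 1500,
	};

	printf("1000-byte packet allowed: %d\n", quota_charge(&priv, 1000));
	printf("600-byte packet allowed:  %d\n", quota_charge(&priv, 600));
	printf("40-byte packet allowed:   %d\n", quota_charge(&priv, 40));
	return 0;
}

In the kernel, this accounting backs rules such as "iptables -m quota --quota <bytes> -j ACCEPT".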
Diffstat (limited to 'net')
-rw-r--r--  net/netfilter/xt_quota.c  |  55
1 file changed, 33 insertions(+), 22 deletions(-)
diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c
index fceae245eb03..10d61a6eed71 100644
--- a/net/netfilter/xt_quota.c
+++ b/net/netfilter/xt_quota.c
@@ -11,6 +11,11 @@
#include <linux/netfilter/xt_quota.h>
#include <linux/module.h>
+struct xt_quota_priv {
+ spinlock_t lock;
+ uint64_t quota;
+};
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sam Johnston <samj@samj.net>");
MODULE_DESCRIPTION("Xtables: countdown quota match");
@@ -21,48 +26,54 @@ static bool
quota_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
struct xt_quota_info *q = (void *)par->matchinfo;
- u64 current_count = atomic64_read(&q->counter);
+ struct xt_quota_priv *priv = q->master;
bool ret = q->flags & XT_QUOTA_INVERT;
- u64 old_count, new_count;
-
- do {
- if (current_count == 1)
- return ret;
- if (current_count <= skb->len) {
- atomic64_set(&q->counter, 1);
- return ret;
- }
- old_count = current_count;
- new_count = current_count - skb->len;
- current_count = atomic64_cmpxchg(&q->counter, old_count,
- new_count);
- } while (current_count != old_count);
- return !ret;
+
+ spin_lock_bh(&priv->lock);
+ if (priv->quota >= skb->len) {
+ priv->quota -= skb->len;
+ ret = !ret;
+ } else {
+ /* we do not allow even small packets from now on */
+ priv->quota = 0;
+ }
+ spin_unlock_bh(&priv->lock);
+
+ return ret;
}
static int quota_mt_check(const struct xt_mtchk_param *par)
{
struct xt_quota_info *q = par->matchinfo;
- BUILD_BUG_ON(sizeof(atomic64_t) != sizeof(__u64));
-
if (q->flags & ~XT_QUOTA_MASK)
return -EINVAL;
- if (atomic64_read(&q->counter) > q->quota + 1)
- return -ERANGE;
- if (atomic64_read(&q->counter) == 0)
- atomic64_set(&q->counter, q->quota + 1);
+ q->master = kmalloc(sizeof(*q->master), GFP_KERNEL);
+ if (q->master == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&q->master->lock);
+ q->master->quota = q->quota;
return 0;
}
+static void quota_mt_destroy(const struct xt_mtdtor_param *par)
+{
+ const struct xt_quota_info *q = par->matchinfo;
+
+ kfree(q->master);
+}
+
static struct xt_match quota_mt_reg __read_mostly = {
.name = "quota",
.revision = 0,
.family = NFPROTO_UNSPEC,
.match = quota_mt,
.checkentry = quota_mt_check,
+ .destroy = quota_mt_destroy,
.matchsize = sizeof(struct xt_quota_info),
+ .usersize = offsetof(struct xt_quota_info, master),
.me = THIS_MODULE,
};