author	Kent Overstreet <koverstreet@google.com>	2012-12-21 01:27:54 -0800
committer	Kent Overstreet <koverstreet@google.com>	2012-12-21 01:27:54 -0800
commit	b69ddbf42418c41dcd69666679cb9e399f20525b (patch)
tree	5cacbe3866d9fa0a0b110b5a24c044369d79fb65
parent	29594404d7fe73cd80eaa4ee8c43dcc53970c60e (diff)

Percpu tag allocator (per-cpu-tags)

-rw-r--r--	include/linux/tags.h	38
-rw-r--r--	lib/Kconfig	3
-rw-r--r--	lib/Makefile	1
-rw-r--r--	lib/tags.c	161
4 files changed, 203 insertions(+), 0 deletions(-)
diff --git a/include/linux/tags.h b/include/linux/tags.h
new file mode 100644
index 000000000000..74b34d7f0ec7
--- /dev/null
+++ b/include/linux/tags.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012 Google Inc. All Rights Reserved.
+ * Author: koverstreet@google.com (Kent Overstreet)
+ *
+ * Per cpu tag allocator.
+ */
+
+#ifndef _LINUX_TAGS_H
+#define _LINUX_TAGS_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+struct tag_cpu_freelist;
+
+struct tag_pool {
+	uint16_t watermark;
+	uint16_t nr_tags;
+
+	struct tag_cpu_freelist *tag_cpu;
+
+	struct {
+		/* Global freelist */
+		uint16_t nr_free;
+		uint16_t *free;
+		spinlock_t lock;
+		struct list_head wait;
+	} ____cacheline_aligned;
+};
+
+uint16_t tag_alloc(struct tag_pool *pool, bool wait);
+void tag_free(struct tag_pool *pool, uint16_t tag);
+
+void tag_pool_free(struct tag_pool *pool);
+int tag_pool_init(struct tag_pool *pool, uint16_t nr_tags);
+
+
+#endif
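
The interface above is meant to be used by embedding a struct tag_pool in the caller's own state: size it once with tag_pool_init(), hand tags out with tag_alloc()/tag_free() on the fast path, and tear it down with tag_pool_free(). The sketch below is illustrative only and is not part of the patch; the my_dev structure, the MY_NR_TAGS constant and the my_dev_*() functions are made up for the example.

/* Illustrative usage only -- not part of this patch. */
#include <linux/tags.h>

#define MY_NR_TAGS	128	/* hypothetical tag space; tag 0 stays reserved */

struct my_dev {
	struct tag_pool tags;	/* e.g. one tag per in-flight command */
};

static int my_dev_init(struct my_dev *dev)
{
	/*
	 * dev is assumed to come from a zeroing allocator (e.g. kzalloc());
	 * this builds the global freelist (tags 1..MY_NR_TAGS-1) and the
	 * per-cpu caches.
	 */
	return tag_pool_init(&dev->tags, MY_NR_TAGS);
}

static uint16_t my_dev_start_request(struct my_dev *dev, bool can_sleep)
{
	uint16_t tag;

	/*
	 * wait == true sleeps until a tag is freed; wait == false returns 0
	 * when the pool is exhausted, since tag 0 is never handed out.
	 */
	tag = tag_alloc(&dev->tags, can_sleep);
	if (!tag)
		return 0;

	/* ... issue the request identified by tag ... */
	return tag;
}

static void my_dev_end_request(struct my_dev *dev, uint16_t tag)
{
	tag_free(&dev->tags, tag);	/* give the tag back on completion */
}

static void my_dev_exit(struct my_dev *dev)
{
	tag_pool_free(&dev->tags);
}
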
diff --git a/lib/Kconfig b/lib/Kconfig
index 4b31a46fb307..2d6c15b13ba3 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -401,4 +401,7 @@ config OID_REGISTRY
	help
	  Enable fast lookup object identifier registry.

+config PERCPU_TAG
+	bool
+
endmenu
diff --git a/lib/Makefile b/lib/Makefile
index a08b791200f3..ae97e733555b 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -134,6 +134,7 @@ obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o
obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
+obj-$(CONFIG_PERCPU_TAG) += tags.o
libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o
$(foreach file, $(libfdt_files), \
diff --git a/lib/tags.c b/lib/tags.c
new file mode 100644
index 000000000000..8e567d0404ab
--- /dev/null
+++ b/lib/tags.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2012 Google Inc. All Rights Reserved.
+ * Author: koverstreet@google.com (Kent Overstreet)
+ *
+ * Per cpu tag allocator.
+ */
+
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/tags.h>
+
+struct tag_cpu_freelist {
+	uint16_t nr_free;
+	uint16_t free[];
+};
+
+struct tag_waiter {
+	struct list_head list;
+	struct task_struct *task;
+};
+
+static inline void move_tags(u16 *dst, u16 *dst_nr,
+			     u16 *src, u16 *src_nr,
+			     u16 nr)
+{
+	*src_nr -= nr;
+	memcpy(dst + *dst_nr, src + *src_nr, sizeof(u16) * nr);
+	*dst_nr += nr;
+}
+
+uint16_t tag_alloc(struct tag_pool *pool, bool wait)
+{
+	struct tag_cpu_freelist *tags;
+	unsigned long flags;
+	uint16_t ret;
+retry:
+	preempt_disable();
+	local_irq_save(flags);
+	tags = this_cpu_ptr(pool->tag_cpu);
+
+	while (!tags->nr_free) {
+		spin_lock(&pool->lock);
+
+		if (pool->nr_free)
+			move_tags(tags->free, &tags->nr_free,
+				  pool->free, &pool->nr_free,
+				  min(pool->nr_free, pool->watermark));
+		else if (wait) {
+			struct tag_waiter wait = { .task = current };
+
+			__set_current_state(TASK_UNINTERRUPTIBLE);
+			list_add(&wait.list, &pool->wait);
+
+			spin_unlock(&pool->lock);
+			local_irq_restore(flags);
+			preempt_enable();
+
+			schedule();
+			__set_current_state(TASK_RUNNING);
+
+			if (!list_empty_careful(&wait.list)) {
+				spin_lock_irqsave(&pool->lock, flags);
+				list_del_init(&wait.list);
+				spin_unlock_irqrestore(&pool->lock, flags);
+			}
+
+			goto retry;
+		} else
+			goto fail;
+
+		spin_unlock(&pool->lock);
+	}
+
+	ret = tags->free[--tags->nr_free];
+
+	local_irq_restore(flags);
+	preempt_enable();
+
+	return ret;
+fail:
+	local_irq_restore(flags);
+	preempt_enable();
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tag_alloc);
+
+void tag_free(struct tag_pool *pool, uint16_t tag)
+{
+	struct tag_cpu_freelist *tags;
+	unsigned long flags;
+
+	preempt_disable();
+	local_irq_save(flags);
+	tags = this_cpu_ptr(pool->tag_cpu);
+
+	tags->free[tags->nr_free++] = tag;
+
+	if (tags->nr_free == pool->watermark * 2) {
+		spin_lock(&pool->lock);
+
+		move_tags(pool->free, &pool->nr_free,
+			  tags->free, &tags->nr_free,
+			  pool->watermark);
+
+		while (!list_empty(&pool->wait)) {
+			struct tag_waiter *wait;
+			wait = list_first_entry(&pool->wait,
+						struct tag_waiter, list);
+			list_del_init(&wait->list);
+			wake_up_process(wait->task);
+		}
+
+		spin_unlock(&pool->lock);
+	}
+
+	local_irq_restore(flags);
+	preempt_enable();
+}
+EXPORT_SYMBOL_GPL(tag_free);
+
+void tag_pool_free(struct tag_pool *pool)
+{
+	free_percpu(pool->tag_cpu);
+
+	free_pages((unsigned long) pool->free,
+		   get_order(pool->nr_tags * sizeof(u16)));
+}
+EXPORT_SYMBOL_GPL(tag_pool_free);
+
+int tag_pool_init(struct tag_pool *pool, uint16_t nr_tags)
+{
+	unsigned i, order;
+
+	spin_lock_init(&pool->lock);
+	INIT_LIST_HEAD(&pool->wait);
+	pool->nr_tags = nr_tags;
+
+	order = get_order(nr_tags * sizeof(u16));
+	pool->free = (void *) __get_free_pages(GFP_KERNEL, order);
+	if (!pool->free)
+		return -ENOMEM;
+
+	for (i = 1; i < nr_tags; i++)
+		pool->free[pool->nr_free++] = i;
+
+	/* nr_possible_cpus would be more correct */
+	pool->watermark = nr_tags / (num_possible_cpus() * 3);
+
+	if (pool->watermark > 64)
+		pool->watermark = round_down(pool->watermark, 32);
+
+	pool->tag_cpu = __alloc_percpu(sizeof(struct tag_cpu_freelist) +
+				       pool->watermark * 2 * sizeof(u16),
+				       sizeof(u16));
+	if (!pool->tag_cpu)
+		return -ENOMEM;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tag_pool_init);
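
To make the sizing in tag_pool_init() concrete (the numbers here are an example, not from the patch): with nr_tags = 256 on a machine where num_possible_cpus() is 4, the watermark comes out to 256 / (4 * 3) = 21; that is not above 64, so it is not rounded down to a multiple of 32, and each per-cpu freelist is allocated room for 2 * 21 = 42 tags. tag_alloc() refills an empty per-cpu freelist with up to watermark tags taken from the global freelist, and tag_free() pushes watermark tags back (and wakes any waiters) once the local freelist grows to 2 * watermark, so in the worst case roughly two thirds of the 255 usable tags (tag 0 is never put on a freelist) can be sitting in per-cpu caches rather than in the global pool.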